repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
MisterWil/abodepy
abodepy/socketio.py
SocketIO.set_cookie
def set_cookie(self, cookie=None): """Set the Cookie header.""" if cookie: self._cookie = cookie.encode() else: self._cookie = None
python
def set_cookie(self, cookie=None):
    """Set the Cookie header.

    A truthy *cookie* string is stored encoded to bytes; anything
    falsy (None, empty string) clears the stored header.
    """
    self._cookie = cookie.encode() if cookie else None
[ "def", "set_cookie", "(", "self", ",", "cookie", "=", "None", ")", ":", "if", "cookie", ":", "self", ".", "_cookie", "=", "cookie", ".", "encode", "(", ")", "else", ":", "self", ".", "_cookie", "=", "None" ]
Set the Cookie header.
[ "Set", "the", "Cookie", "header", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/socketio.py#L91-L96
train
32,300
MisterWil/abodepy
abodepy/socketio.py
SocketIO.on
def on(self, event_name, callback): """Register callback for a SocketIO event.""" if not event_name: return False _LOGGER.debug("Adding callback for event name: %s", event_name) self._callbacks[event_name].append((callback)) return True
python
def on(self, event_name, callback):
    """Register callback for a SocketIO event.

    Returns True when the callback was registered, False when no
    event name was supplied.
    """
    if event_name:
        _LOGGER.debug("Adding callback for event name: %s", event_name)
        self._callbacks[event_name].append(callback)
        return True

    return False
[ "def", "on", "(", "self", ",", "event_name", ",", "callback", ")", ":", "if", "not", "event_name", ":", "return", "False", "_LOGGER", ".", "debug", "(", "\"Adding callback for event name: %s\"", ",", "event_name", ")", "self", ".", "_callbacks", "[", "event_na...
Register callback for a SocketIO event.
[ "Register", "callback", "for", "a", "SocketIO", "event", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/socketio.py#L99-L108
train
32,301
MisterWil/abodepy
abodepy/socketio.py
SocketIO.start
def start(self): """Start a thread to handle SocketIO notifications.""" if not self._thread: _LOGGER.info("Starting SocketIO thread...") self._thread = threading.Thread(target=self._run_socketio_thread, name='SocketIOThread') self._thread.deamon = True self._thread.start()
python
def start(self):
    """Start a thread to handle SocketIO notifications.

    Idempotent: does nothing when a thread already exists.
    """
    if not self._thread:
        _LOGGER.info("Starting SocketIO thread...")

        self._thread = threading.Thread(target=self._run_socketio_thread,
                                        name='SocketIOThread')
        # Fix: the attribute is ``daemon``. The original spelling
        # ``deamon`` silently created an unused attribute, leaving the
        # thread non-daemonic so it could keep the interpreter alive
        # at shutdown.
        self._thread.daemon = True
        self._thread.start()
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "_thread", ":", "_LOGGER", ".", "info", "(", "\"Starting SocketIO thread...\"", ")", "self", ".", "_thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run_socketio_t...
Start a thread to handle SocketIO notifications.
[ "Start", "a", "thread", "to", "handle", "SocketIO", "notifications", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/socketio.py#L110-L118
train
32,302
MisterWil/abodepy
abodepy/socketio.py
SocketIO.stop
def stop(self): """Tell the SocketIO thread to terminate.""" if self._thread: _LOGGER.info("Stopping SocketIO thread...") # pylint: disable=W0212 self._running = False if self._exit_event: self._exit_event.set() self._thread.join()
python
def stop(self):
    """Tell the SocketIO thread to terminate."""
    if not self._thread:
        return

    _LOGGER.info("Stopping SocketIO thread...")

    # pylint: disable=W0212
    self._running = False

    if self._exit_event:
        self._exit_event.set()

    # Block until the worker thread has fully exited.
    self._thread.join()
[ "def", "stop", "(", "self", ")", ":", "if", "self", ".", "_thread", ":", "_LOGGER", ".", "info", "(", "\"Stopping SocketIO thread...\"", ")", "# pylint: disable=W0212\r", "self", ".", "_running", "=", "False", "if", "self", ".", "_exit_event", ":", "self", "...
Tell the SocketIO thread to terminate.
[ "Tell", "the", "SocketIO", "thread", "to", "terminate", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/socketio.py#L120-L131
train
32,303
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController.add_device_callback
def add_device_callback(self, devices, callback): """Register a device callback.""" if not devices: return False if not isinstance(devices, (tuple, list)): devices = [devices] for device in devices: # Device may be a device_id device_id = device # If they gave us an actual device, get that devices ID if isinstance(device, AbodeDevice): device_id = device.device_id # Validate the device is valid if not self._abode.get_device(device_id): raise AbodeException((ERROR.EVENT_DEVICE_INVALID)) _LOGGER.debug( "Subscribing to updated for device_id: %s", device_id) self._device_callbacks[device_id].append((callback)) return True
python
def add_device_callback(self, devices, callback):
    """Register a device callback.

    *devices* may be a single device/device_id or a list of them.
    Returns False when nothing was given, True once all callbacks
    are registered.
    """
    if not devices:
        return False

    device_list = devices if isinstance(devices, (tuple, list)) else [devices]

    for entry in device_list:
        # Accept either a raw device_id or an AbodeDevice instance.
        device_id = entry.device_id if isinstance(entry, AbodeDevice) else entry

        # Reject ids the controller does not know about.
        if not self._abode.get_device(device_id):
            raise AbodeException((ERROR.EVENT_DEVICE_INVALID))

        _LOGGER.debug(
            "Subscribing to updated for device_id: %s", device_id)

        self._device_callbacks[device_id].append(callback)

    return True
[ "def", "add_device_callback", "(", "self", ",", "devices", ",", "callback", ")", ":", "if", "not", "devices", ":", "return", "False", "if", "not", "isinstance", "(", "devices", ",", "(", "tuple", ",", "list", ")", ")", ":", "devices", "=", "[", "device...
Register a device callback.
[ "Register", "a", "device", "callback", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L49-L74
train
32,304
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController.add_event_callback
def add_event_callback(self, event_groups, callback): """Register callback for a group of timeline events.""" if not event_groups: return False if not isinstance(event_groups, (tuple, list)): event_groups = [event_groups] for event_group in event_groups: if event_group not in TIMELINE.ALL_EVENT_GROUPS: raise AbodeException(ERROR.EVENT_GROUP_INVALID, TIMELINE.ALL_EVENT_GROUPS) _LOGGER.debug("Subscribing to event group: %s", event_group) self._event_callbacks[event_group].append((callback)) return True
python
def add_event_callback(self, event_groups, callback):
    """Register callback for a group of timeline events.

    *event_groups* may be one group name or a list of them.  Every
    group must appear in TIMELINE.ALL_EVENT_GROUPS.
    """
    if not event_groups:
        return False

    groups = (event_groups if isinstance(event_groups, (tuple, list))
              else [event_groups])

    for group in groups:
        if group not in TIMELINE.ALL_EVENT_GROUPS:
            raise AbodeException(ERROR.EVENT_GROUP_INVALID,
                                 TIMELINE.ALL_EVENT_GROUPS)

        _LOGGER.debug("Subscribing to event group: %s", group)
        self._event_callbacks[group].append(callback)

    return True
[ "def", "add_event_callback", "(", "self", ",", "event_groups", ",", "callback", ")", ":", "if", "not", "event_groups", ":", "return", "False", "if", "not", "isinstance", "(", "event_groups", ",", "(", "tuple", ",", "list", ")", ")", ":", "event_groups", "=...
Register callback for a group of timeline events.
[ "Register", "callback", "for", "a", "group", "of", "timeline", "events", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L76-L93
train
32,305
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController.add_timeline_callback
def add_timeline_callback(self, timeline_events, callback): """Register a callback for a specific timeline event.""" if not timeline_events: return False if not isinstance(timeline_events, (tuple, list)): timeline_events = [timeline_events] for timeline_event in timeline_events: if not isinstance(timeline_event, dict): raise AbodeException((ERROR.EVENT_CODE_MISSING)) event_code = timeline_event.get('event_code') if not event_code: raise AbodeException((ERROR.EVENT_CODE_MISSING)) _LOGGER.debug("Subscribing to timeline event: %s", timeline_event) self._timeline_callbacks[event_code].append((callback)) return True
python
def add_timeline_callback(self, timeline_events, callback):
    """Register a callback for a specific timeline event.

    Each entry must be a dict carrying an 'event_code' key; the
    callback is keyed on that code.
    """
    if not timeline_events:
        return False

    events = (timeline_events if isinstance(timeline_events, (tuple, list))
              else [timeline_events])

    for event in events:
        if not isinstance(event, dict):
            raise AbodeException((ERROR.EVENT_CODE_MISSING))

        event_code = event.get('event_code')

        if not event_code:
            raise AbodeException((ERROR.EVENT_CODE_MISSING))

        _LOGGER.debug("Subscribing to timeline event: %s", event)
        self._timeline_callbacks[event_code].append(callback)

    return True
[ "def", "add_timeline_callback", "(", "self", ",", "timeline_events", ",", "callback", ")", ":", "if", "not", "timeline_events", ":", "return", "False", "if", "not", "isinstance", "(", "timeline_events", ",", "(", "tuple", ",", "list", ")", ")", ":", "timelin...
Register a callback for a specific timeline event.
[ "Register", "a", "callback", "for", "a", "specific", "timeline", "event", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L95-L116
train
32,306
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController._on_socket_started
def _on_socket_started(self): """Socket IO startup callback.""" # pylint: disable=W0212 cookies = self._abode._get_session().cookies.get_dict() cookie_string = "; ".join( [str(x) + "=" + str(y) for x, y in cookies.items()]) self._socketio.set_cookie(cookie_string)
python
def _on_socket_started(self): """Socket IO startup callback.""" # pylint: disable=W0212 cookies = self._abode._get_session().cookies.get_dict() cookie_string = "; ".join( [str(x) + "=" + str(y) for x, y in cookies.items()]) self._socketio.set_cookie(cookie_string)
[ "def", "_on_socket_started", "(", "self", ")", ":", "# pylint: disable=W0212", "cookies", "=", "self", ".", "_abode", ".", "_get_session", "(", ")", ".", "cookies", ".", "get_dict", "(", ")", "cookie_string", "=", "\"; \"", ".", "join", "(", "[", "str", "(...
Socket IO startup callback.
[ "Socket", "IO", "startup", "callback", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L123-L130
train
32,307
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController._on_device_update
def _on_device_update(self, devid): """Device callback from Abode SocketIO server.""" if isinstance(devid, (tuple, list)): devid = devid[0] if devid is None: _LOGGER.warning("Device update with no device id.") return _LOGGER.debug("Device update event for device ID: %s", devid) device = self._abode.get_device(devid, True) if not device: _LOGGER.debug("Got device update for unknown device: %s", devid) return for callback in self._device_callbacks.get(device.device_id, ()): _execute_callback(callback, device)
python
def _on_device_update(self, devid):
    """Device callback from Abode SocketIO server."""
    # The server may deliver the id wrapped in a one-element list.
    device_id = devid[0] if isinstance(devid, (tuple, list)) else devid

    if device_id is None:
        _LOGGER.warning("Device update with no device id.")
        return

    _LOGGER.debug("Device update event for device ID: %s", device_id)

    # Refresh so subscribers see the latest device state.
    device = self._abode.get_device(device_id, True)

    if device:
        for subscriber in self._device_callbacks.get(device.device_id, ()):
            _execute_callback(subscriber, device)
    else:
        _LOGGER.debug("Got device update for unknown device: %s", device_id)
[ "def", "_on_device_update", "(", "self", ",", "devid", ")", ":", "if", "isinstance", "(", "devid", ",", "(", "tuple", ",", "list", ")", ")", ":", "devid", "=", "devid", "[", "0", "]", "if", "devid", "is", "None", ":", "_LOGGER", ".", "warning", "("...
Device callback from Abode SocketIO server.
[ "Device", "callback", "from", "Abode", "SocketIO", "server", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L136-L154
train
32,308
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController._on_mode_change
def _on_mode_change(self, mode): """Mode change broadcast from Abode SocketIO server.""" if isinstance(mode, (tuple, list)): mode = mode[0] if mode is None: _LOGGER.warning("Mode change event with no mode.") return if not mode or mode.lower() not in CONST.ALL_MODES: _LOGGER.warning("Mode change event with unknown mode: %s", mode) return _LOGGER.debug("Alarm mode change event to: %s", mode) # We're just going to convert it to an Alarm device alarm_device = self._abode.get_alarm(refresh=True) # At the time of development, refreshing after mode change notification # didn't seem to get the latest update immediately. As such, we will # force the mode status now to match the notification. # pylint: disable=W0212 alarm_device._json_state['mode']['area_1'] = mode for callback in self._device_callbacks.get(alarm_device.device_id, ()): _execute_callback(callback, alarm_device)
python
def _on_mode_change(self, mode):
    """Mode change broadcast from Abode SocketIO server."""
    # The server may deliver the mode wrapped in a one-element list.
    new_mode = mode[0] if isinstance(mode, (tuple, list)) else mode

    if new_mode is None:
        _LOGGER.warning("Mode change event with no mode.")
        return

    if not new_mode or new_mode.lower() not in CONST.ALL_MODES:
        _LOGGER.warning("Mode change event with unknown mode: %s", new_mode)
        return

    _LOGGER.debug("Alarm mode change event to: %s", new_mode)

    # We're just going to convert it to an Alarm device
    alarm_device = self._abode.get_alarm(refresh=True)

    # At the time of development, refreshing after mode change notification
    # didn't seem to get the latest update immediately. As such, we will
    # force the mode status now to match the notification.
    # pylint: disable=W0212
    alarm_device._json_state['mode']['area_1'] = new_mode

    for subscriber in self._device_callbacks.get(alarm_device.device_id, ()):
        _execute_callback(subscriber, alarm_device)
[ "def", "_on_mode_change", "(", "self", ",", "mode", ")", ":", "if", "isinstance", "(", "mode", ",", "(", "tuple", ",", "list", ")", ")", ":", "mode", "=", "mode", "[", "0", "]", "if", "mode", "is", "None", ":", "_LOGGER", ".", "warning", "(", "\"...
Mode change broadcast from Abode SocketIO server.
[ "Mode", "change", "broadcast", "from", "Abode", "SocketIO", "server", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L156-L181
train
32,309
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController._on_timeline_update
def _on_timeline_update(self, event): """Timeline update broadcast from Abode SocketIO server.""" if isinstance(event, (tuple, list)): event = event[0] event_type = event.get('event_type') event_code = event.get('event_code') if not event_type or not event_code: _LOGGER.warning("Invalid timeline update event: %s", event) return _LOGGER.debug("Timeline event received: %s - %s (%s)", event.get('event_name'), event_type, event_code) # Compress our callbacks into those that match this event_code # or ones registered to get callbacks for all events codes = (event_code, TIMELINE.ALL['event_code']) all_callbacks = [self._timeline_callbacks[code] for code in codes] for callbacks in all_callbacks: for callback in callbacks: _execute_callback(callback, event) # Attempt to map the event code to a group and callback event_group = TIMELINE.map_event_code(event_code) if event_group: for callback in self._event_callbacks.get(event_group, ()): _execute_callback(callback, event)
python
def _on_timeline_update(self, event):
    """Timeline update broadcast from Abode SocketIO server."""
    if isinstance(event, (tuple, list)):
        event = event[0]

    event_type = event.get('event_type')
    event_code = event.get('event_code')

    if not event_type or not event_code:
        _LOGGER.warning("Invalid timeline update event: %s", event)
        return

    _LOGGER.debug("Timeline event received: %s - %s (%s)",
                  event.get('event_name'), event_type, event_code)

    # Notify subscribers registered for this specific event code as
    # well as those registered for every timeline event.
    for code in (event_code, TIMELINE.ALL['event_code']):
        for subscriber in self._timeline_callbacks[code]:
            _execute_callback(subscriber, event)

    # Attempt to map the event code to a group and callback
    event_group = TIMELINE.map_event_code(event_code)

    if event_group:
        for subscriber in self._event_callbacks.get(event_group, ()):
            _execute_callback(subscriber, event)
[ "def", "_on_timeline_update", "(", "self", ",", "event", ")", ":", "if", "isinstance", "(", "event", ",", "(", "tuple", ",", "list", ")", ")", ":", "event", "=", "event", "[", "0", "]", "event_type", "=", "event", ".", "get", "(", "'event_type'", ")"...
Timeline update broadcast from Abode SocketIO server.
[ "Timeline", "update", "broadcast", "from", "Abode", "SocketIO", "server", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L183-L212
train
32,310
MisterWil/abodepy
abodepy/event_controller.py
AbodeEventController._on_automation_update
def _on_automation_update(self, event): """Automation update broadcast from Abode SocketIO server.""" event_group = TIMELINE.AUTOMATION_EDIT_GROUP if isinstance(event, (tuple, list)): event = event[0] for callback in self._event_callbacks.get(event_group, ()): _execute_callback(callback, event)
python
def _on_automation_update(self, event):
    """Automation update broadcast from Abode SocketIO server."""
    if isinstance(event, (tuple, list)):
        event = event[0]

    # All automation edits fan out to the single edit group.
    for subscriber in self._event_callbacks.get(
            TIMELINE.AUTOMATION_EDIT_GROUP, ()):
        _execute_callback(subscriber, event)
[ "def", "_on_automation_update", "(", "self", ",", "event", ")", ":", "event_group", "=", "TIMELINE", ".", "AUTOMATION_EDIT_GROUP", "if", "isinstance", "(", "event", ",", "(", "tuple", ",", "list", ")", ")", ":", "event", "=", "event", "[", "0", "]", "for...
Automation update broadcast from Abode SocketIO server.
[ "Automation", "update", "broadcast", "from", "Abode", "SocketIO", "server", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/event_controller.py#L214-L222
train
32,311
MisterWil/abodepy
abodepy/__init__.py
Abode.login
def login(self, username=None, password=None): """Explicit Abode login.""" if username is not None: self._cache[CONST.ID] = username if password is not None: self._cache[CONST.PASSWORD] = password if (self._cache[CONST.ID] is None or not isinstance(self._cache[CONST.ID], str)): raise AbodeAuthenticationException(ERROR.USERNAME) if (self._cache[CONST.PASSWORD] is None or not isinstance(self._cache[CONST.PASSWORD], str)): raise AbodeAuthenticationException(ERROR.PASSWORD) self._save_cache() self._token = None login_data = { CONST.ID: self._cache[CONST.ID], CONST.PASSWORD: self._cache[CONST.PASSWORD], CONST.UUID: self._cache[CONST.UUID] } response = self._session.post(CONST.LOGIN_URL, json=login_data) response_object = json.loads(response.text) oauth_token = self._session.get(CONST.OAUTH_TOKEN_URL) oauth_token_object = json.loads(oauth_token.text) if response.status_code != 200: raise AbodeAuthenticationException((response.status_code, response_object['message'])) _LOGGER.debug("Login Response: %s", response.text) self._token = response_object['token'] self._panel = response_object['panel'] self._user = response_object['user'] self._oauth_token = oauth_token_object['access_token'] _LOGGER.info("Login successful") return True
python
def login(self, username=None, password=None):
    """Explicit Abode login.

    Supplied credentials override (and are persisted to) the cache;
    both must end up as non-empty strings.  On success stores the API
    token, panel/user metadata and an OAuth access token, then
    returns True.  Raises AbodeAuthenticationException on bad
    credentials or a non-200 login response.
    """
    if username is not None:
        self._cache[CONST.ID] = username
    if password is not None:
        self._cache[CONST.PASSWORD] = password

    if (self._cache[CONST.ID] is None or
            not isinstance(self._cache[CONST.ID], str)):
        raise AbodeAuthenticationException(ERROR.USERNAME)

    if (self._cache[CONST.PASSWORD] is None or
            not isinstance(self._cache[CONST.PASSWORD], str)):
        raise AbodeAuthenticationException(ERROR.PASSWORD)

    self._save_cache()

    # Invalidate any prior token before attempting a fresh login.
    self._token = None

    credentials = {
        CONST.ID: self._cache[CONST.ID],
        CONST.PASSWORD: self._cache[CONST.PASSWORD],
        CONST.UUID: self._cache[CONST.UUID]
    }

    response = self._session.post(CONST.LOGIN_URL, json=credentials)
    response_object = json.loads(response.text)

    oauth_token = self._session.get(CONST.OAUTH_TOKEN_URL)
    oauth_token_object = json.loads(oauth_token.text)

    if response.status_code != 200:
        raise AbodeAuthenticationException((response.status_code,
                                            response_object['message']))

    _LOGGER.debug("Login Response: %s", response.text)

    self._token = response_object['token']
    self._panel = response_object['panel']
    self._user = response_object['user']
    self._oauth_token = oauth_token_object['access_token']

    _LOGGER.info("Login successful")

    return True
[ "def", "login", "(", "self", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "if", "username", "is", "not", "None", ":", "self", ".", "_cache", "[", "CONST", ".", "ID", "]", "=", "username", "if", "password", "is", "not", "Non...
Explicit Abode login.
[ "Explicit", "Abode", "login", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L107-L151
train
32,312
MisterWil/abodepy
abodepy/__init__.py
Abode.logout
def logout(self): """Explicit Abode logout.""" if self._token: header_data = { 'ABODE-API-KEY': self._token } self._session = requests.session() self._token = None self._panel = None self._user = None self._devices = None self._automations = None try: response = self._session.post( CONST.LOGOUT_URL, headers=header_data) response_object = json.loads(response.text) except OSError as exc: _LOGGER.warning("Caught exception during logout: %s", str(exc)) return False if response.status_code != 200: raise AbodeAuthenticationException( (response.status_code, response_object['message'])) _LOGGER.debug("Logout Response: %s", response.text) _LOGGER.info("Logout successful") return True
python
def logout(self):
    """Explicit Abode logout.

    Clears all cached session state, then notifies the server using
    the old token.  Returns False on a network error, raises
    AbodeAuthenticationException on a non-200 response, otherwise
    returns True.
    """
    if self._token:
        header_data = {
            'ABODE-API-KEY': self._token
        }

        # Reset local state first; the saved header still carries the
        # old token for the server-side logout call.
        self._session = requests.session()
        self._token = None
        self._panel = None
        self._user = None
        self._devices = None
        self._automations = None

        try:
            response = self._session.post(
                CONST.LOGOUT_URL, headers=header_data)
            response_object = json.loads(response.text)
        except OSError as exc:
            # Best-effort: a network failure is reported, not raised.
            _LOGGER.warning("Caught exception during logout: %s", str(exc))
            return False

        if response.status_code != 200:
            raise AbodeAuthenticationException(
                (response.status_code, response_object['message']))

        _LOGGER.debug("Logout Response: %s", response.text)

    _LOGGER.info("Logout successful")

    return True
[ "def", "logout", "(", "self", ")", ":", "if", "self", ".", "_token", ":", "header_data", "=", "{", "'ABODE-API-KEY'", ":", "self", ".", "_token", "}", "self", ".", "_session", "=", "requests", ".", "session", "(", ")", "self", ".", "_token", "=", "No...
Explicit Abode logout.
[ "Explicit", "Abode", "logout", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L153-L183
train
32,313
MisterWil/abodepy
abodepy/__init__.py
Abode.refresh
def refresh(self): """Do a full refresh of all devices and automations.""" self.get_devices(refresh=True) self.get_automations(refresh=True)
python
def refresh(self):
    """Do a full refresh of all devices and automations."""
    for updater in (self.get_devices, self.get_automations):
        updater(refresh=True)
[ "def", "refresh", "(", "self", ")", ":", "self", ".", "get_devices", "(", "refresh", "=", "True", ")", "self", ".", "get_automations", "(", "refresh", "=", "True", ")" ]
Do a full refresh of all devices and automations.
[ "Do", "a", "full", "refresh", "of", "all", "devices", "and", "automations", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L185-L188
train
32,314
MisterWil/abodepy
abodepy/__init__.py
Abode.get_device
def get_device(self, device_id, refresh=False): """Get a single device.""" if self._devices is None: self.get_devices() refresh = False device = self._devices.get(device_id) if device and refresh: device.refresh() return device
python
def get_device(self, device_id, refresh=False):
    """Get a single device.

    Returns None when the id is unknown.  When *refresh* is truthy
    the device is refreshed before being returned.
    """
    if self._devices is None:
        # First access populates the cache, which is then already fresh.
        self.get_devices()
        refresh = False

    device = self._devices.get(device_id)

    if refresh and device:
        device.refresh()

    return device
[ "def", "get_device", "(", "self", ",", "device_id", ",", "refresh", "=", "False", ")", ":", "if", "self", ".", "_devices", "is", "None", ":", "self", ".", "get_devices", "(", ")", "refresh", "=", "False", "device", "=", "self", ".", "_devices", ".", ...
Get a single device.
[ "Get", "a", "single", "device", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L251-L262
train
32,315
MisterWil/abodepy
abodepy/__init__.py
Abode.get_automations
def get_automations(self, refresh=False, generic_type=None): """Get all automations.""" if refresh or self._automations is None: if self._automations is None: # Set up the device libraries self._automations = {} _LOGGER.info("Updating all automations...") response = self.send_request("get", CONST.AUTOMATION_URL) response_object = json.loads(response.text) if (response_object and not isinstance(response_object, (tuple, list))): response_object = [response_object] _LOGGER.debug("Get Automations Response: %s", response.text) for automation_json in response_object: # Attempt to reuse an existing automation object automation = self._automations.get(str(automation_json['id'])) # No existing automation, create a new one if automation: automation.update(automation_json) else: automation = AbodeAutomation(self, automation_json) self._automations[automation.automation_id] = automation if generic_type: automations = [] for automation in self._automations.values(): if (automation.generic_type is not None and automation.generic_type in generic_type): automations.append(automation) return automations return list(self._automations.values())
python
def get_automations(self, refresh=False, generic_type=None):
    """Get all automations.

    Fetches from the server on first call or when *refresh* is set;
    otherwise serves the cache.  When *generic_type* is given, only
    automations whose generic_type is contained in it are returned.
    """
    if refresh or self._automations is None:
        if self._automations is None:
            # First call: initialize the automation cache.
            self._automations = {}

        _LOGGER.info("Updating all automations...")
        response = self.send_request("get", CONST.AUTOMATION_URL)
        response_object = json.loads(response.text)

        # Normalize a single-object payload into a list.
        if (response_object and
                not isinstance(response_object, (tuple, list))):
            response_object = [response_object]

        _LOGGER.debug("Get Automations Response: %s", response.text)

        for automation_json in response_object:
            # Reuse and update an existing automation object for a
            # known id; otherwise create and cache a new one.
            automation = self._automations.get(str(automation_json['id']))

            if automation:
                automation.update(automation_json)
            else:
                automation = AbodeAutomation(self, automation_json)
                self._automations[automation.automation_id] = automation

    if generic_type:
        return [automation for automation in self._automations.values()
                if (automation.generic_type is not None and
                    automation.generic_type in generic_type)]

    return list(self._automations.values())
[ "def", "get_automations", "(", "self", ",", "refresh", "=", "False", ",", "generic_type", "=", "None", ")", ":", "if", "refresh", "or", "self", ".", "_automations", "is", "None", ":", "if", "self", ".", "_automations", "is", "None", ":", "# Set up the devi...
Get all automations.
[ "Get", "all", "automations", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L264-L300
train
32,316
MisterWil/abodepy
abodepy/__init__.py
Abode.get_automation
def get_automation(self, automation_id, refresh=False): """Get a single automation.""" if self._automations is None: self.get_automations() refresh = False automation = self._automations.get(str(automation_id)) if automation and refresh: automation.refresh() return automation
python
def get_automation(self, automation_id, refresh=False):
    """Get a single automation.

    The id is looked up as a string; returns None when unknown.
    When *refresh* is truthy the automation is refreshed first.
    """
    if self._automations is None:
        # Populating the cache just now means it is already current.
        self.get_automations()
        refresh = False

    automation = self._automations.get(str(automation_id))

    if refresh and automation:
        automation.refresh()

    return automation
[ "def", "get_automation", "(", "self", ",", "automation_id", ",", "refresh", "=", "False", ")", ":", "if", "self", ".", "_automations", "is", "None", ":", "self", ".", "get_automations", "(", ")", "refresh", "=", "False", "automation", "=", "self", ".", "...
Get a single automation.
[ "Get", "a", "single", "automation", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L302-L313
train
32,317
MisterWil/abodepy
abodepy/__init__.py
Abode.get_alarm
def get_alarm(self, area='1', refresh=False): """Shortcut method to get the alarm device.""" if self._devices is None: self.get_devices() refresh = False return self.get_device(CONST.ALARM_DEVICE_ID + area, refresh)
python
def get_alarm(self, area='1', refresh=False):
    """Shortcut method to get the alarm device."""
    if self._devices is None:
        self.get_devices()
        refresh = False

    # The alarm's device id is the fixed prefix plus the area number.
    alarm_id = CONST.ALARM_DEVICE_ID + area

    return self.get_device(alarm_id, refresh)
[ "def", "get_alarm", "(", "self", ",", "area", "=", "'1'", ",", "refresh", "=", "False", ")", ":", "if", "self", ".", "_devices", "is", "None", ":", "self", ".", "get_devices", "(", ")", "refresh", "=", "False", "return", "self", ".", "get_device", "(...
Shortcut method to get the alarm device.
[ "Shortcut", "method", "to", "get", "the", "alarm", "device", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L315-L321
train
32,318
MisterWil/abodepy
abodepy/__init__.py
Abode.set_default_mode
def set_default_mode(self, default_mode): """Set the default mode when alarms are turned 'on'.""" if default_mode.lower() not in (CONST.MODE_AWAY, CONST.MODE_HOME): raise AbodeException(ERROR.INVALID_DEFAULT_ALARM_MODE) self._default_alarm_mode = default_mode.lower()
python
def set_default_mode(self, default_mode):
    """Set the default mode when alarms are turned 'on'.

    Only the away and home modes are accepted (case-insensitive).
    """
    normalized = default_mode.lower()

    if normalized not in (CONST.MODE_AWAY, CONST.MODE_HOME):
        raise AbodeException(ERROR.INVALID_DEFAULT_ALARM_MODE)

    self._default_alarm_mode = normalized
[ "def", "set_default_mode", "(", "self", ",", "default_mode", ")", ":", "if", "default_mode", ".", "lower", "(", ")", "not", "in", "(", "CONST", ".", "MODE_AWAY", ",", "CONST", ".", "MODE_HOME", ")", ":", "raise", "AbodeException", "(", "ERROR", ".", "INV...
Set the default mode when alarms are turned 'on'.
[ "Set", "the", "default", "mode", "when", "alarms", "are", "turned", "on", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L323-L328
train
32,319
MisterWil/abodepy
abodepy/__init__.py
Abode.set_setting
def set_setting(self, setting, value, area='1', validate_value=True): """Set an abode system setting to a given value.""" setting = setting.lower() if setting not in CONST.ALL_SETTINGS: raise AbodeException(ERROR.INVALID_SETTING, CONST.ALL_SETTINGS) if setting in CONST.PANEL_SETTINGS: url = CONST.SETTINGS_URL data = self._panel_settings(setting, value, validate_value) elif setting in CONST.AREA_SETTINGS: url = CONST.AREAS_URL data = self._area_settings(area, setting, value, validate_value) elif setting in CONST.SOUND_SETTINGS: url = CONST.SOUNDS_URL data = self._sound_settings(area, setting, value, validate_value) elif setting in CONST.SIREN_SETTINGS: url = CONST.SIREN_URL data = self._siren_settings(setting, value, validate_value) return self.send_request(method="put", url=url, data=data)
python
def set_setting(self, setting, value, area='1', validate_value=True): """Set an abode system setting to a given value.""" setting = setting.lower() if setting not in CONST.ALL_SETTINGS: raise AbodeException(ERROR.INVALID_SETTING, CONST.ALL_SETTINGS) if setting in CONST.PANEL_SETTINGS: url = CONST.SETTINGS_URL data = self._panel_settings(setting, value, validate_value) elif setting in CONST.AREA_SETTINGS: url = CONST.AREAS_URL data = self._area_settings(area, setting, value, validate_value) elif setting in CONST.SOUND_SETTINGS: url = CONST.SOUNDS_URL data = self._sound_settings(area, setting, value, validate_value) elif setting in CONST.SIREN_SETTINGS: url = CONST.SIREN_URL data = self._siren_settings(setting, value, validate_value) return self.send_request(method="put", url=url, data=data)
[ "def", "set_setting", "(", "self", ",", "setting", ",", "value", ",", "area", "=", "'1'", ",", "validate_value", "=", "True", ")", ":", "setting", "=", "setting", ".", "lower", "(", ")", "if", "setting", "not", "in", "CONST", ".", "ALL_SETTINGS", ":", ...
Set an abode system setting to a given value.
[ "Set", "an", "abode", "system", "setting", "to", "a", "given", "value", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L330-L350
train
32,320
MisterWil/abodepy
abodepy/__init__.py
Abode._panel_settings
def _panel_settings(setting, value, validate_value): """Will validate panel settings and values, returns data packet.""" if validate_value: if (setting == CONST.SETTING_CAMERA_RESOLUTION and value not in CONST.SETTING_ALL_CAMERA_RES): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.SETTING_ALL_CAMERA_RES) elif (setting in [CONST.SETTING_CAMERA_GRAYSCALE, CONST.SETTING_SILENCE_SOUNDS] and value not in CONST.SETTING_DISABLE_ENABLE): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.SETTING_DISABLE_ENABLE) return {setting: value}
python
def _panel_settings(setting, value, validate_value): """Will validate panel settings and values, returns data packet.""" if validate_value: if (setting == CONST.SETTING_CAMERA_RESOLUTION and value not in CONST.SETTING_ALL_CAMERA_RES): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.SETTING_ALL_CAMERA_RES) elif (setting in [CONST.SETTING_CAMERA_GRAYSCALE, CONST.SETTING_SILENCE_SOUNDS] and value not in CONST.SETTING_DISABLE_ENABLE): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.SETTING_DISABLE_ENABLE) return {setting: value}
[ "def", "_panel_settings", "(", "setting", ",", "value", ",", "validate_value", ")", ":", "if", "validate_value", ":", "if", "(", "setting", "==", "CONST", ".", "SETTING_CAMERA_RESOLUTION", "and", "value", "not", "in", "CONST", ".", "SETTING_ALL_CAMERA_RES", ")",...
Will validate panel settings and values, returns data packet.
[ "Will", "validate", "panel", "settings", "and", "values", "returns", "data", "packet", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L353-L368
train
32,321
MisterWil/abodepy
abodepy/__init__.py
Abode._area_settings
def _area_settings(area, setting, value, validate_value): """Will validate area settings and values, returns data packet.""" if validate_value: # Exit delay has some specific limitations apparently if (setting == CONST.SETTING_EXIT_DELAY_AWAY and value not in CONST.VALID_SETTING_EXIT_AWAY): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.VALID_SETTING_EXIT_AWAY) elif value not in CONST.ALL_SETTING_ENTRY_EXIT_DELAY: raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_ENTRY_EXIT_DELAY) return {'area': area, setting: value}
python
def _area_settings(area, setting, value, validate_value): """Will validate area settings and values, returns data packet.""" if validate_value: # Exit delay has some specific limitations apparently if (setting == CONST.SETTING_EXIT_DELAY_AWAY and value not in CONST.VALID_SETTING_EXIT_AWAY): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.VALID_SETTING_EXIT_AWAY) elif value not in CONST.ALL_SETTING_ENTRY_EXIT_DELAY: raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_ENTRY_EXIT_DELAY) return {'area': area, setting: value}
[ "def", "_area_settings", "(", "area", ",", "setting", ",", "value", ",", "validate_value", ")", ":", "if", "validate_value", ":", "# Exit delay has some specific limitations apparently", "if", "(", "setting", "==", "CONST", ".", "SETTING_EXIT_DELAY_AWAY", "and", "valu...
Will validate area settings and values, returns data packet.
[ "Will", "validate", "area", "settings", "and", "values", "returns", "data", "packet", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L371-L383
train
32,322
MisterWil/abodepy
abodepy/__init__.py
Abode._sound_settings
def _sound_settings(area, setting, value, validate_value): """Will validate sound settings and values, returns data packet.""" if validate_value: if (setting in CONST.VALID_SOUND_SETTINGS and value not in CONST.ALL_SETTING_SOUND): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_SOUND) elif (setting == CONST.SETTING_ALARM_LENGTH and value not in CONST.ALL_SETTING_ALARM_LENGTH): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_ALARM_LENGTH) elif (setting == CONST.SETTING_FINAL_BEEPS and value not in CONST.ALL_SETTING_FINAL_BEEPS): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_FINAL_BEEPS) return {'area': area, setting: value}
python
def _sound_settings(area, setting, value, validate_value): """Will validate sound settings and values, returns data packet.""" if validate_value: if (setting in CONST.VALID_SOUND_SETTINGS and value not in CONST.ALL_SETTING_SOUND): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_SOUND) elif (setting == CONST.SETTING_ALARM_LENGTH and value not in CONST.ALL_SETTING_ALARM_LENGTH): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_ALARM_LENGTH) elif (setting == CONST.SETTING_FINAL_BEEPS and value not in CONST.ALL_SETTING_FINAL_BEEPS): raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_FINAL_BEEPS) return {'area': area, setting: value}
[ "def", "_sound_settings", "(", "area", ",", "setting", ",", "value", ",", "validate_value", ")", ":", "if", "validate_value", ":", "if", "(", "setting", "in", "CONST", ".", "VALID_SOUND_SETTINGS", "and", "value", "not", "in", "CONST", ".", "ALL_SETTING_SOUND",...
Will validate sound settings and values, returns data packet.
[ "Will", "validate", "sound", "settings", "and", "values", "returns", "data", "packet", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L386-L402
train
32,323
MisterWil/abodepy
abodepy/__init__.py
Abode._siren_settings
def _siren_settings(setting, value, validate_value): """Will validate siren settings and values, returns data packet.""" if validate_value: if value not in CONST.SETTING_DISABLE_ENABLE: raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.SETTING_DISABLE_ENABLE) return {'action': setting, 'option': value}
python
def _siren_settings(setting, value, validate_value): """Will validate siren settings and values, returns data packet.""" if validate_value: if value not in CONST.SETTING_DISABLE_ENABLE: raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.SETTING_DISABLE_ENABLE) return {'action': setting, 'option': value}
[ "def", "_siren_settings", "(", "setting", ",", "value", ",", "validate_value", ")", ":", "if", "validate_value", ":", "if", "value", "not", "in", "CONST", ".", "SETTING_DISABLE_ENABLE", ":", "raise", "AbodeException", "(", "ERROR", ".", "INVALID_SETTING_VALUE", ...
Will validate siren settings and values, returns data packet.
[ "Will", "validate", "siren", "settings", "and", "values", "returns", "data", "packet", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L405-L412
train
32,324
MisterWil/abodepy
abodepy/__init__.py
Abode.send_request
def send_request(self, method, url, headers=None, data=None, is_retry=False): """Send requests to Abode.""" if not self._token: self.login() if not headers: headers = {} headers['Authorization'] = 'Bearer ' + self._oauth_token headers['ABODE-API-KEY'] = self._token try: response = getattr(self._session, method)( url, headers=headers, json=data) if response and response.status_code < 400: return response except RequestException: _LOGGER.info("Abode connection reset...") if not is_retry: # Delete our current token and try again -- will force a login # attempt. self._token = None return self.send_request(method, url, headers, data, True) raise AbodeException((ERROR.REQUEST))
python
def send_request(self, method, url, headers=None, data=None, is_retry=False): """Send requests to Abode.""" if not self._token: self.login() if not headers: headers = {} headers['Authorization'] = 'Bearer ' + self._oauth_token headers['ABODE-API-KEY'] = self._token try: response = getattr(self._session, method)( url, headers=headers, json=data) if response and response.status_code < 400: return response except RequestException: _LOGGER.info("Abode connection reset...") if not is_retry: # Delete our current token and try again -- will force a login # attempt. self._token = None return self.send_request(method, url, headers, data, True) raise AbodeException((ERROR.REQUEST))
[ "def", "send_request", "(", "self", ",", "method", ",", "url", ",", "headers", "=", "None", ",", "data", "=", "None", ",", "is_retry", "=", "False", ")", ":", "if", "not", "self", ".", "_token", ":", "self", ".", "login", "(", ")", "if", "not", "...
Send requests to Abode.
[ "Send", "requests", "to", "Abode", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L414-L442
train
32,325
MisterWil/abodepy
abodepy/__init__.py
Abode._save_cache
def _save_cache(self): """Trigger a cache save.""" if not self._disable_cache: UTILS.save_cache(self._cache, self._cache_path)
python
def _save_cache(self): """Trigger a cache save.""" if not self._disable_cache: UTILS.save_cache(self._cache, self._cache_path)
[ "def", "_save_cache", "(", "self", ")", ":", "if", "not", "self", ".", "_disable_cache", ":", "UTILS", ".", "save_cache", "(", "self", ".", "_cache", ",", "self", ".", "_cache_path", ")" ]
Trigger a cache save.
[ "Trigger", "a", "cache", "save", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/__init__.py#L475-L478
train
32,326
MisterWil/abodepy
abodepy/helpers/constants.py
get_generic_type
def get_generic_type(type_tag): """Map type tag to generic type.""" return { # Alarm DEVICE_ALARM: TYPE_ALARM, # Binary Sensors - Connectivity DEVICE_GLASS_BREAK: TYPE_CONNECTIVITY, DEVICE_KEYPAD: TYPE_CONNECTIVITY, DEVICE_REMOTE_CONTROLLER: TYPE_CONNECTIVITY, DEVICE_SIREN: TYPE_CONNECTIVITY, DEVICE_STATUS_DISPLAY: TYPE_CONNECTIVITY, # Binary Sensors - Opening DEVICE_DOOR_CONTACT: TYPE_OPENING, # Cameras DEVICE_MOTION_CAMERA: TYPE_CAMERA, DEVICE_MOTION_VIDEO_CAMERA: TYPE_CAMERA, DEVICE_IP_CAM: TYPE_CAMERA, DEVICE_OUTDOOR_MOTION_CAMERA: TYPE_CAMERA, # Covers DEVICE_SECURE_BARRIER: TYPE_COVER, # Lights (Dimmers) DEVICE_DIMMER: TYPE_LIGHT, DEVICE_DIMMER_METER: TYPE_LIGHT, DEVICE_HUE: TYPE_LIGHT, # Locks DEVICE_DOOR_LOCK: TYPE_LOCK, # Moisture DEVICE_WATER_SENSOR: TYPE_CONNECTIVITY, # Switches DEVICE_SWITCH: TYPE_SWITCH, DEVICE_NIGHT_SWITCH: TYPE_SWITCH, DEVICE_POWER_SWITCH_SENSOR: TYPE_SWITCH, DEVICE_POWER_SWITCH_METER: TYPE_SWITCH, # Water Valve DEVICE_VALVE: TYPE_VALVE, # Unknown Sensors # More data needed to determine type DEVICE_ROOM_SENSOR: TYPE_UNKNOWN_SENSOR, DEVICE_TEMPERATURE_SENSOR: TYPE_UNKNOWN_SENSOR, DEVICE_MULTI_SENSOR: TYPE_UNKNOWN_SENSOR, DEVICE_PIR: TYPE_UNKNOWN_SENSOR, DEVICE_POVS: TYPE_UNKNOWN_SENSOR, }.get(type_tag.lower(), None)
python
def get_generic_type(type_tag): """Map type tag to generic type.""" return { # Alarm DEVICE_ALARM: TYPE_ALARM, # Binary Sensors - Connectivity DEVICE_GLASS_BREAK: TYPE_CONNECTIVITY, DEVICE_KEYPAD: TYPE_CONNECTIVITY, DEVICE_REMOTE_CONTROLLER: TYPE_CONNECTIVITY, DEVICE_SIREN: TYPE_CONNECTIVITY, DEVICE_STATUS_DISPLAY: TYPE_CONNECTIVITY, # Binary Sensors - Opening DEVICE_DOOR_CONTACT: TYPE_OPENING, # Cameras DEVICE_MOTION_CAMERA: TYPE_CAMERA, DEVICE_MOTION_VIDEO_CAMERA: TYPE_CAMERA, DEVICE_IP_CAM: TYPE_CAMERA, DEVICE_OUTDOOR_MOTION_CAMERA: TYPE_CAMERA, # Covers DEVICE_SECURE_BARRIER: TYPE_COVER, # Lights (Dimmers) DEVICE_DIMMER: TYPE_LIGHT, DEVICE_DIMMER_METER: TYPE_LIGHT, DEVICE_HUE: TYPE_LIGHT, # Locks DEVICE_DOOR_LOCK: TYPE_LOCK, # Moisture DEVICE_WATER_SENSOR: TYPE_CONNECTIVITY, # Switches DEVICE_SWITCH: TYPE_SWITCH, DEVICE_NIGHT_SWITCH: TYPE_SWITCH, DEVICE_POWER_SWITCH_SENSOR: TYPE_SWITCH, DEVICE_POWER_SWITCH_METER: TYPE_SWITCH, # Water Valve DEVICE_VALVE: TYPE_VALVE, # Unknown Sensors # More data needed to determine type DEVICE_ROOM_SENSOR: TYPE_UNKNOWN_SENSOR, DEVICE_TEMPERATURE_SENSOR: TYPE_UNKNOWN_SENSOR, DEVICE_MULTI_SENSOR: TYPE_UNKNOWN_SENSOR, DEVICE_PIR: TYPE_UNKNOWN_SENSOR, DEVICE_POVS: TYPE_UNKNOWN_SENSOR, }.get(type_tag.lower(), None)
[ "def", "get_generic_type", "(", "type_tag", ")", ":", "return", "{", "# Alarm", "DEVICE_ALARM", ":", "TYPE_ALARM", ",", "# Binary Sensors - Connectivity", "DEVICE_GLASS_BREAK", ":", "TYPE_CONNECTIVITY", ",", "DEVICE_KEYPAD", ":", "TYPE_CONNECTIVITY", ",", "DEVICE_REMOTE_C...
Map type tag to generic type.
[ "Map", "type", "tag", "to", "generic", "type", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/helpers/constants.py#L228-L280
train
32,327
MisterWil/abodepy
abodepy/devices/lock.py
AbodeLock.lock
def lock(self): """Lock the device.""" success = self.set_status(CONST.STATUS_LOCKCLOSED_INT) if success: self._json_state['status'] = CONST.STATUS_LOCKCLOSED return success
python
def lock(self): """Lock the device.""" success = self.set_status(CONST.STATUS_LOCKCLOSED_INT) if success: self._json_state['status'] = CONST.STATUS_LOCKCLOSED return success
[ "def", "lock", "(", "self", ")", ":", "success", "=", "self", ".", "set_status", "(", "CONST", ".", "STATUS_LOCKCLOSED_INT", ")", "if", "success", ":", "self", ".", "_json_state", "[", "'status'", "]", "=", "CONST", ".", "STATUS_LOCKCLOSED", "return", "suc...
Lock the device.
[ "Lock", "the", "device", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/lock.py#L10-L17
train
32,328
MisterWil/abodepy
abodepy/devices/lock.py
AbodeLock.unlock
def unlock(self): """Unlock the device.""" success = self.set_status(CONST.STATUS_LOCKOPEN_INT) if success: self._json_state['status'] = CONST.STATUS_LOCKOPEN return success
python
def unlock(self): """Unlock the device.""" success = self.set_status(CONST.STATUS_LOCKOPEN_INT) if success: self._json_state['status'] = CONST.STATUS_LOCKOPEN return success
[ "def", "unlock", "(", "self", ")", ":", "success", "=", "self", ".", "set_status", "(", "CONST", ".", "STATUS_LOCKOPEN_INT", ")", "if", "success", ":", "self", ".", "_json_state", "[", "'status'", "]", "=", "CONST", ".", "STATUS_LOCKOPEN", "return", "succe...
Unlock the device.
[ "Unlock", "the", "device", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/lock.py#L19-L26
train
32,329
MisterWil/abodepy
abodepy/devices/__init__.py
AbodeDevice.set_status
def set_status(self, status): """Set device status.""" if self._json_state['control_url']: url = CONST.BASE_URL + self._json_state['control_url'] status_data = { 'status': str(status) } response = self._abode.send_request( method="put", url=url, data=status_data) response_object = json.loads(response.text) _LOGGER.debug("Set Status Response: %s", response.text) if response_object['id'] != self.device_id: raise AbodeException((ERROR.SET_STATUS_DEV_ID)) if response_object['status'] != str(status): raise AbodeException((ERROR.SET_STATUS_STATE)) # Note: Status result is of int type, not of new status of device. # Seriously, why would you do that? # So, can't set status here must be done at device level. _LOGGER.info("Set device %s status to: %s", self.device_id, status) return True return False
python
def set_status(self, status): """Set device status.""" if self._json_state['control_url']: url = CONST.BASE_URL + self._json_state['control_url'] status_data = { 'status': str(status) } response = self._abode.send_request( method="put", url=url, data=status_data) response_object = json.loads(response.text) _LOGGER.debug("Set Status Response: %s", response.text) if response_object['id'] != self.device_id: raise AbodeException((ERROR.SET_STATUS_DEV_ID)) if response_object['status'] != str(status): raise AbodeException((ERROR.SET_STATUS_STATE)) # Note: Status result is of int type, not of new status of device. # Seriously, why would you do that? # So, can't set status here must be done at device level. _LOGGER.info("Set device %s status to: %s", self.device_id, status) return True return False
[ "def", "set_status", "(", "self", ",", "status", ")", ":", "if", "self", ".", "_json_state", "[", "'control_url'", "]", ":", "url", "=", "CONST", ".", "BASE_URL", "+", "self", ".", "_json_state", "[", "'control_url'", "]", "status_data", "=", "{", "'stat...
Set device status.
[ "Set", "device", "status", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/__init__.py#L30-L59
train
32,330
MisterWil/abodepy
abodepy/devices/__init__.py
AbodeDevice.set_level
def set_level(self, level): """Set device level.""" if self._json_state['control_url']: url = CONST.BASE_URL + self._json_state['control_url'] level_data = { 'level': str(level) } response = self._abode.send_request( "put", url, data=level_data) response_object = json.loads(response.text) _LOGGER.debug("Set Level Response: %s", response.text) if response_object['id'] != self.device_id: raise AbodeException((ERROR.SET_STATUS_DEV_ID)) if response_object['level'] != str(level): raise AbodeException((ERROR.SET_STATUS_STATE)) self.update(response_object) _LOGGER.info("Set device %s level to: %s", self.device_id, level) return True return False
python
def set_level(self, level): """Set device level.""" if self._json_state['control_url']: url = CONST.BASE_URL + self._json_state['control_url'] level_data = { 'level': str(level) } response = self._abode.send_request( "put", url, data=level_data) response_object = json.loads(response.text) _LOGGER.debug("Set Level Response: %s", response.text) if response_object['id'] != self.device_id: raise AbodeException((ERROR.SET_STATUS_DEV_ID)) if response_object['level'] != str(level): raise AbodeException((ERROR.SET_STATUS_STATE)) self.update(response_object) _LOGGER.info("Set device %s level to: %s", self.device_id, level) return True return False
[ "def", "set_level", "(", "self", ",", "level", ")", ":", "if", "self", ".", "_json_state", "[", "'control_url'", "]", ":", "url", "=", "CONST", ".", "BASE_URL", "+", "self", ".", "_json_state", "[", "'control_url'", "]", "level_data", "=", "{", "'level'"...
Set device level.
[ "Set", "device", "level", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/__init__.py#L61-L88
train
32,331
MisterWil/abodepy
abodepy/devices/__init__.py
AbodeDevice._update_name
def _update_name(self): """Set the device name from _json_state, with a sensible default.""" self._name = self._json_state.get('name') if not self._name: self._name = self.type + ' ' + self.device_id
python
def _update_name(self): """Set the device name from _json_state, with a sensible default.""" self._name = self._json_state.get('name') if not self._name: self._name = self.type + ' ' + self.device_id
[ "def", "_update_name", "(", "self", ")", ":", "self", ".", "_name", "=", "self", ".", "_json_state", ".", "get", "(", "'name'", ")", "if", "not", "self", ".", "_name", ":", "self", ".", "_name", "=", "self", ".", "type", "+", "' '", "+", "self", ...
Set the device name from _json_state, with a sensible default.
[ "Set", "the", "device", "name", "from", "_json_state", "with", "a", "sensible", "default", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/__init__.py#L189-L193
train
32,332
MisterWil/abodepy
abodepy/devices/light.py
AbodeLight.set_color_temp
def set_color_temp(self, color_temp): """Set device color.""" if self._json_state['control_url']: url = CONST.INTEGRATIONS_URL + self._device_uuid color_data = { 'action': 'setcolortemperature', 'colorTemperature': int(color_temp) } response = self._abode.send_request("post", url, data=color_data) response_object = json.loads(response.text) _LOGGER.debug("Set Color Temp Response: %s", response.text) if response_object['idForPanel'] != self.device_id: raise AbodeException((ERROR.SET_STATUS_DEV_ID)) if response_object['colorTemperature'] != int(color_temp): _LOGGER.warning( ("Set color temp mismatch for device %s. " "Request val: %s, Response val: %s "), self.device_id, color_temp, response_object['colorTemperature']) self.update(response_object) _LOGGER.info("Set device %s color_temp to: %s", self.device_id, color_temp) return True return False
python
def set_color_temp(self, color_temp): """Set device color.""" if self._json_state['control_url']: url = CONST.INTEGRATIONS_URL + self._device_uuid color_data = { 'action': 'setcolortemperature', 'colorTemperature': int(color_temp) } response = self._abode.send_request("post", url, data=color_data) response_object = json.loads(response.text) _LOGGER.debug("Set Color Temp Response: %s", response.text) if response_object['idForPanel'] != self.device_id: raise AbodeException((ERROR.SET_STATUS_DEV_ID)) if response_object['colorTemperature'] != int(color_temp): _LOGGER.warning( ("Set color temp mismatch for device %s. " "Request val: %s, Response val: %s "), self.device_id, color_temp, response_object['colorTemperature']) self.update(response_object) _LOGGER.info("Set device %s color_temp to: %s", self.device_id, color_temp) return True return False
[ "def", "set_color_temp", "(", "self", ",", "color_temp", ")", ":", "if", "self", ".", "_json_state", "[", "'control_url'", "]", ":", "url", "=", "CONST", ".", "INTEGRATIONS_URL", "+", "self", ".", "_device_uuid", "color_data", "=", "{", "'action'", ":", "'...
Set device color.
[ "Set", "device", "color", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/light.py#L18-L49
train
32,333
MisterWil/abodepy
abodepy/devices/light.py
AbodeLight.color
def color(self): """Get light color.""" return (self.get_value(CONST.STATUSES_KEY).get('hue'), self.get_value(CONST.STATUSES_KEY).get('saturation'))
python
def color(self): """Get light color.""" return (self.get_value(CONST.STATUSES_KEY).get('hue'), self.get_value(CONST.STATUSES_KEY).get('saturation'))
[ "def", "color", "(", "self", ")", ":", "return", "(", "self", ".", "get_value", "(", "CONST", ".", "STATUSES_KEY", ")", ".", "get", "(", "'hue'", ")", ",", "self", ".", "get_value", "(", "CONST", ".", "STATUSES_KEY", ")", ".", "get", "(", "'saturatio...
Get light color.
[ "Get", "light", "color", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/light.py#L97-L100
train
32,334
MisterWil/abodepy
abodepy/devices/light.py
AbodeLight.has_color
def has_color(self): """Device is using color mode.""" if (self.get_value(CONST.STATUSES_KEY).get('color_mode') == str(CONST.COLOR_MODE_ON)): return True return False
python
def has_color(self): """Device is using color mode.""" if (self.get_value(CONST.STATUSES_KEY).get('color_mode') == str(CONST.COLOR_MODE_ON)): return True return False
[ "def", "has_color", "(", "self", ")", ":", "if", "(", "self", ".", "get_value", "(", "CONST", ".", "STATUSES_KEY", ")", ".", "get", "(", "'color_mode'", ")", "==", "str", "(", "CONST", ".", "COLOR_MODE_ON", ")", ")", ":", "return", "True", "return", ...
Device is using color mode.
[ "Device", "is", "using", "color", "mode", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/light.py#L108-L113
train
32,335
MisterWil/abodepy
abodepy/devices/camera.py
AbodeCamera.capture
def capture(self): """Request a new camera image.""" url = str.replace(CONST.CAMS_ID_CAPTURE_URL, '$DEVID$', self.device_id) try: response = self._abode.send_request("put", url) _LOGGER.debug("Capture image response: %s", response.text) return True except AbodeException as exc: _LOGGER.warning("Failed to capture image: %s", exc) return False
python
def capture(self): """Request a new camera image.""" url = str.replace(CONST.CAMS_ID_CAPTURE_URL, '$DEVID$', self.device_id) try: response = self._abode.send_request("put", url) _LOGGER.debug("Capture image response: %s", response.text) return True except AbodeException as exc: _LOGGER.warning("Failed to capture image: %s", exc) return False
[ "def", "capture", "(", "self", ")", ":", "url", "=", "str", ".", "replace", "(", "CONST", ".", "CAMS_ID_CAPTURE_URL", ",", "'$DEVID$'", ",", "self", ".", "device_id", ")", "try", ":", "response", "=", "self", ".", "_abode", ".", "send_request", "(", "\...
Request a new camera image.
[ "Request", "a", "new", "camera", "image", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/camera.py#L24-L38
train
32,336
MisterWil/abodepy
abodepy/devices/camera.py
AbodeCamera.refresh_image
def refresh_image(self): """Get the most recent camera image.""" url = str.replace(CONST.TIMELINE_IMAGES_ID_URL, '$DEVID$', self.device_id) response = self._abode.send_request("get", url) _LOGGER.debug("Get image response: %s", response.text) return self.update_image_location(json.loads(response.text))
python
def refresh_image(self): """Get the most recent camera image.""" url = str.replace(CONST.TIMELINE_IMAGES_ID_URL, '$DEVID$', self.device_id) response = self._abode.send_request("get", url) _LOGGER.debug("Get image response: %s", response.text) return self.update_image_location(json.loads(response.text))
[ "def", "refresh_image", "(", "self", ")", ":", "url", "=", "str", ".", "replace", "(", "CONST", ".", "TIMELINE_IMAGES_ID_URL", ",", "'$DEVID$'", ",", "self", ".", "device_id", ")", "response", "=", "self", ".", "_abode", ".", "send_request", "(", "\"get\""...
Get the most recent camera image.
[ "Get", "the", "most", "recent", "camera", "image", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/camera.py#L40-L48
train
32,337
MisterWil/abodepy
abodepy/devices/camera.py
AbodeCamera.update_image_location
def update_image_location(self, timeline_json): """Update the image location.""" if not timeline_json: return False # If we get a list of objects back (likely) # then we just want the first one as it should be the "newest" if isinstance(timeline_json, (tuple, list)): timeline_json = timeline_json[0] # Verify that the event code is of the "CAPTURE IMAGE" event event_code = timeline_json.get('event_code') if event_code != TIMELINE.CAPTURE_IMAGE['event_code']: raise AbodeException((ERROR.CAM_TIMELINE_EVENT_INVALID)) # The timeline response has an entry for "file_path" that acts as the # location of the image within the Abode servers. file_path = timeline_json.get('file_path') if not file_path: raise AbodeException((ERROR.CAM_IMAGE_REFRESH_NO_FILE)) # Perform a "head" request for the image and look for a # 302 Found response url = CONST.BASE_URL + file_path response = self._abode.send_request("head", url) if response.status_code != 302: _LOGGER.warning("Unexected response code %s with body: %s", str(response.status_code), response.text) raise AbodeException((ERROR.CAM_IMAGE_UNEXPECTED_RESPONSE)) # The response should have a location header that is the actual # location of the image stored on AWS location = response.headers.get('location') if not location: raise AbodeException((ERROR.CAM_IMAGE_NO_LOCATION_HEADER)) self._image_url = location return True
python
def update_image_location(self, timeline_json): """Update the image location.""" if not timeline_json: return False # If we get a list of objects back (likely) # then we just want the first one as it should be the "newest" if isinstance(timeline_json, (tuple, list)): timeline_json = timeline_json[0] # Verify that the event code is of the "CAPTURE IMAGE" event event_code = timeline_json.get('event_code') if event_code != TIMELINE.CAPTURE_IMAGE['event_code']: raise AbodeException((ERROR.CAM_TIMELINE_EVENT_INVALID)) # The timeline response has an entry for "file_path" that acts as the # location of the image within the Abode servers. file_path = timeline_json.get('file_path') if not file_path: raise AbodeException((ERROR.CAM_IMAGE_REFRESH_NO_FILE)) # Perform a "head" request for the image and look for a # 302 Found response url = CONST.BASE_URL + file_path response = self._abode.send_request("head", url) if response.status_code != 302: _LOGGER.warning("Unexected response code %s with body: %s", str(response.status_code), response.text) raise AbodeException((ERROR.CAM_IMAGE_UNEXPECTED_RESPONSE)) # The response should have a location header that is the actual # location of the image stored on AWS location = response.headers.get('location') if not location: raise AbodeException((ERROR.CAM_IMAGE_NO_LOCATION_HEADER)) self._image_url = location return True
[ "def", "update_image_location", "(", "self", ",", "timeline_json", ")", ":", "if", "not", "timeline_json", ":", "return", "False", "# If we get a list of objects back (likely)", "# then we just want the first one as it should be the \"newest\"", "if", "isinstance", "(", "timeli...
Update the image location.
[ "Update", "the", "image", "location", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/camera.py#L50-L89
train
32,338
MisterWil/abodepy
abodepy/devices/camera.py
AbodeCamera.image_to_file
def image_to_file(self, path, get_image=True): """Write the image to a file.""" if not self.image_url or get_image: if not self.refresh_image(): return False response = requests.get(self.image_url, stream=True) if response.status_code != 200: _LOGGER.warning( "Unexpected response code %s when requesting image: %s", str(response.status_code), response.text) raise AbodeException((ERROR.CAM_IMAGE_REQUEST_INVALID)) with open(path, 'wb') as imgfile: copyfileobj(response.raw, imgfile) return True
python
def image_to_file(self, path, get_image=True): """Write the image to a file.""" if not self.image_url or get_image: if not self.refresh_image(): return False response = requests.get(self.image_url, stream=True) if response.status_code != 200: _LOGGER.warning( "Unexpected response code %s when requesting image: %s", str(response.status_code), response.text) raise AbodeException((ERROR.CAM_IMAGE_REQUEST_INVALID)) with open(path, 'wb') as imgfile: copyfileobj(response.raw, imgfile) return True
[ "def", "image_to_file", "(", "self", ",", "path", ",", "get_image", "=", "True", ")", ":", "if", "not", "self", ".", "image_url", "or", "get_image", ":", "if", "not", "self", ".", "refresh_image", "(", ")", ":", "return", "False", "response", "=", "req...
Write the image to a file.
[ "Write", "the", "image", "to", "a", "file", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/camera.py#L91-L108
train
32,339
MisterWil/abodepy
abodepy/devices/alarm.py
create_alarm
def create_alarm(panel_json, abode, area='1'): """Create a new alarm device from a panel response.""" panel_json['name'] = CONST.ALARM_NAME panel_json['id'] = CONST.ALARM_DEVICE_ID + area panel_json['type'] = CONST.ALARM_TYPE panel_json['type_tag'] = CONST.DEVICE_ALARM panel_json['generic_type'] = CONST.TYPE_ALARM return AbodeAlarm(panel_json, abode, area)
python
def create_alarm(panel_json, abode, area='1'): """Create a new alarm device from a panel response.""" panel_json['name'] = CONST.ALARM_NAME panel_json['id'] = CONST.ALARM_DEVICE_ID + area panel_json['type'] = CONST.ALARM_TYPE panel_json['type_tag'] = CONST.DEVICE_ALARM panel_json['generic_type'] = CONST.TYPE_ALARM return AbodeAlarm(panel_json, abode, area)
[ "def", "create_alarm", "(", "panel_json", ",", "abode", ",", "area", "=", "'1'", ")", ":", "panel_json", "[", "'name'", "]", "=", "CONST", ".", "ALARM_NAME", "panel_json", "[", "'id'", "]", "=", "CONST", ".", "ALARM_DEVICE_ID", "+", "area", "panel_json", ...
Create a new alarm device from a panel response.
[ "Create", "a", "new", "alarm", "device", "from", "a", "panel", "response", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/alarm.py#L14-L22
train
32,340
MisterWil/abodepy
abodepy/devices/alarm.py
AbodeAlarm.set_mode
def set_mode(self, mode): """Set Abode alarm mode.""" if not mode: raise AbodeException(ERROR.MISSING_ALARM_MODE) elif mode.lower() not in CONST.ALL_MODES: raise AbodeException(ERROR.INVALID_ALARM_MODE, CONST.ALL_MODES) mode = mode.lower() response = self._abode.send_request( "put", CONST.get_panel_mode_url(self._area, mode)) _LOGGER.debug("Set Alarm Home Response: %s", response.text) response_object = json.loads(response.text) if response_object['area'] != self._area: raise AbodeException(ERROR.SET_MODE_AREA) if response_object['mode'] != mode: raise AbodeException(ERROR.SET_MODE_MODE) self._json_state['mode'][(self.device_id)] = response_object['mode'] _LOGGER.info("Set alarm %s mode to: %s", self._device_id, response_object['mode']) return True
python
def set_mode(self, mode): """Set Abode alarm mode.""" if not mode: raise AbodeException(ERROR.MISSING_ALARM_MODE) elif mode.lower() not in CONST.ALL_MODES: raise AbodeException(ERROR.INVALID_ALARM_MODE, CONST.ALL_MODES) mode = mode.lower() response = self._abode.send_request( "put", CONST.get_panel_mode_url(self._area, mode)) _LOGGER.debug("Set Alarm Home Response: %s", response.text) response_object = json.loads(response.text) if response_object['area'] != self._area: raise AbodeException(ERROR.SET_MODE_AREA) if response_object['mode'] != mode: raise AbodeException(ERROR.SET_MODE_MODE) self._json_state['mode'][(self.device_id)] = response_object['mode'] _LOGGER.info("Set alarm %s mode to: %s", self._device_id, response_object['mode']) return True
[ "def", "set_mode", "(", "self", ",", "mode", ")", ":", "if", "not", "mode", ":", "raise", "AbodeException", "(", "ERROR", ".", "MISSING_ALARM_MODE", ")", "elif", "mode", ".", "lower", "(", ")", "not", "in", "CONST", ".", "ALL_MODES", ":", "raise", "Abo...
Set Abode alarm mode.
[ "Set", "Abode", "alarm", "mode", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/alarm.py#L33-L60
train
32,341
MisterWil/abodepy
abodepy/devices/alarm.py
AbodeAlarm.refresh
def refresh(self, url=CONST.PANEL_URL): """Refresh the alarm device.""" response_object = AbodeDevice.refresh(self, url) # pylint: disable=W0212 self._abode._panel.update(response_object[0]) return response_object
python
def refresh(self, url=CONST.PANEL_URL): """Refresh the alarm device.""" response_object = AbodeDevice.refresh(self, url) # pylint: disable=W0212 self._abode._panel.update(response_object[0]) return response_object
[ "def", "refresh", "(", "self", ",", "url", "=", "CONST", ".", "PANEL_URL", ")", ":", "response_object", "=", "AbodeDevice", ".", "refresh", "(", "self", ",", "url", ")", "# pylint: disable=W0212", "self", ".", "_abode", ".", "_panel", ".", "update", "(", ...
Refresh the alarm device.
[ "Refresh", "the", "alarm", "device", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/alarm.py#L82-L88
train
32,342
MisterWil/abodepy
abodepy/devices/alarm.py
AbodeAlarm.mode
def mode(self): """Get alarm mode.""" mode = self.get_value('mode').get(self.device_id, None) return mode.lower()
python
def mode(self): """Get alarm mode.""" mode = self.get_value('mode').get(self.device_id, None) return mode.lower()
[ "def", "mode", "(", "self", ")", ":", "mode", "=", "self", ".", "get_value", "(", "'mode'", ")", ".", "get", "(", "self", ".", "device_id", ",", "None", ")", "return", "mode", ".", "lower", "(", ")" ]
Get alarm mode.
[ "Get", "alarm", "mode", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/devices/alarm.py#L111-L115
train
32,343
MisterWil/abodepy
abodepy/utils.py
update
def update(dct, dct_merge): """Recursively merge dicts.""" for key, value in dct_merge.items(): if key in dct and isinstance(dct[key], dict): dct[key] = update(dct[key], value) else: dct[key] = value return dct
python
def update(dct, dct_merge): """Recursively merge dicts.""" for key, value in dct_merge.items(): if key in dct and isinstance(dct[key], dict): dct[key] = update(dct[key], value) else: dct[key] = value return dct
[ "def", "update", "(", "dct", ",", "dct_merge", ")", ":", "for", "key", ",", "value", "in", "dct_merge", ".", "items", "(", ")", ":", "if", "key", "in", "dct", "and", "isinstance", "(", "dct", "[", "key", "]", ",", "dict", ")", ":", "dct", "[", ...
Recursively merge dicts.
[ "Recursively", "merge", "dicts", "." ]
6f84bb428fd1da98855f55083cd427bebbcc57ae
https://github.com/MisterWil/abodepy/blob/6f84bb428fd1da98855f55083cd427bebbcc57ae/abodepy/utils.py#L23-L30
train
32,344
Akrog/cinderlib
cinderlib/nos_brick.py
RBDConnector.check_valid_device
def check_valid_device(self, path, run_as_root=True): """Verify an existing RBD handle is connected and valid.""" if self.im_root: try: with open(path, 'r') as f: f.read(4096) except Exception: return False return True try: self._execute('dd', 'if=' + path, 'of=/dev/null', 'bs=4096', 'count=1', root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError: return False return True
python
def check_valid_device(self, path, run_as_root=True): """Verify an existing RBD handle is connected and valid.""" if self.im_root: try: with open(path, 'r') as f: f.read(4096) except Exception: return False return True try: self._execute('dd', 'if=' + path, 'of=/dev/null', 'bs=4096', 'count=1', root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError: return False return True
[ "def", "check_valid_device", "(", "self", ",", "path", ",", "run_as_root", "=", "True", ")", ":", "if", "self", ".", "im_root", ":", "try", ":", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "f", ".", "read", "(", "4096", ")", "ex...
Verify an existing RBD handle is connected and valid.
[ "Verify", "an", "existing", "RBD", "handle", "is", "connected", "and", "valid", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/nos_brick.py#L114-L130
train
32,345
Akrog/cinderlib
cinderlib/persistence/__init__.py
setup
def setup(config): """Setup persistence to be used in cinderlib. By default memory persistance will be used, but there are other mechanisms available and other ways to use custom mechanisms: - Persistence plugins: Plugin mechanism uses Python entrypoints under namespace cinderlib.persistence.storage, and cinderlib comes with 3 different mechanisms, "memory", "dbms", and "memory_dbms". To use any of these one must pass the string name in the storage parameter and any other configuration as keyword arguments. - Passing a class that inherits from PersistenceDriverBase as storage parameter and initialization parameters as keyword arguments. - Passing an instance that inherits from PersistenceDriverBase as storage parameter. """ if config is None: config = {} else: config = config.copy() # Prevent driver dynamic loading clearing configuration options volume_cmd.CONF._ConfigOpts__cache = MyDict() # Default configuration is using memory storage storage = config.pop('storage', None) or DEFAULT_STORAGE if isinstance(storage, base.PersistenceDriverBase): return storage if inspect.isclass(storage) and issubclass(storage, base.PersistenceDriverBase): return storage(**config) if not isinstance(storage, six.string_types): raise exception.InvalidPersistence(storage) persistence_driver = driver.DriverManager( namespace='cinderlib.persistence.storage', name=storage, invoke_on_load=True, invoke_kwds=config, ) return persistence_driver.driver
python
def setup(config): """Setup persistence to be used in cinderlib. By default memory persistance will be used, but there are other mechanisms available and other ways to use custom mechanisms: - Persistence plugins: Plugin mechanism uses Python entrypoints under namespace cinderlib.persistence.storage, and cinderlib comes with 3 different mechanisms, "memory", "dbms", and "memory_dbms". To use any of these one must pass the string name in the storage parameter and any other configuration as keyword arguments. - Passing a class that inherits from PersistenceDriverBase as storage parameter and initialization parameters as keyword arguments. - Passing an instance that inherits from PersistenceDriverBase as storage parameter. """ if config is None: config = {} else: config = config.copy() # Prevent driver dynamic loading clearing configuration options volume_cmd.CONF._ConfigOpts__cache = MyDict() # Default configuration is using memory storage storage = config.pop('storage', None) or DEFAULT_STORAGE if isinstance(storage, base.PersistenceDriverBase): return storage if inspect.isclass(storage) and issubclass(storage, base.PersistenceDriverBase): return storage(**config) if not isinstance(storage, six.string_types): raise exception.InvalidPersistence(storage) persistence_driver = driver.DriverManager( namespace='cinderlib.persistence.storage', name=storage, invoke_on_load=True, invoke_kwds=config, ) return persistence_driver.driver
[ "def", "setup", "(", "config", ")", ":", "if", "config", "is", "None", ":", "config", "=", "{", "}", "else", ":", "config", "=", "config", ".", "copy", "(", ")", "# Prevent driver dynamic loading clearing configuration options", "volume_cmd", ".", "CONF", ".",...
Setup persistence to be used in cinderlib. By default memory persistance will be used, but there are other mechanisms available and other ways to use custom mechanisms: - Persistence plugins: Plugin mechanism uses Python entrypoints under namespace cinderlib.persistence.storage, and cinderlib comes with 3 different mechanisms, "memory", "dbms", and "memory_dbms". To use any of these one must pass the string name in the storage parameter and any other configuration as keyword arguments. - Passing a class that inherits from PersistenceDriverBase as storage parameter and initialization parameters as keyword arguments. - Passing an instance that inherits from PersistenceDriverBase as storage parameter.
[ "Setup", "persistence", "to", "be", "used", "in", "cinderlib", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/persistence/__init__.py#L43-L85
train
32,346
Akrog/cinderlib
cinderlib/cinderlib.py
Backend._transform_legacy_stats
def _transform_legacy_stats(self, stats): """Convert legacy stats to new stats with pools key.""" # Fill pools for legacy driver reports if stats and 'pools' not in stats: pool = stats.copy() pool['pool_name'] = self.id for key in ('driver_version', 'shared_targets', 'sparse_copy_volume', 'storage_protocol', 'vendor_name', 'volume_backend_name'): pool.pop(key, None) stats['pools'] = [pool] return stats
python
def _transform_legacy_stats(self, stats): """Convert legacy stats to new stats with pools key.""" # Fill pools for legacy driver reports if stats and 'pools' not in stats: pool = stats.copy() pool['pool_name'] = self.id for key in ('driver_version', 'shared_targets', 'sparse_copy_volume', 'storage_protocol', 'vendor_name', 'volume_backend_name'): pool.pop(key, None) stats['pools'] = [pool] return stats
[ "def", "_transform_legacy_stats", "(", "self", ",", "stats", ")", ":", "# Fill pools for legacy driver reports", "if", "stats", "and", "'pools'", "not", "in", "stats", ":", "pool", "=", "stats", ".", "copy", "(", ")", "pool", "[", "'pool_name'", "]", "=", "s...
Convert legacy stats to new stats with pools key.
[ "Convert", "legacy", "stats", "to", "new", "stats", "with", "pools", "key", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/cinderlib.py#L130-L141
train
32,347
Akrog/cinderlib
cinderlib/cinderlib.py
Backend._config_parse
def _config_parse(self): """Replacer oslo_config.cfg.ConfigParser.parse for in-memory cfg.""" res = super(cfg.ConfigParser, self).parse(Backend._config_string_io) return res
python
def _config_parse(self): """Replacer oslo_config.cfg.ConfigParser.parse for in-memory cfg.""" res = super(cfg.ConfigParser, self).parse(Backend._config_string_io) return res
[ "def", "_config_parse", "(", "self", ")", ":", "res", "=", "super", "(", "cfg", ".", "ConfigParser", ",", "self", ")", ".", "parse", "(", "Backend", ".", "_config_string_io", ")", "return", "res" ]
Replacer oslo_config.cfg.ConfigParser.parse for in-memory cfg.
[ "Replacer", "oslo_config", ".", "cfg", ".", "ConfigParser", ".", "parse", "for", "in", "-", "memory", "cfg", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/cinderlib.py#L196-L199
train
32,348
Akrog/cinderlib
cinderlib/cinderlib.py
Backend._update_cinder_config
def _update_cinder_config(cls): """Parse in-memory file to update OSLO configuration used by Cinder.""" cls._config_string_io.seek(0) cls._parser.write(cls._config_string_io) # Check if we have any multiopt cls._config_string_io.seek(0) current_cfg = cls._config_string_io.read() if '\n\t' in current_cfg: cls._config_string_io.seek(0) cls._config_string_io.write(current_cfg.replace('\n\t', '\n')) cls._config_string_io.seek(0) cfg.CONF.reload_config_files()
python
def _update_cinder_config(cls): """Parse in-memory file to update OSLO configuration used by Cinder.""" cls._config_string_io.seek(0) cls._parser.write(cls._config_string_io) # Check if we have any multiopt cls._config_string_io.seek(0) current_cfg = cls._config_string_io.read() if '\n\t' in current_cfg: cls._config_string_io.seek(0) cls._config_string_io.write(current_cfg.replace('\n\t', '\n')) cls._config_string_io.seek(0) cfg.CONF.reload_config_files()
[ "def", "_update_cinder_config", "(", "cls", ")", ":", "cls", ".", "_config_string_io", ".", "seek", "(", "0", ")", "cls", ".", "_parser", ".", "write", "(", "cls", ".", "_config_string_io", ")", "# Check if we have any multiopt", "cls", ".", "_config_string_io",...
Parse in-memory file to update OSLO configuration used by Cinder.
[ "Parse", "in", "-", "memory", "file", "to", "update", "OSLO", "configuration", "used", "by", "Cinder", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/cinderlib.py#L202-L215
train
32,349
Akrog/cinderlib
cinderlib/cinderlib.py
Backend._set_cinder_config
def _set_cinder_config(cls, host, locks_path, cinder_config_params): """Setup the parser with all the known Cinder configuration.""" cfg.CONF.set_default('state_path', os.getcwd()) cfg.CONF.set_default('lock_path', '$state_path', 'oslo_concurrency') cls._parser = six.moves.configparser.SafeConfigParser() cls._parser.set('DEFAULT', 'enabled_backends', '') if locks_path: cls._parser.add_section('oslo_concurrency') cls._parser.set('oslo_concurrency', 'lock_path', locks_path) cls._parser.add_section('coordination') cls._parser.set('coordination', 'backend_url', 'file://' + locks_path) if host: cls._parser.set('DEFAULT', 'host', host) # All other configuration options go into the DEFAULT section cls.__set_parser_kv(cinder_config_params, 'DEFAULT') # We replace the OSLO's default parser to read from a StringIO instead # of reading from a file. cls._config_string_io = six.moves.StringIO() cfg.ConfigParser.parse = six.create_unbound_method(cls._config_parse, cfg.ConfigParser) # Replace command line arg parser so we ignore caller's args cfg._CachedArgumentParser.parse_args = lambda *a, **kw: None # Update the configuration with the options we have configured cfg.CONF(project='cinder', version=cinderlib.__version__, default_config_files=['in_memory_file']) cls._update_cinder_config()
python
def _set_cinder_config(cls, host, locks_path, cinder_config_params): """Setup the parser with all the known Cinder configuration.""" cfg.CONF.set_default('state_path', os.getcwd()) cfg.CONF.set_default('lock_path', '$state_path', 'oslo_concurrency') cls._parser = six.moves.configparser.SafeConfigParser() cls._parser.set('DEFAULT', 'enabled_backends', '') if locks_path: cls._parser.add_section('oslo_concurrency') cls._parser.set('oslo_concurrency', 'lock_path', locks_path) cls._parser.add_section('coordination') cls._parser.set('coordination', 'backend_url', 'file://' + locks_path) if host: cls._parser.set('DEFAULT', 'host', host) # All other configuration options go into the DEFAULT section cls.__set_parser_kv(cinder_config_params, 'DEFAULT') # We replace the OSLO's default parser to read from a StringIO instead # of reading from a file. cls._config_string_io = six.moves.StringIO() cfg.ConfigParser.parse = six.create_unbound_method(cls._config_parse, cfg.ConfigParser) # Replace command line arg parser so we ignore caller's args cfg._CachedArgumentParser.parse_args = lambda *a, **kw: None # Update the configuration with the options we have configured cfg.CONF(project='cinder', version=cinderlib.__version__, default_config_files=['in_memory_file']) cls._update_cinder_config()
[ "def", "_set_cinder_config", "(", "cls", ",", "host", ",", "locks_path", ",", "cinder_config_params", ")", ":", "cfg", ".", "CONF", ".", "set_default", "(", "'state_path'", ",", "os", ".", "getcwd", "(", ")", ")", "cfg", ".", "CONF", ".", "set_default", ...
Setup the parser with all the known Cinder configuration.
[ "Setup", "the", "parser", "with", "all", "the", "known", "Cinder", "configuration", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/cinderlib.py#L218-L251
train
32,350
Akrog/cinderlib
cinderlib/cinderlib.py
Backend.list_supported_drivers
def list_supported_drivers(): """Returns dictionary with driver classes names as keys.""" def convert_oslo_config(oslo_options): options = [] for opt in oslo_options: tmp_dict = {k: str(v) for k, v in vars(opt).items() if not k.startswith('_')} options.append(tmp_dict) return options def list_drivers(queue): cwd = os.getcwd() # Go to the parent directory directory where Cinder is installed os.chdir(utils.__file__.rsplit(os.sep, 2)[0]) try: drivers = cinder_interface_util.get_volume_drivers() mapping = {d.class_name: vars(d) for d in drivers} # Drivers contain class instances which are not serializable for driver in mapping.values(): driver.pop('cls', None) if 'driver_options' in driver: driver['driver_options'] = convert_oslo_config( driver['driver_options']) finally: os.chdir(cwd) queue.put(mapping) # Use a different process to avoid having all driver classes loaded in # memory during our execution. queue = multiprocessing.Queue() p = multiprocessing.Process(target=list_drivers, args=(queue,)) p.start() result = queue.get() p.join() return result
python
def list_supported_drivers(): """Returns dictionary with driver classes names as keys.""" def convert_oslo_config(oslo_options): options = [] for opt in oslo_options: tmp_dict = {k: str(v) for k, v in vars(opt).items() if not k.startswith('_')} options.append(tmp_dict) return options def list_drivers(queue): cwd = os.getcwd() # Go to the parent directory directory where Cinder is installed os.chdir(utils.__file__.rsplit(os.sep, 2)[0]) try: drivers = cinder_interface_util.get_volume_drivers() mapping = {d.class_name: vars(d) for d in drivers} # Drivers contain class instances which are not serializable for driver in mapping.values(): driver.pop('cls', None) if 'driver_options' in driver: driver['driver_options'] = convert_oslo_config( driver['driver_options']) finally: os.chdir(cwd) queue.put(mapping) # Use a different process to avoid having all driver classes loaded in # memory during our execution. queue = multiprocessing.Queue() p = multiprocessing.Process(target=list_drivers, args=(queue,)) p.start() result = queue.get() p.join() return result
[ "def", "list_supported_drivers", "(", ")", ":", "def", "convert_oslo_config", "(", "oslo_options", ")", ":", "options", "=", "[", "]", "for", "opt", "in", "oslo_options", ":", "tmp_dict", "=", "{", "k", ":", "str", "(", "v", ")", "for", "k", ",", "v", ...
Returns dictionary with driver classes names as keys.
[ "Returns", "dictionary", "with", "driver", "classes", "names", "as", "keys", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/cinderlib.py#L393-L428
train
32,351
Akrog/cinderlib
cinderlib/serialization.py
load
def load(json_src, save=False): """Load any json serialized cinderlib object.""" if isinstance(json_src, six.string_types): json_src = json_lib.loads(json_src) if isinstance(json_src, list): return [getattr(objects, obj['class']).load(obj, save) for obj in json_src] return getattr(objects, json_src['class']).load(json_src, save)
python
def load(json_src, save=False): """Load any json serialized cinderlib object.""" if isinstance(json_src, six.string_types): json_src = json_lib.loads(json_src) if isinstance(json_src, list): return [getattr(objects, obj['class']).load(obj, save) for obj in json_src] return getattr(objects, json_src['class']).load(json_src, save)
[ "def", "load", "(", "json_src", ",", "save", "=", "False", ")", ":", "if", "isinstance", "(", "json_src", ",", "six", ".", "string_types", ")", ":", "json_src", "=", "json_lib", ".", "loads", "(", "json_src", ")", "if", "isinstance", "(", "json_src", "...
Load any json serialized cinderlib object.
[ "Load", "any", "json", "serialized", "cinderlib", "object", "." ]
6481cd9a34744f80bdba130fe9089f1b8b7cb327
https://github.com/Akrog/cinderlib/blob/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/serialization.py#L157-L166
train
32,352
zblz/naima
naima/model_utils.py
memoize
def memoize(func): """ Cache decorator for functions inside model classes """ def model(cls, energy, *args, **kwargs): try: memoize = cls._memoize cache = cls._cache queue = cls._queue except AttributeError: memoize = False if memoize: # Allow for dicts or tables with energy column, Quantity array or # Quantity scalar try: with warnings.catch_warnings(): warnings.simplefilter( "ignore", getattr(np, "VisibleDeprecationWarning", None), ) energy = u.Quantity(energy["energy"]) except (TypeError, ValueError, IndexError): pass try: # tostring is 10 times faster than str(array).encode() bstr = energy.value.tostring() except AttributeError: # scalar Quantity bstr = str(energy.value).encode() data = [hashlib.sha256(bstr).hexdigest()] data.append(energy.unit.to_string()) data.append(str(kwargs.get("distance", 0))) if args: data.append(str(args)) if hasattr(cls, "particle_distribution"): models = [cls, cls.particle_distribution] else: models = [cls] for model in models: if hasattr(model, "param_names"): for par in model.param_names: data.append(str(getattr(model, par))) token = "".join(data) digest = hashlib.sha256(token.encode()).hexdigest() if digest in cache: return cache[digest] result = func(cls, energy, *args, **kwargs) if memoize: # remove first item in queue and remove from cache if len(queue) > 16: key = queue.pop(0) cache.pop(key, None) # save last result to cache queue.append(digest) cache[digest] = result return result model.__name__ = func.__name__ model.__doc__ = func.__doc__ return model
python
def memoize(func): """ Cache decorator for functions inside model classes """ def model(cls, energy, *args, **kwargs): try: memoize = cls._memoize cache = cls._cache queue = cls._queue except AttributeError: memoize = False if memoize: # Allow for dicts or tables with energy column, Quantity array or # Quantity scalar try: with warnings.catch_warnings(): warnings.simplefilter( "ignore", getattr(np, "VisibleDeprecationWarning", None), ) energy = u.Quantity(energy["energy"]) except (TypeError, ValueError, IndexError): pass try: # tostring is 10 times faster than str(array).encode() bstr = energy.value.tostring() except AttributeError: # scalar Quantity bstr = str(energy.value).encode() data = [hashlib.sha256(bstr).hexdigest()] data.append(energy.unit.to_string()) data.append(str(kwargs.get("distance", 0))) if args: data.append(str(args)) if hasattr(cls, "particle_distribution"): models = [cls, cls.particle_distribution] else: models = [cls] for model in models: if hasattr(model, "param_names"): for par in model.param_names: data.append(str(getattr(model, par))) token = "".join(data) digest = hashlib.sha256(token.encode()).hexdigest() if digest in cache: return cache[digest] result = func(cls, energy, *args, **kwargs) if memoize: # remove first item in queue and remove from cache if len(queue) > 16: key = queue.pop(0) cache.pop(key, None) # save last result to cache queue.append(digest) cache[digest] = result return result model.__name__ = func.__name__ model.__doc__ = func.__doc__ return model
[ "def", "memoize", "(", "func", ")", ":", "def", "model", "(", "cls", ",", "energy", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "memoize", "=", "cls", ".", "_memoize", "cache", "=", "cls", ".", "_cache", "queue", "=", "cls", ...
Cache decorator for functions inside model classes
[ "Cache", "decorator", "for", "functions", "inside", "model", "classes" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/model_utils.py#L15-L84
train
32,353
zblz/naima
examples/RXJ1713_SynIC.py
lnprior
def lnprior(pars): """ Return probability of parameter values according to prior knowledge. Parameter limits should be done here through uniform prior ditributions """ # Limit norm and B to be positive logprob = ( naima.uniform_prior(pars[0], 0.0, np.inf) + naima.uniform_prior(pars[1], -1, 5) + naima.uniform_prior(pars[3], 0, np.inf) ) return logprob
python
def lnprior(pars): """ Return probability of parameter values according to prior knowledge. Parameter limits should be done here through uniform prior ditributions """ # Limit norm and B to be positive logprob = ( naima.uniform_prior(pars[0], 0.0, np.inf) + naima.uniform_prior(pars[1], -1, 5) + naima.uniform_prior(pars[3], 0, np.inf) ) return logprob
[ "def", "lnprior", "(", "pars", ")", ":", "# Limit norm and B to be positive", "logprob", "=", "(", "naima", ".", "uniform_prior", "(", "pars", "[", "0", "]", ",", "0.0", ",", "np", ".", "inf", ")", "+", "naima", ".", "uniform_prior", "(", "pars", "[", ...
Return probability of parameter values according to prior knowledge. Parameter limits should be done here through uniform prior ditributions
[ "Return", "probability", "of", "parameter", "values", "according", "to", "prior", "knowledge", ".", "Parameter", "limits", "should", "be", "done", "here", "through", "uniform", "prior", "ditributions" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/examples/RXJ1713_SynIC.py#L56-L68
train
32,354
zblz/naima
naima/core.py
normal_prior
def normal_prior(value, mean, sigma): """Normal prior distribution. """ return -0.5 * (2 * np.pi * sigma) - (value - mean) ** 2 / (2.0 * sigma)
python
def normal_prior(value, mean, sigma): """Normal prior distribution. """ return -0.5 * (2 * np.pi * sigma) - (value - mean) ** 2 / (2.0 * sigma)
[ "def", "normal_prior", "(", "value", ",", "mean", ",", "sigma", ")", ":", "return", "-", "0.5", "*", "(", "2", "*", "np", ".", "pi", "*", "sigma", ")", "-", "(", "value", "-", "mean", ")", "**", "2", "/", "(", "2.0", "*", "sigma", ")" ]
Normal prior distribution.
[ "Normal", "prior", "distribution", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/core.py#L44-L47
train
32,355
zblz/naima
naima/core.py
log_uniform_prior
def log_uniform_prior(value, umin=0, umax=None): """Log-uniform prior distribution. """ if value > 0 and value >= umin: if umax is not None: if value <= umax: return 1 / value else: return -np.inf else: return 1 / value else: return -np.inf
python
def log_uniform_prior(value, umin=0, umax=None): """Log-uniform prior distribution. """ if value > 0 and value >= umin: if umax is not None: if value <= umax: return 1 / value else: return -np.inf else: return 1 / value else: return -np.inf
[ "def", "log_uniform_prior", "(", "value", ",", "umin", "=", "0", ",", "umax", "=", "None", ")", ":", "if", "value", ">", "0", "and", "value", ">=", "umin", ":", "if", "umax", "is", "not", "None", ":", "if", "value", "<=", "umax", ":", "return", "...
Log-uniform prior distribution.
[ "Log", "-", "uniform", "prior", "distribution", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/core.py#L50-L62
train
32,356
zblz/naima
naima/core.py
run_sampler
def run_sampler(nrun=100, sampler=None, pos=None, **kwargs): """Run an MCMC sampler. If no sampler or initial position vector is provided, extra ``kwargs`` are passed to `get_sampler` to generate a new sampler. Parameters ---------- nrun : int, optional Number of steps to run sampler : :class:`~emcee.EnsembleSampler` instance, optional Sampler. pos : :class:`~numpy.ndarray`, optional A list of initial position vectors for the walkers. It should have dimensions of ``(nwalkers,dim)``, where ``dim`` is the number of free parameters. `emcee.utils.sample_ball` can be used to generate a multidimensional gaussian distribution around a single initial position. Returns ------- sampler : :class:`~emcee.EnsembleSampler` instance Sampler containing the paths of the walkers during the ``nrun`` steps. pos : array List of final position vectors after the run. """ if sampler is None or pos is None: sampler, pos = get_sampler(**kwargs) sampler.run_info["n_run"] = nrun print("\nWalker burn in finished, running {0} steps...".format(nrun)) sampler.reset() sampler, pos = _run_mcmc(sampler, pos, nrun) return sampler, pos
python
def run_sampler(nrun=100, sampler=None, pos=None, **kwargs): """Run an MCMC sampler. If no sampler or initial position vector is provided, extra ``kwargs`` are passed to `get_sampler` to generate a new sampler. Parameters ---------- nrun : int, optional Number of steps to run sampler : :class:`~emcee.EnsembleSampler` instance, optional Sampler. pos : :class:`~numpy.ndarray`, optional A list of initial position vectors for the walkers. It should have dimensions of ``(nwalkers,dim)``, where ``dim`` is the number of free parameters. `emcee.utils.sample_ball` can be used to generate a multidimensional gaussian distribution around a single initial position. Returns ------- sampler : :class:`~emcee.EnsembleSampler` instance Sampler containing the paths of the walkers during the ``nrun`` steps. pos : array List of final position vectors after the run. """ if sampler is None or pos is None: sampler, pos = get_sampler(**kwargs) sampler.run_info["n_run"] = nrun print("\nWalker burn in finished, running {0} steps...".format(nrun)) sampler.reset() sampler, pos = _run_mcmc(sampler, pos, nrun) return sampler, pos
[ "def", "run_sampler", "(", "nrun", "=", "100", ",", "sampler", "=", "None", ",", "pos", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "sampler", "is", "None", "or", "pos", "is", "None", ":", "sampler", ",", "pos", "=", "get_sampler", "(", ...
Run an MCMC sampler. If no sampler or initial position vector is provided, extra ``kwargs`` are passed to `get_sampler` to generate a new sampler. Parameters ---------- nrun : int, optional Number of steps to run sampler : :class:`~emcee.EnsembleSampler` instance, optional Sampler. pos : :class:`~numpy.ndarray`, optional A list of initial position vectors for the walkers. It should have dimensions of ``(nwalkers,dim)``, where ``dim`` is the number of free parameters. `emcee.utils.sample_ball` can be used to generate a multidimensional gaussian distribution around a single initial position. Returns ------- sampler : :class:`~emcee.EnsembleSampler` instance Sampler containing the paths of the walkers during the ``nrun`` steps. pos : array List of final position vectors after the run.
[ "Run", "an", "MCMC", "sampler", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/core.py#L527-L563
train
32,357
projecthamster/hamster-lib
hamster_lib/reports.py
XMLWriter._fact_to_tuple
def _fact_to_tuple(self, fact): """ Convert a ``Fact`` to its normalized tuple. This is where all type conversion for ``Fact`` attributes to strings as well as any normalization happens. Note: Because different writers may require different types, we need to do this individually. Args: fact (hamster_lib.Fact): Fact to be converted. Returns: FactTuple: Tuple representing the original ``Fact``. """ # Fields that may have ``None`` value will be represented by '' if fact.category: category = fact.category.name else: category = '' description = fact.description or '' return FactTuple( start=fact.start.strftime(self.datetime_format), end=fact.end.strftime(self.datetime_format), activity=text_type(fact.activity.name), duration=fact.get_string_delta(format='%M'), category=text_type(category), description=text_type(description), )
python
def _fact_to_tuple(self, fact): """ Convert a ``Fact`` to its normalized tuple. This is where all type conversion for ``Fact`` attributes to strings as well as any normalization happens. Note: Because different writers may require different types, we need to do this individually. Args: fact (hamster_lib.Fact): Fact to be converted. Returns: FactTuple: Tuple representing the original ``Fact``. """ # Fields that may have ``None`` value will be represented by '' if fact.category: category = fact.category.name else: category = '' description = fact.description or '' return FactTuple( start=fact.start.strftime(self.datetime_format), end=fact.end.strftime(self.datetime_format), activity=text_type(fact.activity.name), duration=fact.get_string_delta(format='%M'), category=text_type(category), description=text_type(description), )
[ "def", "_fact_to_tuple", "(", "self", ",", "fact", ")", ":", "# Fields that may have ``None`` value will be represented by ''", "if", "fact", ".", "category", ":", "category", "=", "fact", ".", "category", ".", "name", "else", ":", "category", "=", "''", "descript...
Convert a ``Fact`` to its normalized tuple. This is where all type conversion for ``Fact`` attributes to strings as well as any normalization happens. Note: Because different writers may require different types, we need to do this individually. Args: fact (hamster_lib.Fact): Fact to be converted. Returns: FactTuple: Tuple representing the original ``Fact``.
[ "Convert", "a", "Fact", "to", "its", "normalized", "tuple", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/reports.py#L294-L326
train
32,358
projecthamster/hamster-lib
hamster_lib/reports.py
XMLWriter._write_fact
def _write_fact(self, fact_tuple): """ Create new fact element and populate attributes. Once the child is prepared append it to ``fact_list``. """ fact = self.document.createElement("fact") fact.setAttribute('start', fact_tuple.start) fact.setAttribute('end', fact_tuple.end) fact.setAttribute('activity', fact_tuple.activity) fact.setAttribute('duration', fact_tuple.duration) fact.setAttribute('category', fact_tuple.category) fact.setAttribute('description', fact_tuple.description) self.fact_list.appendChild(fact)
python
def _write_fact(self, fact_tuple): """ Create new fact element and populate attributes. Once the child is prepared append it to ``fact_list``. """ fact = self.document.createElement("fact") fact.setAttribute('start', fact_tuple.start) fact.setAttribute('end', fact_tuple.end) fact.setAttribute('activity', fact_tuple.activity) fact.setAttribute('duration', fact_tuple.duration) fact.setAttribute('category', fact_tuple.category) fact.setAttribute('description', fact_tuple.description) self.fact_list.appendChild(fact)
[ "def", "_write_fact", "(", "self", ",", "fact_tuple", ")", ":", "fact", "=", "self", ".", "document", ".", "createElement", "(", "\"fact\"", ")", "fact", ".", "setAttribute", "(", "'start'", ",", "fact_tuple", ".", "start", ")", "fact", ".", "setAttribute"...
Create new fact element and populate attributes. Once the child is prepared append it to ``fact_list``.
[ "Create", "new", "fact", "element", "and", "populate", "attributes", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/reports.py#L328-L341
train
32,359
projecthamster/hamster-lib
hamster_lib/reports.py
XMLWriter._close
def _close(self): """ Append the xml fact list to the main document write file and cleanup. ``toxml`` should take care of encoding everything with UTF-8. """ self.document.appendChild(self.fact_list) self.file.write(self.document.toxml(encoding='utf-8')) return super(XMLWriter, self)._close()
python
def _close(self): """ Append the xml fact list to the main document write file and cleanup. ``toxml`` should take care of encoding everything with UTF-8. """ self.document.appendChild(self.fact_list) self.file.write(self.document.toxml(encoding='utf-8')) return super(XMLWriter, self)._close()
[ "def", "_close", "(", "self", ")", ":", "self", ".", "document", ".", "appendChild", "(", "self", ".", "fact_list", ")", "self", ".", "file", ".", "write", "(", "self", ".", "document", ".", "toxml", "(", "encoding", "=", "'utf-8'", ")", ")", "return...
Append the xml fact list to the main document write file and cleanup. ``toxml`` should take care of encoding everything with UTF-8.
[ "Append", "the", "xml", "fact", "list", "to", "the", "main", "document", "write", "file", "and", "cleanup", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/reports.py#L343-L352
train
32,360
LabKey/labkey-api-python
labkey/query.py
execute_sql
def execute_sql(server_context, schema_name, sql, container_path=None, max_rows=None, sort=None, offset=None, container_filter=None, save_in_session=None, parameters=None, required_version=None, timeout=_default_timeout): """ Execute sql query against a LabKey server. :param server_context: A LabKey server context. See utils.create_server_context. :param schema_name: schema of table :param sql: String of labkey sql to execute :param container_path: labkey container path if not already set in context :param max_rows: max number of rows to return :param sort: comma separated list of column names to sort by :param offset: number of rows to offset results by :param container_filter: enumeration of the various container filters available. See: https://www.labkey.org/download/clientapi_docs/javascript-api/symbols/LABKEY.Query.html#.containerFilter :param save_in_session: save query result as a named view to the session :param parameters: parameter values to pass through to a parameterized query :param required_version: Api version of response :param timeout: timeout of request in seconds (defaults to 30s) :return: """ url = server_context.build_url('query', 'executeSql.api', container_path=container_path) payload = { 'schemaName': schema_name, 'sql': sql } if container_filter is not None: payload['containerFilter'] = container_filter if max_rows is not None: payload['maxRows'] = max_rows if offset is not None: payload['offset'] = offset if sort is not None: payload['query.sort'] = sort if save_in_session is not None: payload['saveInSession'] = save_in_session if parameters is not None: for key, value in parameters.items(): payload['query.param.' + key] = value if required_version is not None: payload['apiVersion'] = required_version return server_context.make_request(url, payload, timeout=timeout)
python
def execute_sql(server_context, schema_name, sql, container_path=None, max_rows=None, sort=None, offset=None, container_filter=None, save_in_session=None, parameters=None, required_version=None, timeout=_default_timeout): """ Execute sql query against a LabKey server. :param server_context: A LabKey server context. See utils.create_server_context. :param schema_name: schema of table :param sql: String of labkey sql to execute :param container_path: labkey container path if not already set in context :param max_rows: max number of rows to return :param sort: comma separated list of column names to sort by :param offset: number of rows to offset results by :param container_filter: enumeration of the various container filters available. See: https://www.labkey.org/download/clientapi_docs/javascript-api/symbols/LABKEY.Query.html#.containerFilter :param save_in_session: save query result as a named view to the session :param parameters: parameter values to pass through to a parameterized query :param required_version: Api version of response :param timeout: timeout of request in seconds (defaults to 30s) :return: """ url = server_context.build_url('query', 'executeSql.api', container_path=container_path) payload = { 'schemaName': schema_name, 'sql': sql } if container_filter is not None: payload['containerFilter'] = container_filter if max_rows is not None: payload['maxRows'] = max_rows if offset is not None: payload['offset'] = offset if sort is not None: payload['query.sort'] = sort if save_in_session is not None: payload['saveInSession'] = save_in_session if parameters is not None: for key, value in parameters.items(): payload['query.param.' + key] = value if required_version is not None: payload['apiVersion'] = required_version return server_context.make_request(url, payload, timeout=timeout)
[ "def", "execute_sql", "(", "server_context", ",", "schema_name", ",", "sql", ",", "container_path", "=", "None", ",", "max_rows", "=", "None", ",", "sort", "=", "None", ",", "offset", "=", "None", ",", "container_filter", "=", "None", ",", "save_in_session",...
Execute sql query against a LabKey server. :param server_context: A LabKey server context. See utils.create_server_context. :param schema_name: schema of table :param sql: String of labkey sql to execute :param container_path: labkey container path if not already set in context :param max_rows: max number of rows to return :param sort: comma separated list of column names to sort by :param offset: number of rows to offset results by :param container_filter: enumeration of the various container filters available. See: https://www.labkey.org/download/clientapi_docs/javascript-api/symbols/LABKEY.Query.html#.containerFilter :param save_in_session: save query result as a named view to the session :param parameters: parameter values to pass through to a parameterized query :param required_version: Api version of response :param timeout: timeout of request in seconds (defaults to 30s) :return:
[ "Execute", "sql", "query", "against", "a", "LabKey", "server", "." ]
3c8d393384d7cbb2785f8a7f5fe34007b17a76b8
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/query.py#L87-L143
train
32,361
LabKey/labkey-api-python
labkey/query.py
update_rows
def update_rows(server_context, schema_name, query_name, rows, container_path=None, timeout=_default_timeout): """ Update a set of rows :param server_context: A LabKey server context. See utils.create_server_context. :param schema_name: schema of table :param query_name: table name to update :param rows: Set of rows to update :param container_path: labkey container path if not already set in context :param timeout: timeout of request in seconds (defaults to 30s) :return: """ url = server_context.build_url('query', 'updateRows.api', container_path=container_path) payload = { 'schemaName': schema_name, 'queryName': query_name, 'rows': rows } return server_context.make_request(url, json_dumps(payload, sort_keys=True), headers=_query_headers, timeout=timeout)
python
def update_rows(server_context, schema_name, query_name, rows, container_path=None, timeout=_default_timeout): """ Update a set of rows :param server_context: A LabKey server context. See utils.create_server_context. :param schema_name: schema of table :param query_name: table name to update :param rows: Set of rows to update :param container_path: labkey container path if not already set in context :param timeout: timeout of request in seconds (defaults to 30s) :return: """ url = server_context.build_url('query', 'updateRows.api', container_path=container_path) payload = { 'schemaName': schema_name, 'queryName': query_name, 'rows': rows } return server_context.make_request(url, json_dumps(payload, sort_keys=True), headers=_query_headers, timeout=timeout)
[ "def", "update_rows", "(", "server_context", ",", "schema_name", ",", "query_name", ",", "rows", ",", "container_path", "=", "None", ",", "timeout", "=", "_default_timeout", ")", ":", "url", "=", "server_context", ".", "build_url", "(", "'query'", ",", "'updat...
Update a set of rows :param server_context: A LabKey server context. See utils.create_server_context. :param schema_name: schema of table :param query_name: table name to update :param rows: Set of rows to update :param container_path: labkey container path if not already set in context :param timeout: timeout of request in seconds (defaults to 30s) :return:
[ "Update", "a", "set", "of", "rows" ]
3c8d393384d7cbb2785f8a7f5fe34007b17a76b8
https://github.com/LabKey/labkey-api-python/blob/3c8d393384d7cbb2785f8a7f5fe34007b17a76b8/labkey/query.py#L265-L286
train
32,362
projecthamster/hamster-lib
hamster_lib/helpers/time.py
get_day_end
def get_day_end(config): """ Get the day end time given the day start. This assumes full 24h day. Args: config (dict): Configdict. Needed to extract ``day_start``. Note: This is merely a convinience funtion so we do not have to deduct this from ``day_start`` by hand all the time. """ day_start_datetime = datetime.datetime.combine(datetime.date.today(), config['day_start']) day_end_datetime = day_start_datetime - datetime.timedelta(seconds=1) return day_end_datetime.time()
python
def get_day_end(config): """ Get the day end time given the day start. This assumes full 24h day. Args: config (dict): Configdict. Needed to extract ``day_start``. Note: This is merely a convinience funtion so we do not have to deduct this from ``day_start`` by hand all the time. """ day_start_datetime = datetime.datetime.combine(datetime.date.today(), config['day_start']) day_end_datetime = day_start_datetime - datetime.timedelta(seconds=1) return day_end_datetime.time()
[ "def", "get_day_end", "(", "config", ")", ":", "day_start_datetime", "=", "datetime", ".", "datetime", ".", "combine", "(", "datetime", ".", "date", ".", "today", "(", ")", ",", "config", "[", "'day_start'", "]", ")", "day_end_datetime", "=", "day_start_date...
Get the day end time given the day start. This assumes full 24h day. Args: config (dict): Configdict. Needed to extract ``day_start``. Note: This is merely a convinience funtion so we do not have to deduct this from ``day_start`` by hand all the time.
[ "Get", "the", "day", "end", "time", "given", "the", "day", "start", ".", "This", "assumes", "full", "24h", "day", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/helpers/time.py#L33-L46
train
32,363
projecthamster/hamster-lib
hamster_lib/helpers/time.py
end_day_to_datetime
def end_day_to_datetime(end_day, config): """ Convert a given end day to its proper datetime. This is non trivial because of variable ``day_start``. We want to make sure that even if an 'end day' is specified the actual point in time may reach into the following day. Args: end (datetime.date): Raw end date that is to be adjusted. config: Controller config containing information on when a workday starts. Returns: datetime.datetime: The endday as a adjusted datetime object. Example: Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to consider even points in time up to ``2015-04-02 5:29``. That is to represent that a *work day* does not match *calendar days*. Note: An alternative implementation for the similar problem in legacy hamster: ``hamster.storage.db.Storage.__get_todays_facts``. """ day_start_time = config['day_start'] day_end_time = get_day_end(config) if day_start_time == datetime.time(0, 0, 0): end = datetime.datetime.combine(end_day, day_end_time) else: end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1) return end
python
def end_day_to_datetime(end_day, config): """ Convert a given end day to its proper datetime. This is non trivial because of variable ``day_start``. We want to make sure that even if an 'end day' is specified the actual point in time may reach into the following day. Args: end (datetime.date): Raw end date that is to be adjusted. config: Controller config containing information on when a workday starts. Returns: datetime.datetime: The endday as a adjusted datetime object. Example: Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to consider even points in time up to ``2015-04-02 5:29``. That is to represent that a *work day* does not match *calendar days*. Note: An alternative implementation for the similar problem in legacy hamster: ``hamster.storage.db.Storage.__get_todays_facts``. """ day_start_time = config['day_start'] day_end_time = get_day_end(config) if day_start_time == datetime.time(0, 0, 0): end = datetime.datetime.combine(end_day, day_end_time) else: end = datetime.datetime.combine(end_day, day_end_time) + datetime.timedelta(days=1) return end
[ "def", "end_day_to_datetime", "(", "end_day", ",", "config", ")", ":", "day_start_time", "=", "config", "[", "'day_start'", "]", "day_end_time", "=", "get_day_end", "(", "config", ")", "if", "day_start_time", "==", "datetime", ".", "time", "(", "0", ",", "0"...
Convert a given end day to its proper datetime. This is non trivial because of variable ``day_start``. We want to make sure that even if an 'end day' is specified the actual point in time may reach into the following day. Args: end (datetime.date): Raw end date that is to be adjusted. config: Controller config containing information on when a workday starts. Returns: datetime.datetime: The endday as a adjusted datetime object. Example: Given a ``day_start`` of ``5:30`` and end date of ``2015-04-01`` we actually want to consider even points in time up to ``2015-04-02 5:29``. That is to represent that a *work day* does not match *calendar days*. Note: An alternative implementation for the similar problem in legacy hamster: ``hamster.storage.db.Storage.__get_todays_facts``.
[ "Convert", "a", "given", "end", "day", "to", "its", "proper", "datetime", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/helpers/time.py#L49-L82
train
32,364
projecthamster/hamster-lib
hamster_lib/helpers/time.py
complete_timeframe
def complete_timeframe(timeframe, config, partial=False): """ Apply fallback strategy to incomplete timeframes. Our fallback strategy is as follows: * Missing start-date: Fallback to ``today``. * Missing start-time: Fallback to ``store.config['day_start']``. * Missing end-date: Fallback to ``today`` for ``day_start='00:00`, ``tomorrow`` otherwise. See ``hamster_lib.helpers.end_day_to_datetime`` for details and explanations. * Missing end-time: 1 second before ``store.config['day_start']``. Args: timeframe (TimeFrame): ``TimeFrame`` instance incorporating all available information available about the timespan. Any missing info will be completed per fallback strategy. config (dict): A config-dict providing settings relevant to determine fallback values. partial (bool, optional): If true, we will only complete start/end times if there is at least either date or time information present. Defaults to ``False``. Returns: tuple: ``(start, end)`` tuple. Where ``start`` and ``end`` are full ``datetime.datetime`` instances. Raises: TypeError: If any of the ``timeframe`` values is of inappropriate datetime type. """ def complete_start_date(date): """ Assign ``today`` if ``date=None``, else ensure its a ``datetime.date`` instance. Args: date (datetime.date): Startdate information. Returns: datetime.date: Either the original date or the default solution. Raises: TypeError: If ``date``` is neither ``None`` nor ``datetime.date`` instance. Note: Reference behavior taken from [hamster-cli](https://github.com/projecthamster/ hamster/blob/master/src/hamster-cli#L368). 
""" if not date: date = datetime.date.today() else: if not isinstance(date, datetime.date): raise TypeError(_( "Expected datetime.date instance, got {type} instead.".format( type=type(date)) )) return date def complete_start_time(time, day_start): """Assign ``day_start`` if no start-time is given.""" if not time: time = day_start else: if not isinstance(time, datetime.time): raise TypeError(_( "Expected datetime.time instance, got {type} instead.".format( type=type(time)) )) return time def complete_start(date, time, config): return datetime.datetime.combine( complete_start_date(timeframe.start_date), complete_start_time(timeframe.start_time, config['day_start']), ) def complete_end_date(date): if not date: date = datetime.date.today() else: if not isinstance(date, datetime.date): raise TypeError(_( "Expected datetime.date instance, got {type} instead.".format( type=type(date)) )) return date def complete_end(date, time, config): date = complete_end_date(date) if time: result = datetime.datetime.combine(date, time) else: result = end_day_to_datetime(date, config) return result start, end = None, None if any((timeframe.offset, timeframe.start_time, timeframe.start_date)) or not partial: if not timeframe.offset: start = complete_start(timeframe.start_date, timeframe.start_time, config) else: start = datetime.datetime.now() - timeframe.offset if any((timeframe.end_date, timeframe.end_time)) or not partial: end = complete_end(timeframe.end_date, timeframe.end_time, config) return (start, end)
python
def complete_timeframe(timeframe, config, partial=False): """ Apply fallback strategy to incomplete timeframes. Our fallback strategy is as follows: * Missing start-date: Fallback to ``today``. * Missing start-time: Fallback to ``store.config['day_start']``. * Missing end-date: Fallback to ``today`` for ``day_start='00:00`, ``tomorrow`` otherwise. See ``hamster_lib.helpers.end_day_to_datetime`` for details and explanations. * Missing end-time: 1 second before ``store.config['day_start']``. Args: timeframe (TimeFrame): ``TimeFrame`` instance incorporating all available information available about the timespan. Any missing info will be completed per fallback strategy. config (dict): A config-dict providing settings relevant to determine fallback values. partial (bool, optional): If true, we will only complete start/end times if there is at least either date or time information present. Defaults to ``False``. Returns: tuple: ``(start, end)`` tuple. Where ``start`` and ``end`` are full ``datetime.datetime`` instances. Raises: TypeError: If any of the ``timeframe`` values is of inappropriate datetime type. """ def complete_start_date(date): """ Assign ``today`` if ``date=None``, else ensure its a ``datetime.date`` instance. Args: date (datetime.date): Startdate information. Returns: datetime.date: Either the original date or the default solution. Raises: TypeError: If ``date``` is neither ``None`` nor ``datetime.date`` instance. Note: Reference behavior taken from [hamster-cli](https://github.com/projecthamster/ hamster/blob/master/src/hamster-cli#L368). 
""" if not date: date = datetime.date.today() else: if not isinstance(date, datetime.date): raise TypeError(_( "Expected datetime.date instance, got {type} instead.".format( type=type(date)) )) return date def complete_start_time(time, day_start): """Assign ``day_start`` if no start-time is given.""" if not time: time = day_start else: if not isinstance(time, datetime.time): raise TypeError(_( "Expected datetime.time instance, got {type} instead.".format( type=type(time)) )) return time def complete_start(date, time, config): return datetime.datetime.combine( complete_start_date(timeframe.start_date), complete_start_time(timeframe.start_time, config['day_start']), ) def complete_end_date(date): if not date: date = datetime.date.today() else: if not isinstance(date, datetime.date): raise TypeError(_( "Expected datetime.date instance, got {type} instead.".format( type=type(date)) )) return date def complete_end(date, time, config): date = complete_end_date(date) if time: result = datetime.datetime.combine(date, time) else: result = end_day_to_datetime(date, config) return result start, end = None, None if any((timeframe.offset, timeframe.start_time, timeframe.start_date)) or not partial: if not timeframe.offset: start = complete_start(timeframe.start_date, timeframe.start_time, config) else: start = datetime.datetime.now() - timeframe.offset if any((timeframe.end_date, timeframe.end_time)) or not partial: end = complete_end(timeframe.end_date, timeframe.end_time, config) return (start, end)
[ "def", "complete_timeframe", "(", "timeframe", ",", "config", ",", "partial", "=", "False", ")", ":", "def", "complete_start_date", "(", "date", ")", ":", "\"\"\"\n Assign ``today`` if ``date=None``, else ensure its a ``datetime.date`` instance.\n\n Args:\n ...
Apply fallback strategy to incomplete timeframes. Our fallback strategy is as follows: * Missing start-date: Fallback to ``today``. * Missing start-time: Fallback to ``store.config['day_start']``. * Missing end-date: Fallback to ``today`` for ``day_start='00:00`, ``tomorrow`` otherwise. See ``hamster_lib.helpers.end_day_to_datetime`` for details and explanations. * Missing end-time: 1 second before ``store.config['day_start']``. Args: timeframe (TimeFrame): ``TimeFrame`` instance incorporating all available information available about the timespan. Any missing info will be completed per fallback strategy. config (dict): A config-dict providing settings relevant to determine fallback values. partial (bool, optional): If true, we will only complete start/end times if there is at least either date or time information present. Defaults to ``False``. Returns: tuple: ``(start, end)`` tuple. Where ``start`` and ``end`` are full ``datetime.datetime`` instances. Raises: TypeError: If any of the ``timeframe`` values is of inappropriate datetime type.
[ "Apply", "fallback", "strategy", "to", "incomplete", "timeframes", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/helpers/time.py#L189-L297
train
32,365
projecthamster/hamster-lib
hamster_lib/helpers/time.py
validate_start_end_range
def validate_start_end_range(range_tuple): """ Perform basic sanity checks on a timeframe. Args: range_tuple (tuple): ``(start, end)`` tuple as returned by ``complete_timeframe``. Raises: ValueError: If start > end. Returns: tuple: ``(start, end)`` tuple that passed validation. Note: ``timeframes`` may be incomplete, especially if ``complete_timeframe(partial=True)`` has been used to construct them. """ start, end = range_tuple if (start and end) and (start > end): raise ValueError(_("Start after end!")) return range_tuple
python
def validate_start_end_range(range_tuple): """ Perform basic sanity checks on a timeframe. Args: range_tuple (tuple): ``(start, end)`` tuple as returned by ``complete_timeframe``. Raises: ValueError: If start > end. Returns: tuple: ``(start, end)`` tuple that passed validation. Note: ``timeframes`` may be incomplete, especially if ``complete_timeframe(partial=True)`` has been used to construct them. """ start, end = range_tuple if (start and end) and (start > end): raise ValueError(_("Start after end!")) return range_tuple
[ "def", "validate_start_end_range", "(", "range_tuple", ")", ":", "start", ",", "end", "=", "range_tuple", "if", "(", "start", "and", "end", ")", "and", "(", "start", ">", "end", ")", ":", "raise", "ValueError", "(", "_", "(", "\"Start after end!\"", ")", ...
Perform basic sanity checks on a timeframe. Args: range_tuple (tuple): ``(start, end)`` tuple as returned by ``complete_timeframe``. Raises: ValueError: If start > end. Returns: tuple: ``(start, end)`` tuple that passed validation. Note: ``timeframes`` may be incomplete, especially if ``complete_timeframe(partial=True)`` has been used to construct them.
[ "Perform", "basic", "sanity", "checks", "on", "a", "timeframe", "." ]
bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f
https://github.com/projecthamster/hamster-lib/blob/bc34c822c239a6fa0cde3a4f90b0d00506fb5a4f/hamster_lib/helpers/time.py#L340-L364
train
32,366
zblz/naima
naima/utils.py
validate_data_table
def validate_data_table(data_table, sed=None): """ Validate all columns of a data table. If a list of tables is passed, all tables will be validated and then concatenated Parameters ---------- data_table : `astropy.table.Table` or list of `astropy.table.Table`. sed : bool, optional Whether to convert the fluxes to SED. If unset, all data tables are converted to the format of the first data table. """ if isinstance(data_table, Table) or isinstance(data_table, QTable): data_table = [data_table] try: for dt in data_table: if not isinstance(dt, Table) and not isinstance(dt, QTable): raise TypeError( "An object passed as data_table is not an astropy Table!" ) except TypeError: raise TypeError( "Argument passed to validate_data_table is not a table and " "not a list" ) def dt_sed_conversion(dt, sed): f_unit, sedf = sed_conversion(dt["energy"], dt["flux"].unit, sed) # roundtrip to Table to change the units t = Table(dt) for col in ["flux", "flux_error_lo", "flux_error_hi"]: t[col].unit = f_unit ndt = QTable(t) ndt["flux"] = (dt["flux"] * sedf).to(f_unit) ndt["flux_error_lo"] = (dt["flux_error_lo"] * sedf).to(f_unit) ndt["flux_error_hi"] = (dt["flux_error_hi"] * sedf).to(f_unit) return ndt data_list = [] for group, dt in enumerate(data_table): dt_val = _validate_single_data_table(dt, group=group) data_list.append(dt_val) # concatenate input data tables data_new = data_list[0].copy() f_pt = data_new["flux"].unit.physical_type if sed is None: sed = f_pt in ["flux", "power"] data_new = dt_sed_conversion(data_new, sed) for dt in data_list[1:]: nf_pt = dt["flux"].unit.physical_type if ("flux" in nf_pt and "power" in f_pt) or ( "power" in nf_pt and "flux" in f_pt ): raise TypeError( "The physical types of the data tables could not be " "matched: Some are in flux and others in luminosity units" ) dt = dt_sed_conversion(dt, sed) for row in dt: data_new.add_row(row) return data_new
python
def validate_data_table(data_table, sed=None): """ Validate all columns of a data table. If a list of tables is passed, all tables will be validated and then concatenated Parameters ---------- data_table : `astropy.table.Table` or list of `astropy.table.Table`. sed : bool, optional Whether to convert the fluxes to SED. If unset, all data tables are converted to the format of the first data table. """ if isinstance(data_table, Table) or isinstance(data_table, QTable): data_table = [data_table] try: for dt in data_table: if not isinstance(dt, Table) and not isinstance(dt, QTable): raise TypeError( "An object passed as data_table is not an astropy Table!" ) except TypeError: raise TypeError( "Argument passed to validate_data_table is not a table and " "not a list" ) def dt_sed_conversion(dt, sed): f_unit, sedf = sed_conversion(dt["energy"], dt["flux"].unit, sed) # roundtrip to Table to change the units t = Table(dt) for col in ["flux", "flux_error_lo", "flux_error_hi"]: t[col].unit = f_unit ndt = QTable(t) ndt["flux"] = (dt["flux"] * sedf).to(f_unit) ndt["flux_error_lo"] = (dt["flux_error_lo"] * sedf).to(f_unit) ndt["flux_error_hi"] = (dt["flux_error_hi"] * sedf).to(f_unit) return ndt data_list = [] for group, dt in enumerate(data_table): dt_val = _validate_single_data_table(dt, group=group) data_list.append(dt_val) # concatenate input data tables data_new = data_list[0].copy() f_pt = data_new["flux"].unit.physical_type if sed is None: sed = f_pt in ["flux", "power"] data_new = dt_sed_conversion(data_new, sed) for dt in data_list[1:]: nf_pt = dt["flux"].unit.physical_type if ("flux" in nf_pt and "power" in f_pt) or ( "power" in nf_pt and "flux" in f_pt ): raise TypeError( "The physical types of the data tables could not be " "matched: Some are in flux and others in luminosity units" ) dt = dt_sed_conversion(dt, sed) for row in dt: data_new.add_row(row) return data_new
[ "def", "validate_data_table", "(", "data_table", ",", "sed", "=", "None", ")", ":", "if", "isinstance", "(", "data_table", ",", "Table", ")", "or", "isinstance", "(", "data_table", ",", "QTable", ")", ":", "data_table", "=", "[", "data_table", "]", "try", ...
Validate all columns of a data table. If a list of tables is passed, all tables will be validated and then concatenated Parameters ---------- data_table : `astropy.table.Table` or list of `astropy.table.Table`. sed : bool, optional Whether to convert the fluxes to SED. If unset, all data tables are converted to the format of the first data table.
[ "Validate", "all", "columns", "of", "a", "data", "table", ".", "If", "a", "list", "of", "tables", "is", "passed", "all", "tables", "will", "be", "validated", "and", "then", "concatenated" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L44-L115
train
32,367
zblz/naima
naima/utils.py
sed_conversion
def sed_conversion(energy, model_unit, sed): """ Manage conversion between differential spectrum and SED """ model_pt = model_unit.physical_type ones = np.ones(energy.shape) if sed: # SED f_unit = u.Unit("erg/s") if model_pt == "power" or model_pt == "flux" or model_pt == "energy": sedf = ones elif "differential" in model_pt: sedf = energy ** 2 else: raise u.UnitsError( "Model physical type ({0}) is not supported".format(model_pt), "Supported physical types are: power, flux, differential" " power, differential flux", ) if "flux" in model_pt: f_unit /= u.cm ** 2 elif "energy" in model_pt: # particle energy distributions f_unit = u.erg else: # Differential spectrum f_unit = u.Unit("1/(s TeV)") if "differential" in model_pt: sedf = ones elif model_pt == "power" or model_pt == "flux" or model_pt == "energy": # From SED to differential sedf = 1 / (energy ** 2) else: raise u.UnitsError( "Model physical type ({0}) is not supported".format(model_pt), "Supported physical types are: power, flux, differential" " power, differential flux", ) if "flux" in model_pt: f_unit /= u.cm ** 2 elif "energy" in model_pt: # particle energy distributions f_unit = u.Unit("1/TeV") log.debug( "Converted from {0} ({1}) into {2} ({3}) for sed={4}".format( model_unit, model_pt, f_unit, f_unit.physical_type, sed ) ) return f_unit, sedf
python
def sed_conversion(energy, model_unit, sed): """ Manage conversion between differential spectrum and SED """ model_pt = model_unit.physical_type ones = np.ones(energy.shape) if sed: # SED f_unit = u.Unit("erg/s") if model_pt == "power" or model_pt == "flux" or model_pt == "energy": sedf = ones elif "differential" in model_pt: sedf = energy ** 2 else: raise u.UnitsError( "Model physical type ({0}) is not supported".format(model_pt), "Supported physical types are: power, flux, differential" " power, differential flux", ) if "flux" in model_pt: f_unit /= u.cm ** 2 elif "energy" in model_pt: # particle energy distributions f_unit = u.erg else: # Differential spectrum f_unit = u.Unit("1/(s TeV)") if "differential" in model_pt: sedf = ones elif model_pt == "power" or model_pt == "flux" or model_pt == "energy": # From SED to differential sedf = 1 / (energy ** 2) else: raise u.UnitsError( "Model physical type ({0}) is not supported".format(model_pt), "Supported physical types are: power, flux, differential" " power, differential flux", ) if "flux" in model_pt: f_unit /= u.cm ** 2 elif "energy" in model_pt: # particle energy distributions f_unit = u.Unit("1/TeV") log.debug( "Converted from {0} ({1}) into {2} ({3}) for sed={4}".format( model_unit, model_pt, f_unit, f_unit.physical_type, sed ) ) return f_unit, sedf
[ "def", "sed_conversion", "(", "energy", ",", "model_unit", ",", "sed", ")", ":", "model_pt", "=", "model_unit", ".", "physical_type", "ones", "=", "np", ".", "ones", "(", "energy", ".", "shape", ")", "if", "sed", ":", "# SED", "f_unit", "=", "u", ".", ...
Manage conversion between differential spectrum and SED
[ "Manage", "conversion", "between", "differential", "spectrum", "and", "SED" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L237-L292
train
32,368
zblz/naima
naima/utils.py
trapz_loglog
def trapz_loglog(y, x, axis=-1, intervals=False): """ Integrate along the given axis using the composite trapezoidal rule in loglog space. Integrate `y` (`x`) along given axis in loglog space. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional Independent variable to integrate over. axis : int, optional Specify the axis. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule in loglog space. """ try: y_unit = y.unit y = y.value except AttributeError: y_unit = 1.0 try: x_unit = x.unit x = x.value except AttributeError: x_unit = 1.0 y = np.asanyarray(y) x = np.asanyarray(x) slice1 = [slice(None)] * y.ndim slice2 = [slice(None)] * y.ndim slice1[axis] = slice(None, -1) slice2[axis] = slice(1, None) slice1 = tuple(slice1) slice2 = tuple(slice2) if x.ndim == 1: shape = [1] * y.ndim shape[axis] = x.shape[0] x = x.reshape(shape) with warnings.catch_warnings(): warnings.simplefilter("ignore") # Compute the power law indices in each integration bin b = np.log10(y[slice2] / y[slice1]) / np.log10(x[slice2] / x[slice1]) # if local powerlaw index is -1, use \int 1/x = log(x); otherwise use # normal powerlaw integration trapzs = np.where( np.abs(b + 1.0) > 1e-10, ( y[slice1] * (x[slice2] * (x[slice2] / x[slice1]) ** b - x[slice1]) ) / (b + 1), x[slice1] * y[slice1] * np.log(x[slice2] / x[slice1]), ) tozero = (y[slice1] == 0.0) + (y[slice2] == 0.0) + (x[slice1] == x[slice2]) trapzs[tozero] = 0.0 if intervals: return trapzs * x_unit * y_unit ret = np.add.reduce(trapzs, axis) * x_unit * y_unit return ret
python
def trapz_loglog(y, x, axis=-1, intervals=False): """ Integrate along the given axis using the composite trapezoidal rule in loglog space. Integrate `y` (`x`) along given axis in loglog space. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional Independent variable to integrate over. axis : int, optional Specify the axis. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule in loglog space. """ try: y_unit = y.unit y = y.value except AttributeError: y_unit = 1.0 try: x_unit = x.unit x = x.value except AttributeError: x_unit = 1.0 y = np.asanyarray(y) x = np.asanyarray(x) slice1 = [slice(None)] * y.ndim slice2 = [slice(None)] * y.ndim slice1[axis] = slice(None, -1) slice2[axis] = slice(1, None) slice1 = tuple(slice1) slice2 = tuple(slice2) if x.ndim == 1: shape = [1] * y.ndim shape[axis] = x.shape[0] x = x.reshape(shape) with warnings.catch_warnings(): warnings.simplefilter("ignore") # Compute the power law indices in each integration bin b = np.log10(y[slice2] / y[slice1]) / np.log10(x[slice2] / x[slice1]) # if local powerlaw index is -1, use \int 1/x = log(x); otherwise use # normal powerlaw integration trapzs = np.where( np.abs(b + 1.0) > 1e-10, ( y[slice1] * (x[slice2] * (x[slice2] / x[slice1]) ** b - x[slice1]) ) / (b + 1), x[slice1] * y[slice1] * np.log(x[slice2] / x[slice1]), ) tozero = (y[slice1] == 0.0) + (y[slice2] == 0.0) + (x[slice1] == x[slice2]) trapzs[tozero] = 0.0 if intervals: return trapzs * x_unit * y_unit ret = np.add.reduce(trapzs, axis) * x_unit * y_unit return ret
[ "def", "trapz_loglog", "(", "y", ",", "x", ",", "axis", "=", "-", "1", ",", "intervals", "=", "False", ")", ":", "try", ":", "y_unit", "=", "y", ".", "unit", "y", "=", "y", ".", "value", "except", "AttributeError", ":", "y_unit", "=", "1.0", "try...
Integrate along the given axis using the composite trapezoidal rule in loglog space. Integrate `y` (`x`) along given axis in loglog space. Parameters ---------- y : array_like Input array to integrate. x : array_like, optional Independent variable to integrate over. axis : int, optional Specify the axis. Returns ------- trapz : float Definite integral as approximated by trapezoidal rule in loglog space.
[ "Integrate", "along", "the", "given", "axis", "using", "the", "composite", "trapezoidal", "rule", "in", "loglog", "space", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L295-L368
train
32,369
zblz/naima
naima/utils.py
_generate_energy_edges_single
def _generate_energy_edges_single(ene): """Generate energy edges for single group""" midene = np.sqrt((ene[1:] * ene[:-1])) elo, ehi = np.zeros(len(ene)) * ene.unit, np.zeros(len(ene)) * ene.unit elo[1:] = ene[1:] - midene ehi[:-1] = midene - ene[:-1] elo[0] = ene[0] * (1 - ene[0] / (ene[0] + ehi[0])) ehi[-1] = elo[-1] return u.Quantity([elo, ehi])
python
def _generate_energy_edges_single(ene): """Generate energy edges for single group""" midene = np.sqrt((ene[1:] * ene[:-1])) elo, ehi = np.zeros(len(ene)) * ene.unit, np.zeros(len(ene)) * ene.unit elo[1:] = ene[1:] - midene ehi[:-1] = midene - ene[:-1] elo[0] = ene[0] * (1 - ene[0] / (ene[0] + ehi[0])) ehi[-1] = elo[-1] return u.Quantity([elo, ehi])
[ "def", "_generate_energy_edges_single", "(", "ene", ")", ":", "midene", "=", "np", ".", "sqrt", "(", "(", "ene", "[", "1", ":", "]", "*", "ene", "[", ":", "-", "1", "]", ")", ")", "elo", ",", "ehi", "=", "np", ".", "zeros", "(", "len", "(", "...
Generate energy edges for single group
[ "Generate", "energy", "edges", "for", "single", "group" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L371-L379
train
32,370
zblz/naima
naima/utils.py
generate_energy_edges
def generate_energy_edges(ene, groups=None): """Generate energy bin edges from given energy array. Generate an array of energy edges from given energy array to be used as abcissa error bar limits when no energy uncertainty or energy band is provided. Parameters ---------- ene : `astropy.units.Quantity` array instance 1-D array of energies with associated phsyical units. Returns ------- energy_err_lo, energy_error_hi : `astropy.units.Quantity` arrays Arrays of low and high energy edges corresponding to each given energy of the input array. """ if groups is None or len(ene) != len(groups): return _generate_energy_edges_single(ene) else: eloehi = np.zeros((2, len(ene))) * ene.unit for g in np.unique(groups): group_edges = _generate_energy_edges_single(ene[groups == g]) eloehi[:, groups == g] = group_edges # hstack throws away units return eloehi
python
def generate_energy_edges(ene, groups=None): """Generate energy bin edges from given energy array. Generate an array of energy edges from given energy array to be used as abcissa error bar limits when no energy uncertainty or energy band is provided. Parameters ---------- ene : `astropy.units.Quantity` array instance 1-D array of energies with associated phsyical units. Returns ------- energy_err_lo, energy_error_hi : `astropy.units.Quantity` arrays Arrays of low and high energy edges corresponding to each given energy of the input array. """ if groups is None or len(ene) != len(groups): return _generate_energy_edges_single(ene) else: eloehi = np.zeros((2, len(ene))) * ene.unit for g in np.unique(groups): group_edges = _generate_energy_edges_single(ene[groups == g]) eloehi[:, groups == g] = group_edges # hstack throws away units return eloehi
[ "def", "generate_energy_edges", "(", "ene", ",", "groups", "=", "None", ")", ":", "if", "groups", "is", "None", "or", "len", "(", "ene", ")", "!=", "len", "(", "groups", ")", ":", "return", "_generate_energy_edges_single", "(", "ene", ")", "else", ":", ...
Generate energy bin edges from given energy array. Generate an array of energy edges from given energy array to be used as abcissa error bar limits when no energy uncertainty or energy band is provided. Parameters ---------- ene : `astropy.units.Quantity` array instance 1-D array of energies with associated phsyical units. Returns ------- energy_err_lo, energy_error_hi : `astropy.units.Quantity` arrays Arrays of low and high energy edges corresponding to each given energy of the input array.
[ "Generate", "energy", "bin", "edges", "from", "given", "energy", "array", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L382-L408
train
32,371
zblz/naima
naima/utils.py
build_data_table
def build_data_table( energy, flux, flux_error=None, flux_error_lo=None, flux_error_hi=None, energy_width=None, energy_lo=None, energy_hi=None, ul=None, cl=None, ): """ Read data into data dict. Parameters ---------- energy : :class:`~astropy.units.Quantity` array instance Observed photon energy array [physical type ``energy``] flux : :class:`~astropy.units.Quantity` array instance Observed flux array [physical type ``flux`` or ``differential flux``] flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance 68% CL gaussian uncertainty of the flux [physical type ``flux`` or ``differential flux``]. Either ``flux_error`` (symmetrical uncertainty) or ``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties) must be provided. energy_width, energy_lo, energy_hi : :class:`~astropy.units.Quantity` array instance, optional Width of the energy bins [physical type ``energy``]. Either ``energy_width`` (bin width) or ``energy_lo`` and ``energy_hi`` (Energies of the lower and upper bin edges) can be provided. If none are provided, ``generate_energy_edges`` will be used. ul : boolean or int array, optional Boolean array indicating which of the flux values given in ``flux`` correspond to upper limits. cl : float, optional Confidence level of the flux upper limits given by ``ul``. Returns ------- data : :class:`astropy.table.QTable` Data stored in an astropy Table. 
""" table = QTable() if cl is not None: cl = validate_scalar("cl", cl) table.meta["keywords"] = {"cl": {"value": cl}} table["energy"] = energy if energy_width is not None: table["energy_width"] = energy_width elif energy_lo is not None and energy_hi is not None: table["energy_lo"] = energy_lo table["energy_hi"] = energy_hi table["flux"] = flux if flux_error is not None: table["flux_error"] = flux_error elif flux_error_lo is not None and flux_error_hi is not None: table["flux_error_lo"] = flux_error_lo table["flux_error_hi"] = flux_error_hi else: raise TypeError("Flux error not provided!") if ul is not None: ul = np.array(ul, dtype=np.int) table["ul"] = ul table.meta["comments"] = ["Table generated with naima.build_data_table"] # test table units, format, etc validate_data_table(table) return table
python
def build_data_table( energy, flux, flux_error=None, flux_error_lo=None, flux_error_hi=None, energy_width=None, energy_lo=None, energy_hi=None, ul=None, cl=None, ): """ Read data into data dict. Parameters ---------- energy : :class:`~astropy.units.Quantity` array instance Observed photon energy array [physical type ``energy``] flux : :class:`~astropy.units.Quantity` array instance Observed flux array [physical type ``flux`` or ``differential flux``] flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance 68% CL gaussian uncertainty of the flux [physical type ``flux`` or ``differential flux``]. Either ``flux_error`` (symmetrical uncertainty) or ``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties) must be provided. energy_width, energy_lo, energy_hi : :class:`~astropy.units.Quantity` array instance, optional Width of the energy bins [physical type ``energy``]. Either ``energy_width`` (bin width) or ``energy_lo`` and ``energy_hi`` (Energies of the lower and upper bin edges) can be provided. If none are provided, ``generate_energy_edges`` will be used. ul : boolean or int array, optional Boolean array indicating which of the flux values given in ``flux`` correspond to upper limits. cl : float, optional Confidence level of the flux upper limits given by ``ul``. Returns ------- data : :class:`astropy.table.QTable` Data stored in an astropy Table. 
""" table = QTable() if cl is not None: cl = validate_scalar("cl", cl) table.meta["keywords"] = {"cl": {"value": cl}} table["energy"] = energy if energy_width is not None: table["energy_width"] = energy_width elif energy_lo is not None and energy_hi is not None: table["energy_lo"] = energy_lo table["energy_hi"] = energy_hi table["flux"] = flux if flux_error is not None: table["flux_error"] = flux_error elif flux_error_lo is not None and flux_error_hi is not None: table["flux_error_lo"] = flux_error_lo table["flux_error_hi"] = flux_error_hi else: raise TypeError("Flux error not provided!") if ul is not None: ul = np.array(ul, dtype=np.int) table["ul"] = ul table.meta["comments"] = ["Table generated with naima.build_data_table"] # test table units, format, etc validate_data_table(table) return table
[ "def", "build_data_table", "(", "energy", ",", "flux", ",", "flux_error", "=", "None", ",", "flux_error_lo", "=", "None", ",", "flux_error_hi", "=", "None", ",", "energy_width", "=", "None", ",", "energy_lo", "=", "None", ",", "energy_hi", "=", "None", ","...
Read data into data dict. Parameters ---------- energy : :class:`~astropy.units.Quantity` array instance Observed photon energy array [physical type ``energy``] flux : :class:`~astropy.units.Quantity` array instance Observed flux array [physical type ``flux`` or ``differential flux``] flux_error, flux_error_hi, flux_error_lo : :class:`~astropy.units.Quantity` array instance 68% CL gaussian uncertainty of the flux [physical type ``flux`` or ``differential flux``]. Either ``flux_error`` (symmetrical uncertainty) or ``flux_error_hi`` and ``flux_error_lo`` (asymmetrical uncertainties) must be provided. energy_width, energy_lo, energy_hi : :class:`~astropy.units.Quantity` array instance, optional Width of the energy bins [physical type ``energy``]. Either ``energy_width`` (bin width) or ``energy_lo`` and ``energy_hi`` (Energies of the lower and upper bin edges) can be provided. If none are provided, ``generate_energy_edges`` will be used. ul : boolean or int array, optional Boolean array indicating which of the flux values given in ``flux`` correspond to upper limits. cl : float, optional Confidence level of the flux upper limits given by ``ul``. Returns ------- data : :class:`astropy.table.QTable` Data stored in an astropy Table.
[ "Read", "data", "into", "data", "dict", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L411-L492
train
32,372
zblz/naima
naima/utils.py
estimate_B
def estimate_B( xray_table, vhe_table, photon_energy_density=0.261 * u.eV / u.cm ** 3 ): """ Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter. 
""" xray = validate_data_table(xray_table, sed=False) vhe = validate_data_table(vhe_table, sed=False) xray_lum = trapz_loglog(xray["flux"] * xray["energy"], xray["energy"]) vhe_lum = trapz_loglog(vhe["flux"] * vhe["energy"], vhe["energy"]) uph = (photon_energy_density.to("erg/cm3")).value B0 = ( np.sqrt((xray_lum / vhe_lum).decompose().value * 8 * np.pi * uph) * u.G ).to("uG") return B0
python
def estimate_B( xray_table, vhe_table, photon_energy_density=0.261 * u.eV / u.cm ** 3 ): """ Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter. 
""" xray = validate_data_table(xray_table, sed=False) vhe = validate_data_table(vhe_table, sed=False) xray_lum = trapz_loglog(xray["flux"] * xray["energy"], xray["energy"]) vhe_lum = trapz_loglog(vhe["flux"] * vhe["energy"], vhe["energy"]) uph = (photon_energy_density.to("erg/cm3")).value B0 = ( np.sqrt((xray_lum / vhe_lum).decompose().value * 8 * np.pi * uph) * u.G ).to("uG") return B0
[ "def", "estimate_B", "(", "xray_table", ",", "vhe_table", ",", "photon_energy_density", "=", "0.261", "*", "u", ".", "eV", "/", "u", ".", "cm", "**", "3", ")", ":", "xray", "=", "validate_data_table", "(", "xray_table", ",", "sed", "=", "False", ")", "...
Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter.
[ "Estimate", "magnetic", "field", "from", "synchrotron", "to", "Inverse", "Compton", "luminosity", "ratio" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/utils.py#L495-L555
train
32,373
zblz/naima
naima/extern/interruptible_pool.py
_initializer_wrapper
def _initializer_wrapper(actual_initializer, *rest): """ We ignore SIGINT. It's up to our parent to kill us in the typical condition of this arising from ``^C`` on a terminal. If someone is manually killing us with that signal, well... nothing will happen. """ signal.signal(signal.SIGINT, signal.SIG_IGN) if actual_initializer is not None: actual_initializer(*rest)
python
def _initializer_wrapper(actual_initializer, *rest): """ We ignore SIGINT. It's up to our parent to kill us in the typical condition of this arising from ``^C`` on a terminal. If someone is manually killing us with that signal, well... nothing will happen. """ signal.signal(signal.SIGINT, signal.SIG_IGN) if actual_initializer is not None: actual_initializer(*rest)
[ "def", "_initializer_wrapper", "(", "actual_initializer", ",", "*", "rest", ")", ":", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "SIG_IGN", ")", "if", "actual_initializer", "is", "not", "None", ":", "actual_initializer", "(", ...
We ignore SIGINT. It's up to our parent to kill us in the typical condition of this arising from ``^C`` on a terminal. If someone is manually killing us with that signal, well... nothing will happen.
[ "We", "ignore", "SIGINT", ".", "It", "s", "up", "to", "our", "parent", "to", "kill", "us", "in", "the", "typical", "condition", "of", "this", "arising", "from", "^C", "on", "a", "terminal", ".", "If", "someone", "is", "manually", "killing", "us", "with...
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/extern/interruptible_pool.py#L40-L49
train
32,374
zblz/naima
naima/models.py
PowerLaw.eval
def eval(e, amplitude, e_0, alpha): """One dimensional power law model function""" xx = e / e_0 return amplitude * xx ** (-alpha)
python
def eval(e, amplitude, e_0, alpha): """One dimensional power law model function""" xx = e / e_0 return amplitude * xx ** (-alpha)
[ "def", "eval", "(", "e", ",", "amplitude", ",", "e_0", ",", "alpha", ")", ":", "xx", "=", "e", "/", "e_0", "return", "amplitude", "*", "xx", "**", "(", "-", "alpha", ")" ]
One dimensional power law model function
[ "One", "dimensional", "power", "law", "model", "function" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/models.py#L92-L96
train
32,375
zblz/naima
naima/models.py
ExponentialCutoffPowerLaw.eval
def eval(e, amplitude, e_0, alpha, e_cutoff, beta): """One dimensional power law with an exponential cutoff model function """ xx = e / e_0 return amplitude * xx ** (-alpha) * np.exp(-(e / e_cutoff) ** beta)
python
def eval(e, amplitude, e_0, alpha, e_cutoff, beta): """One dimensional power law with an exponential cutoff model function """ xx = e / e_0 return amplitude * xx ** (-alpha) * np.exp(-(e / e_cutoff) ** beta)
[ "def", "eval", "(", "e", ",", "amplitude", ",", "e_0", ",", "alpha", ",", "e_cutoff", ",", "beta", ")", ":", "xx", "=", "e", "/", "e_0", "return", "amplitude", "*", "xx", "**", "(", "-", "alpha", ")", "*", "np", ".", "exp", "(", "-", "(", "e"...
One dimensional power law with an exponential cutoff model function
[ "One", "dimensional", "power", "law", "with", "an", "exponential", "cutoff", "model", "function" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/models.py#L161-L166
train
32,376
zblz/naima
naima/models.py
LogParabola.eval
def eval(e, amplitude, e_0, alpha, beta): """One dimenional log parabola model function""" ee = e / e_0 eeponent = -alpha - beta * np.log(ee) return amplitude * ee ** eeponent
python
def eval(e, amplitude, e_0, alpha, beta): """One dimenional log parabola model function""" ee = e / e_0 eeponent = -alpha - beta * np.log(ee) return amplitude * ee ** eeponent
[ "def", "eval", "(", "e", ",", "amplitude", ",", "e_0", ",", "alpha", ",", "beta", ")", ":", "ee", "=", "e", "/", "e_0", "eeponent", "=", "-", "alpha", "-", "beta", "*", "np", ".", "log", "(", "ee", ")", "return", "amplitude", "*", "ee", "**", ...
One dimenional log parabola model function
[ "One", "dimenional", "log", "parabola", "model", "function" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/models.py#L410-L415
train
32,377
zblz/naima
naima/radiative.py
BaseRadiative.flux
def flux(self, photon_energy, distance=1 * u.kpc): """Differential flux at a given distance from the source. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic differential luminosity will be returned. Default is 1 kpc. """ spec = self._spectrum(photon_energy) if distance != 0: distance = validate_scalar( "distance", distance, physical_type="length" ) spec /= 4 * np.pi * distance.to("cm") ** 2 out_unit = "1/(s cm2 eV)" else: out_unit = "1/(s eV)" return spec.to(out_unit)
python
def flux(self, photon_energy, distance=1 * u.kpc): """Differential flux at a given distance from the source. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic differential luminosity will be returned. Default is 1 kpc. """ spec = self._spectrum(photon_energy) if distance != 0: distance = validate_scalar( "distance", distance, physical_type="length" ) spec /= 4 * np.pi * distance.to("cm") ** 2 out_unit = "1/(s cm2 eV)" else: out_unit = "1/(s eV)" return spec.to(out_unit)
[ "def", "flux", "(", "self", ",", "photon_energy", ",", "distance", "=", "1", "*", "u", ".", "kpc", ")", ":", "spec", "=", "self", ".", "_spectrum", "(", "photon_energy", ")", "if", "distance", "!=", "0", ":", "distance", "=", "validate_scalar", "(", ...
Differential flux at a given distance from the source. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic differential luminosity will be returned. Default is 1 kpc.
[ "Differential", "flux", "at", "a", "given", "distance", "from", "the", "source", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L99-L123
train
32,378
zblz/naima
naima/radiative.py
BaseRadiative.sed
def sed(self, photon_energy, distance=1 * u.kpc): """Spectral energy distribution at a given distance from the source. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. """ if distance != 0: out_unit = "erg/(cm2 s)" else: out_unit = "erg/s" photon_energy = _validate_ene(photon_energy) sed = (self.flux(photon_energy, distance) * photon_energy ** 2.0).to( out_unit ) return sed
python
def sed(self, photon_energy, distance=1 * u.kpc): """Spectral energy distribution at a given distance from the source. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. """ if distance != 0: out_unit = "erg/(cm2 s)" else: out_unit = "erg/s" photon_energy = _validate_ene(photon_energy) sed = (self.flux(photon_energy, distance) * photon_energy ** 2.0).to( out_unit ) return sed
[ "def", "sed", "(", "self", ",", "photon_energy", ",", "distance", "=", "1", "*", "u", ".", "kpc", ")", ":", "if", "distance", "!=", "0", ":", "out_unit", "=", "\"erg/(cm2 s)\"", "else", ":", "out_unit", "=", "\"erg/s\"", "photon_energy", "=", "_validate_...
Spectral energy distribution at a given distance from the source. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc.
[ "Spectral", "energy", "distribution", "at", "a", "given", "distance", "from", "the", "source", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L125-L148
train
32,379
zblz/naima
naima/radiative.py
BaseElectron._gam
def _gam(self): """ Lorentz factor array """ log10gmin = np.log10(self.Eemin / mec2).value log10gmax = np.log10(self.Eemax / mec2).value return np.logspace( log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin)) )
python
def _gam(self): """ Lorentz factor array """ log10gmin = np.log10(self.Eemin / mec2).value log10gmax = np.log10(self.Eemax / mec2).value return np.logspace( log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin)) )
[ "def", "_gam", "(", "self", ")", ":", "log10gmin", "=", "np", ".", "log10", "(", "self", ".", "Eemin", "/", "mec2", ")", ".", "value", "log10gmax", "=", "np", ".", "log10", "(", "self", ".", "Eemax", "/", "mec2", ")", ".", "value", "return", "np"...
Lorentz factor array
[ "Lorentz", "factor", "array" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L163-L170
train
32,380
zblz/naima
naima/radiative.py
BaseElectron._nelec
def _nelec(self): """ Particles per unit lorentz factor """ pd = self.particle_distribution(self._gam * mec2) return pd.to(1 / mec2_unit).value
python
def _nelec(self): """ Particles per unit lorentz factor """ pd = self.particle_distribution(self._gam * mec2) return pd.to(1 / mec2_unit).value
[ "def", "_nelec", "(", "self", ")", ":", "pd", "=", "self", ".", "particle_distribution", "(", "self", ".", "_gam", "*", "mec2", ")", "return", "pd", ".", "to", "(", "1", "/", "mec2_unit", ")", ".", "value" ]
Particles per unit lorentz factor
[ "Particles", "per", "unit", "lorentz", "factor" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L173-L177
train
32,381
zblz/naima
naima/radiative.py
BaseElectron.We
def We(self): """ Total energy in electrons used for the radiative calculation """ We = trapz_loglog(self._gam * self._nelec, self._gam * mec2) return We
python
def We(self): """ Total energy in electrons used for the radiative calculation """ We = trapz_loglog(self._gam * self._nelec, self._gam * mec2) return We
[ "def", "We", "(", "self", ")", ":", "We", "=", "trapz_loglog", "(", "self", ".", "_gam", "*", "self", ".", "_nelec", ",", "self", ".", "_gam", "*", "mec2", ")", "return", "We" ]
Total energy in electrons used for the radiative calculation
[ "Total", "energy", "in", "electrons", "used", "for", "the", "radiative", "calculation" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L180-L184
train
32,382
zblz/naima
naima/radiative.py
BaseElectron.compute_We
def compute_We(self, Eemin=None, Eemax=None): """ Total energy in electrons between energies Eemin and Eemax Parameters ---------- Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation. """ if Eemin is None and Eemax is None: We = self.We else: if Eemax is None: Eemax = self.Eemax if Eemin is None: Eemin = self.Eemin log10gmin = np.log10(Eemin / mec2).value log10gmax = np.log10(Eemax / mec2).value gam = np.logspace( log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin)) ) nelec = ( self.particle_distribution(gam * mec2).to(1 / mec2_unit).value ) We = trapz_loglog(gam * nelec, gam * mec2) return We
python
def compute_We(self, Eemin=None, Eemax=None): """ Total energy in electrons between energies Eemin and Eemax Parameters ---------- Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation. """ if Eemin is None and Eemax is None: We = self.We else: if Eemax is None: Eemax = self.Eemax if Eemin is None: Eemin = self.Eemin log10gmin = np.log10(Eemin / mec2).value log10gmax = np.log10(Eemax / mec2).value gam = np.logspace( log10gmin, log10gmax, int(self.nEed * (log10gmax - log10gmin)) ) nelec = ( self.particle_distribution(gam * mec2).to(1 / mec2_unit).value ) We = trapz_loglog(gam * nelec, gam * mec2) return We
[ "def", "compute_We", "(", "self", ",", "Eemin", "=", "None", ",", "Eemax", "=", "None", ")", ":", "if", "Eemin", "is", "None", "and", "Eemax", "is", "None", ":", "We", "=", "self", ".", "We", "else", ":", "if", "Eemax", "is", "None", ":", "Eemax"...
Total energy in electrons between energies Eemin and Eemax Parameters ---------- Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation.
[ "Total", "energy", "in", "electrons", "between", "energies", "Eemin", "and", "Eemax" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L186-L215
train
32,383
zblz/naima
naima/radiative.py
BaseElectron.set_We
def set_We(self, We, Eemin=None, Eemax=None, amplitude_name=None): """ Normalize particle distribution so that the total energy in electrons between Eemin and Eemax is We Parameters ---------- We : :class:`~astropy.units.Quantity` float Desired energy in electrons. Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation. amplitude_name : str, optional Name of the amplitude parameter of the particle distribution. It must be accesible as an attribute of the distribution function. Defaults to ``amplitude``. """ We = validate_scalar("We", We, physical_type="energy") oldWe = self.compute_We(Eemin=Eemin, Eemax=Eemax) if amplitude_name is None: try: self.particle_distribution.amplitude *= ( We / oldWe ).decompose() except AttributeError: log.error( "The particle distribution does not have an attribute" " called amplitude to modify its normalization: you can" " set the name with the amplitude_name parameter of set_We" ) else: oldampl = getattr(self.particle_distribution, amplitude_name) setattr( self.particle_distribution, amplitude_name, oldampl * (We / oldWe).decompose(), )
python
def set_We(self, We, Eemin=None, Eemax=None, amplitude_name=None): """ Normalize particle distribution so that the total energy in electrons between Eemin and Eemax is We Parameters ---------- We : :class:`~astropy.units.Quantity` float Desired energy in electrons. Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation. amplitude_name : str, optional Name of the amplitude parameter of the particle distribution. It must be accesible as an attribute of the distribution function. Defaults to ``amplitude``. """ We = validate_scalar("We", We, physical_type="energy") oldWe = self.compute_We(Eemin=Eemin, Eemax=Eemax) if amplitude_name is None: try: self.particle_distribution.amplitude *= ( We / oldWe ).decompose() except AttributeError: log.error( "The particle distribution does not have an attribute" " called amplitude to modify its normalization: you can" " set the name with the amplitude_name parameter of set_We" ) else: oldampl = getattr(self.particle_distribution, amplitude_name) setattr( self.particle_distribution, amplitude_name, oldampl * (We / oldWe).decompose(), )
[ "def", "set_We", "(", "self", ",", "We", ",", "Eemin", "=", "None", ",", "Eemax", "=", "None", ",", "amplitude_name", "=", "None", ")", ":", "We", "=", "validate_scalar", "(", "\"We\"", ",", "We", ",", "physical_type", "=", "\"energy\"", ")", "oldWe", ...
Normalize particle distribution so that the total energy in electrons between Eemin and Eemax is We Parameters ---------- We : :class:`~astropy.units.Quantity` float Desired energy in electrons. Eemin : :class:`~astropy.units.Quantity` float, optional Minimum electron energy for energy content calculation. Eemax : :class:`~astropy.units.Quantity` float, optional Maximum electron energy for energy content calculation. amplitude_name : str, optional Name of the amplitude parameter of the particle distribution. It must be accesible as an attribute of the distribution function. Defaults to ``amplitude``.
[ "Normalize", "particle", "distribution", "so", "that", "the", "total", "energy", "in", "electrons", "between", "Eemin", "and", "Eemax", "is", "We" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L217-L258
train
32,384
zblz/naima
naima/radiative.py
Synchrotron._spectrum
def _spectrum(self, photon_energy): """Compute intrinsic synchrotron differential spectrum for energies in ``photon_energy`` Compute synchrotron for random magnetic field according to approximation of Aharonian, Kelner, and Prosekin 2010, PhysRev D 82, 3002 (`arXiv:1006.1045 <http://arxiv.org/abs/1006.1045>`_). Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array. """ outspecene = _validate_ene(photon_energy) from scipy.special import cbrt def Gtilde(x): """ AKP10 Eq. D7 Factor ~2 performance gain in using cbrt(x)**n vs x**(n/3.) Invoking crbt only once reduced time by ~40% """ cb = cbrt(x) gt1 = 1.808 * cb / np.sqrt(1 + 3.4 * cb ** 2.0) gt2 = 1 + 2.210 * cb ** 2.0 + 0.347 * cb ** 4.0 gt3 = 1 + 1.353 * cb ** 2.0 + 0.217 * cb ** 4.0 return gt1 * (gt2 / gt3) * np.exp(-x) log.debug("calc_sy: Starting synchrotron computation with AKB2010...") # strip units, ensuring correct conversion # astropy units do not convert correctly for gyroradius calculation # when using cgs (SI is fine, see # https://github.com/astropy/astropy/issues/1687) CS1_0 = np.sqrt(3) * e.value ** 3 * self.B.to("G").value CS1_1 = ( 2 * np.pi * m_e.cgs.value * c.cgs.value ** 2 * hbar.cgs.value * outspecene.to("erg").value ) CS1 = CS1_0 / CS1_1 # Critical energy, erg Ec = ( 3 * e.value * hbar.cgs.value * self.B.to("G").value * self._gam ** 2 ) Ec /= 2 * (m_e * c).cgs.value EgEc = outspecene.to("erg").value / np.vstack(Ec) dNdE = CS1 * Gtilde(EgEc) # return units spec = ( trapz_loglog(np.vstack(self._nelec) * dNdE, self._gam, axis=0) / u.s / u.erg ) spec = spec.to("1/(s eV)") return spec
python
def _spectrum(self, photon_energy): """Compute intrinsic synchrotron differential spectrum for energies in ``photon_energy`` Compute synchrotron for random magnetic field according to approximation of Aharonian, Kelner, and Prosekin 2010, PhysRev D 82, 3002 (`arXiv:1006.1045 <http://arxiv.org/abs/1006.1045>`_). Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array. """ outspecene = _validate_ene(photon_energy) from scipy.special import cbrt def Gtilde(x): """ AKP10 Eq. D7 Factor ~2 performance gain in using cbrt(x)**n vs x**(n/3.) Invoking crbt only once reduced time by ~40% """ cb = cbrt(x) gt1 = 1.808 * cb / np.sqrt(1 + 3.4 * cb ** 2.0) gt2 = 1 + 2.210 * cb ** 2.0 + 0.347 * cb ** 4.0 gt3 = 1 + 1.353 * cb ** 2.0 + 0.217 * cb ** 4.0 return gt1 * (gt2 / gt3) * np.exp(-x) log.debug("calc_sy: Starting synchrotron computation with AKB2010...") # strip units, ensuring correct conversion # astropy units do not convert correctly for gyroradius calculation # when using cgs (SI is fine, see # https://github.com/astropy/astropy/issues/1687) CS1_0 = np.sqrt(3) * e.value ** 3 * self.B.to("G").value CS1_1 = ( 2 * np.pi * m_e.cgs.value * c.cgs.value ** 2 * hbar.cgs.value * outspecene.to("erg").value ) CS1 = CS1_0 / CS1_1 # Critical energy, erg Ec = ( 3 * e.value * hbar.cgs.value * self.B.to("G").value * self._gam ** 2 ) Ec /= 2 * (m_e * c).cgs.value EgEc = outspecene.to("erg").value / np.vstack(Ec) dNdE = CS1 * Gtilde(EgEc) # return units spec = ( trapz_loglog(np.vstack(self._nelec) * dNdE, self._gam, axis=0) / u.s / u.erg ) spec = spec.to("1/(s eV)") return spec
[ "def", "_spectrum", "(", "self", ",", "photon_energy", ")", ":", "outspecene", "=", "_validate_ene", "(", "photon_energy", ")", "from", "scipy", ".", "special", "import", "cbrt", "def", "Gtilde", "(", "x", ")", ":", "\"\"\"\n AKP10 Eq. D7\n\n ...
Compute intrinsic synchrotron differential spectrum for energies in ``photon_energy`` Compute synchrotron for random magnetic field according to approximation of Aharonian, Kelner, and Prosekin 2010, PhysRev D 82, 3002 (`arXiv:1006.1045 <http://arxiv.org/abs/1006.1045>`_). Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array.
[ "Compute", "intrinsic", "synchrotron", "differential", "spectrum", "for", "energies", "in", "photon_energy" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L304-L372
train
32,385
zblz/naima
naima/radiative.py
InverseCompton._spectrum
def _spectrum(self, photon_energy): """Compute differential IC spectrum for energies in ``photon_energy``. Compute IC spectrum using IC cross-section for isotropic interaction with a blackbody photon spectrum following Khangulyan, Aharonian, and Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971 <http://www.arxiv.org/abs/1310.7971>`_). Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array. """ outspecene = _validate_ene(photon_energy) self.specic = [] for seed in self.seed_photon_fields: # Call actual computation, detached to allow changes in subclasses self.specic.append( self._calc_specic(seed, outspecene).to("1/(s eV)") ) return np.sum(u.Quantity(self.specic), axis=0)
python
def _spectrum(self, photon_energy): """Compute differential IC spectrum for energies in ``photon_energy``. Compute IC spectrum using IC cross-section for isotropic interaction with a blackbody photon spectrum following Khangulyan, Aharonian, and Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971 <http://www.arxiv.org/abs/1310.7971>`_). Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array. """ outspecene = _validate_ene(photon_energy) self.specic = [] for seed in self.seed_photon_fields: # Call actual computation, detached to allow changes in subclasses self.specic.append( self._calc_specic(seed, outspecene).to("1/(s eV)") ) return np.sum(u.Quantity(self.specic), axis=0)
[ "def", "_spectrum", "(", "self", ",", "photon_energy", ")", ":", "outspecene", "=", "_validate_ene", "(", "photon_energy", ")", "self", ".", "specic", "=", "[", "]", "for", "seed", "in", "self", ".", "seed_photon_fields", ":", "# Call actual computation, detache...
Compute differential IC spectrum for energies in ``photon_energy``. Compute IC spectrum using IC cross-section for isotropic interaction with a blackbody photon spectrum following Khangulyan, Aharonian, and Kelner 2014, ApJ 783, 100 (`arXiv:1310.7971 <http://www.arxiv.org/abs/1310.7971>`_). Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array.
[ "Compute", "differential", "IC", "spectrum", "for", "energies", "in", "photon_energy", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L743-L766
train
32,386
zblz/naima
naima/radiative.py
InverseCompton.flux
def flux(self, photon_energy, distance=1 * u.kpc, seed=None): """Differential flux at a given distance from the source from a single seed photon field Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default). """ model = super(InverseCompton, self).flux( photon_energy, distance=distance ) if seed is not None: # Test seed argument if not isinstance(seed, int): if seed not in self.seed_photon_fields: raise ValueError( "Provided seed photon field name is not in" " the definition of the InverseCompton instance" ) else: seed = list(self.seed_photon_fields.keys()).index(seed) elif seed > len(self.seed_photon_fields): raise ValueError( "Provided seed photon field number is larger" " than the number of seed photon fields defined in the" " InverseCompton instance" ) if distance != 0: distance = validate_scalar( "distance", distance, physical_type="length" ) dfac = 4 * np.pi * distance.to("cm") ** 2 out_unit = "1/(s cm2 eV)" else: dfac = 1 out_unit = "1/(s eV)" model = (self.specic[seed] / dfac).to(out_unit) return model
python
def flux(self, photon_energy, distance=1 * u.kpc, seed=None): """Differential flux at a given distance from the source from a single seed photon field Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default). """ model = super(InverseCompton, self).flux( photon_energy, distance=distance ) if seed is not None: # Test seed argument if not isinstance(seed, int): if seed not in self.seed_photon_fields: raise ValueError( "Provided seed photon field name is not in" " the definition of the InverseCompton instance" ) else: seed = list(self.seed_photon_fields.keys()).index(seed) elif seed > len(self.seed_photon_fields): raise ValueError( "Provided seed photon field number is larger" " than the number of seed photon fields defined in the" " InverseCompton instance" ) if distance != 0: distance = validate_scalar( "distance", distance, physical_type="length" ) dfac = 4 * np.pi * distance.to("cm") ** 2 out_unit = "1/(s cm2 eV)" else: dfac = 1 out_unit = "1/(s eV)" model = (self.specic[seed] / dfac).to(out_unit) return model
[ "def", "flux", "(", "self", ",", "photon_energy", ",", "distance", "=", "1", "*", "u", ".", "kpc", ",", "seed", "=", "None", ")", ":", "model", "=", "super", "(", "InverseCompton", ",", "self", ")", ".", "flux", "(", "photon_energy", ",", "distance",...
Differential flux at a given distance from the source from a single seed photon field Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default).
[ "Differential", "flux", "at", "a", "given", "distance", "from", "the", "source", "from", "a", "single", "seed", "photon", "field" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L768-L819
train
32,387
zblz/naima
naima/radiative.py
InverseCompton.sed
def sed(self, photon_energy, distance=1 * u.kpc, seed=None): """Spectral energy distribution at a given distance from the source Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default). """ sed = super(InverseCompton, self).sed(photon_energy, distance=distance) if seed is not None: if distance != 0: out_unit = "erg/(cm2 s)" else: out_unit = "erg/s" sed = ( self.flux(photon_energy, distance=distance, seed=seed) * photon_energy ** 2.0 ).to(out_unit) return sed
python
def sed(self, photon_energy, distance=1 * u.kpc, seed=None): """Spectral energy distribution at a given distance from the source Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default). """ sed = super(InverseCompton, self).sed(photon_energy, distance=distance) if seed is not None: if distance != 0: out_unit = "erg/(cm2 s)" else: out_unit = "erg/s" sed = ( self.flux(photon_energy, distance=distance, seed=seed) * photon_energy ** 2.0 ).to(out_unit) return sed
[ "def", "sed", "(", "self", ",", "photon_energy", ",", "distance", "=", "1", "*", "u", ".", "kpc", ",", "seed", "=", "None", ")", ":", "sed", "=", "super", "(", "InverseCompton", ",", "self", ")", ".", "sed", "(", "photon_energy", ",", "distance", "...
Spectral energy distribution at a given distance from the source Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` float or array Photon energy array. distance : :class:`~astropy.units.Quantity` float, optional Distance to the source. If set to 0, the intrinsic luminosity will be returned. Default is 1 kpc. seed : int, str or None Number or name of seed photon field for which the IC contribution is required. If set to None it will return the sum of all contributions (default).
[ "Spectral", "energy", "distribution", "at", "a", "given", "distance", "from", "the", "source" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L821-L851
train
32,388
zblz/naima
naima/radiative.py
Bremsstrahlung._emiss_ee
def _emiss_ee(self, Eph): """ Electron-electron bremsstrahlung emissivity per unit photon energy """ if self.weight_ee == 0.0: return np.zeros_like(Eph) gam = np.vstack(self._gam) # compute integral with electron distribution emiss = c.cgs * trapz_loglog( np.vstack(self._nelec) * self._sigma_ee(gam, Eph), self._gam, axis=0, ) return emiss
python
def _emiss_ee(self, Eph): """ Electron-electron bremsstrahlung emissivity per unit photon energy """ if self.weight_ee == 0.0: return np.zeros_like(Eph) gam = np.vstack(self._gam) # compute integral with electron distribution emiss = c.cgs * trapz_loglog( np.vstack(self._nelec) * self._sigma_ee(gam, Eph), self._gam, axis=0, ) return emiss
[ "def", "_emiss_ee", "(", "self", ",", "Eph", ")", ":", "if", "self", ".", "weight_ee", "==", "0.0", ":", "return", "np", ".", "zeros_like", "(", "Eph", ")", "gam", "=", "np", ".", "vstack", "(", "self", ".", "_gam", ")", "# compute integral with electr...
Electron-electron bremsstrahlung emissivity per unit photon energy
[ "Electron", "-", "electron", "bremsstrahlung", "emissivity", "per", "unit", "photon", "energy" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1002-L1016
train
32,389
zblz/naima
naima/radiative.py
Bremsstrahlung._emiss_ep
def _emiss_ep(self, Eph): """ Electron-proton bremsstrahlung emissivity per unit photon energy """ if self.weight_ep == 0.0: return np.zeros_like(Eph) gam = np.vstack(self._gam) eps = (Eph / mec2).decompose().value # compute integral with electron distribution emiss = c.cgs * trapz_loglog( np.vstack(self._nelec) * self._sigma_ep(gam, eps), self._gam, axis=0, ).to(u.cm ** 2 / Eph.unit) return emiss
python
def _emiss_ep(self, Eph): """ Electron-proton bremsstrahlung emissivity per unit photon energy """ if self.weight_ep == 0.0: return np.zeros_like(Eph) gam = np.vstack(self._gam) eps = (Eph / mec2).decompose().value # compute integral with electron distribution emiss = c.cgs * trapz_loglog( np.vstack(self._nelec) * self._sigma_ep(gam, eps), self._gam, axis=0, ).to(u.cm ** 2 / Eph.unit) return emiss
[ "def", "_emiss_ep", "(", "self", ",", "Eph", ")", ":", "if", "self", ".", "weight_ep", "==", "0.0", ":", "return", "np", ".", "zeros_like", "(", "Eph", ")", "gam", "=", "np", ".", "vstack", "(", "self", ".", "_gam", ")", "eps", "=", "(", "Eph", ...
Electron-proton bremsstrahlung emissivity per unit photon energy
[ "Electron", "-", "proton", "bremsstrahlung", "emissivity", "per", "unit", "photon", "energy" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1018-L1033
train
32,390
zblz/naima
naima/radiative.py
Bremsstrahlung._spectrum
def _spectrum(self, photon_energy): """Compute differential bremsstrahlung spectrum for energies in ``photon_energy``. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array. """ Eph = _validate_ene(photon_energy) spec = self.n0 * ( self.weight_ee * self._emiss_ee(Eph) + self.weight_ep * self._emiss_ep(Eph) ) return spec
python
def _spectrum(self, photon_energy): """Compute differential bremsstrahlung spectrum for energies in ``photon_energy``. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array. """ Eph = _validate_ene(photon_energy) spec = self.n0 * ( self.weight_ee * self._emiss_ee(Eph) + self.weight_ep * self._emiss_ep(Eph) ) return spec
[ "def", "_spectrum", "(", "self", ",", "photon_energy", ")", ":", "Eph", "=", "_validate_ene", "(", "photon_energy", ")", "spec", "=", "self", ".", "n0", "*", "(", "self", ".", "weight_ee", "*", "self", ".", "_emiss_ee", "(", "Eph", ")", "+", "self", ...
Compute differential bremsstrahlung spectrum for energies in ``photon_energy``. Parameters ---------- photon_energy : :class:`~astropy.units.Quantity` instance Photon energy array.
[ "Compute", "differential", "bremsstrahlung", "spectrum", "for", "energies", "in", "photon_energy", "." ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1035-L1052
train
32,391
zblz/naima
naima/radiative.py
BaseProton._Ep
def _Ep(self): """ Proton energy array in GeV """ return np.logspace( np.log10(self.Epmin.to("GeV").value), np.log10(self.Epmax.to("GeV").value), int(self.nEpd * (np.log10(self.Epmax / self.Epmin))), )
python
def _Ep(self): """ Proton energy array in GeV """ return np.logspace( np.log10(self.Epmin.to("GeV").value), np.log10(self.Epmax.to("GeV").value), int(self.nEpd * (np.log10(self.Epmax / self.Epmin))), )
[ "def", "_Ep", "(", "self", ")", ":", "return", "np", ".", "logspace", "(", "np", ".", "log10", "(", "self", ".", "Epmin", ".", "to", "(", "\"GeV\"", ")", ".", "value", ")", ",", "np", ".", "log10", "(", "self", ".", "Epmax", ".", "to", "(", "...
Proton energy array in GeV
[ "Proton", "energy", "array", "in", "GeV" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1067-L1074
train
32,392
zblz/naima
naima/radiative.py
BaseProton._J
def _J(self): """ Particles per unit proton energy in particles per GeV """ pd = self.particle_distribution(self._Ep * u.GeV) return pd.to("1/GeV").value
python
def _J(self): """ Particles per unit proton energy in particles per GeV """ pd = self.particle_distribution(self._Ep * u.GeV) return pd.to("1/GeV").value
[ "def", "_J", "(", "self", ")", ":", "pd", "=", "self", ".", "particle_distribution", "(", "self", ".", "_Ep", "*", "u", ".", "GeV", ")", "return", "pd", ".", "to", "(", "\"1/GeV\"", ")", ".", "value" ]
Particles per unit proton energy in particles per GeV
[ "Particles", "per", "unit", "proton", "energy", "in", "particles", "per", "GeV" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1077-L1081
train
32,393
zblz/naima
naima/radiative.py
BaseProton.Wp
def Wp(self): """Total energy in protons """ Wp = trapz_loglog(self._Ep * self._J, self._Ep) * u.GeV return Wp.to("erg")
python
def Wp(self): """Total energy in protons """ Wp = trapz_loglog(self._Ep * self._J, self._Ep) * u.GeV return Wp.to("erg")
[ "def", "Wp", "(", "self", ")", ":", "Wp", "=", "trapz_loglog", "(", "self", ".", "_Ep", "*", "self", ".", "_J", ",", "self", ".", "_Ep", ")", "*", "u", ".", "GeV", "return", "Wp", ".", "to", "(", "\"erg\"", ")" ]
Total energy in protons
[ "Total", "energy", "in", "protons" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1084-L1088
train
32,394
zblz/naima
naima/radiative.py
BaseProton.compute_Wp
def compute_Wp(self, Epmin=None, Epmax=None): """ Total energy in protons between energies Epmin and Epmax Parameters ---------- Epmin : :class:`~astropy.units.Quantity` float, optional Minimum proton energy for energy content calculation. Epmax : :class:`~astropy.units.Quantity` float, optional Maximum proton energy for energy content calculation. """ if Epmin is None and Epmax is None: Wp = self.Wp else: if Epmax is None: Epmax = self.Epmax if Epmin is None: Epmin = self.Epmin log10Epmin = np.log10(Epmin.to("GeV").value) log10Epmax = np.log10(Epmax.to("GeV").value) Ep = ( np.logspace( log10Epmin, log10Epmax, int(self.nEpd * (log10Epmax - log10Epmin)), ) * u.GeV ) pdist = self.particle_distribution(Ep) Wp = trapz_loglog(Ep * pdist, Ep).to("erg") return Wp
python
def compute_Wp(self, Epmin=None, Epmax=None): """ Total energy in protons between energies Epmin and Epmax Parameters ---------- Epmin : :class:`~astropy.units.Quantity` float, optional Minimum proton energy for energy content calculation. Epmax : :class:`~astropy.units.Quantity` float, optional Maximum proton energy for energy content calculation. """ if Epmin is None and Epmax is None: Wp = self.Wp else: if Epmax is None: Epmax = self.Epmax if Epmin is None: Epmin = self.Epmin log10Epmin = np.log10(Epmin.to("GeV").value) log10Epmax = np.log10(Epmax.to("GeV").value) Ep = ( np.logspace( log10Epmin, log10Epmax, int(self.nEpd * (log10Epmax - log10Epmin)), ) * u.GeV ) pdist = self.particle_distribution(Ep) Wp = trapz_loglog(Ep * pdist, Ep).to("erg") return Wp
[ "def", "compute_Wp", "(", "self", ",", "Epmin", "=", "None", ",", "Epmax", "=", "None", ")", ":", "if", "Epmin", "is", "None", "and", "Epmax", "is", "None", ":", "Wp", "=", "self", ".", "Wp", "else", ":", "if", "Epmax", "is", "None", ":", "Epmax"...
Total energy in protons between energies Epmin and Epmax Parameters ---------- Epmin : :class:`~astropy.units.Quantity` float, optional Minimum proton energy for energy content calculation. Epmax : :class:`~astropy.units.Quantity` float, optional Maximum proton energy for energy content calculation.
[ "Total", "energy", "in", "protons", "between", "energies", "Epmin", "and", "Epmax" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1090-L1122
train
32,395
zblz/naima
naima/radiative.py
BaseProton.set_Wp
def set_Wp(self, Wp, Epmin=None, Epmax=None, amplitude_name=None): """ Normalize particle distribution so that the total energy in protons between Epmin and Epmax is Wp Parameters ---------- Wp : :class:`~astropy.units.Quantity` float Desired energy in protons. Epmin : :class:`~astropy.units.Quantity` float, optional Minimum proton energy for energy content calculation. Epmax : :class:`~astropy.units.Quantity` float, optional Maximum proton energy for energy content calculation. amplitude_name : str, optional Name of the amplitude parameter of the particle distribution. It must be accesible as an attribute of the distribution function. Defaults to ``amplitude``. """ Wp = validate_scalar("Wp", Wp, physical_type="energy") oldWp = self.compute_Wp(Epmin=Epmin, Epmax=Epmax) if amplitude_name is None: try: self.particle_distribution.amplitude *= ( Wp / oldWp ).decompose() except AttributeError: log.error( "The particle distribution does not have an attribute" " called amplitude to modify its normalization: you can" " set the name with the amplitude_name parameter of set_Wp" ) else: oldampl = getattr(self.particle_distribution, amplitude_name) setattr( self.particle_distribution, amplitude_name, oldampl * (Wp / oldWp).decompose(), )
python
def set_Wp(self, Wp, Epmin=None, Epmax=None, amplitude_name=None): """ Normalize particle distribution so that the total energy in protons between Epmin and Epmax is Wp Parameters ---------- Wp : :class:`~astropy.units.Quantity` float Desired energy in protons. Epmin : :class:`~astropy.units.Quantity` float, optional Minimum proton energy for energy content calculation. Epmax : :class:`~astropy.units.Quantity` float, optional Maximum proton energy for energy content calculation. amplitude_name : str, optional Name of the amplitude parameter of the particle distribution. It must be accesible as an attribute of the distribution function. Defaults to ``amplitude``. """ Wp = validate_scalar("Wp", Wp, physical_type="energy") oldWp = self.compute_Wp(Epmin=Epmin, Epmax=Epmax) if amplitude_name is None: try: self.particle_distribution.amplitude *= ( Wp / oldWp ).decompose() except AttributeError: log.error( "The particle distribution does not have an attribute" " called amplitude to modify its normalization: you can" " set the name with the amplitude_name parameter of set_Wp" ) else: oldampl = getattr(self.particle_distribution, amplitude_name) setattr( self.particle_distribution, amplitude_name, oldampl * (Wp / oldWp).decompose(), )
[ "def", "set_Wp", "(", "self", ",", "Wp", ",", "Epmin", "=", "None", ",", "Epmax", "=", "None", ",", "amplitude_name", "=", "None", ")", ":", "Wp", "=", "validate_scalar", "(", "\"Wp\"", ",", "Wp", ",", "physical_type", "=", "\"energy\"", ")", "oldWp", ...
Normalize particle distribution so that the total energy in protons between Epmin and Epmax is Wp Parameters ---------- Wp : :class:`~astropy.units.Quantity` float Desired energy in protons. Epmin : :class:`~astropy.units.Quantity` float, optional Minimum proton energy for energy content calculation. Epmax : :class:`~astropy.units.Quantity` float, optional Maximum proton energy for energy content calculation. amplitude_name : str, optional Name of the amplitude parameter of the particle distribution. It must be accesible as an attribute of the distribution function. Defaults to ``amplitude``.
[ "Normalize", "particle", "distribution", "so", "that", "the", "total", "energy", "in", "protons", "between", "Epmin", "and", "Epmax", "is", "Wp" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1124-L1165
train
32,396
zblz/naima
naima/radiative.py
PionDecay._sigma_inel
def _sigma_inel(self, Tp): """ Inelastic cross-section for p-p interaction. KATV14 Eq. 1 Parameters ---------- Tp : float Kinetic energy of proton (i.e. Ep - m_p*c**2) [GeV] Returns ------- sigma_inel : float Inelastic cross-section for p-p interaction [1/cm2]. """ L = np.log(Tp / self._Tth) sigma = 30.7 - 0.96 * L + 0.18 * L ** 2 sigma *= (1 - (self._Tth / Tp) ** 1.9) ** 3 return sigma * 1e-27
python
def _sigma_inel(self, Tp): """ Inelastic cross-section for p-p interaction. KATV14 Eq. 1 Parameters ---------- Tp : float Kinetic energy of proton (i.e. Ep - m_p*c**2) [GeV] Returns ------- sigma_inel : float Inelastic cross-section for p-p interaction [1/cm2]. """ L = np.log(Tp / self._Tth) sigma = 30.7 - 0.96 * L + 0.18 * L ** 2 sigma *= (1 - (self._Tth / Tp) ** 1.9) ** 3 return sigma * 1e-27
[ "def", "_sigma_inel", "(", "self", ",", "Tp", ")", ":", "L", "=", "np", ".", "log", "(", "Tp", "/", "self", ".", "_Tth", ")", "sigma", "=", "30.7", "-", "0.96", "*", "L", "+", "0.18", "*", "L", "**", "2", "sigma", "*=", "(", "1", "-", "(", ...
Inelastic cross-section for p-p interaction. KATV14 Eq. 1 Parameters ---------- Tp : float Kinetic energy of proton (i.e. Ep - m_p*c**2) [GeV] Returns ------- sigma_inel : float Inelastic cross-section for p-p interaction [1/cm2].
[ "Inelastic", "cross", "-", "section", "for", "p", "-", "p", "interaction", ".", "KATV14", "Eq", ".", "1" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1284-L1302
train
32,397
zblz/naima
naima/radiative.py
PionDecay._sigma_pi_loE
def _sigma_pi_loE(self, Tp): """ inclusive cross section for Tth < Tp < 2 GeV Fit from experimental data """ m_p = self._m_p m_pi = self._m_pi Mres = 1.1883 # GeV Gres = 0.2264 # GeV s = 2 * m_p * (Tp + 2 * m_p) # center of mass energy gamma = np.sqrt(Mres ** 2 * (Mres ** 2 + Gres ** 2)) K = np.sqrt(8) * Mres * Gres * gamma K /= np.pi * np.sqrt(Mres ** 2 + gamma) fBW = m_p * K fBW /= ( (np.sqrt(s) - m_p) ** 2 - Mres ** 2 ) ** 2 + Mres ** 2 * Gres ** 2 mu = np.sqrt( (s - m_pi ** 2 - 4 * m_p ** 2) ** 2 - 16 * m_pi ** 2 * m_p ** 2 ) mu /= 2 * m_pi * np.sqrt(s) sigma0 = 7.66e-3 # mb sigma1pi = sigma0 * mu ** 1.95 * (1 + mu + mu ** 5) * fBW ** 1.86 # two pion production sigma2pi = 5.7 # mb sigma2pi /= 1 + np.exp(-9.3 * (Tp - 1.4)) E2pith = 0.56 # GeV sigma2pi[np.where(Tp < E2pith)] = 0.0 return (sigma1pi + sigma2pi) * 1e-27
python
def _sigma_pi_loE(self, Tp): """ inclusive cross section for Tth < Tp < 2 GeV Fit from experimental data """ m_p = self._m_p m_pi = self._m_pi Mres = 1.1883 # GeV Gres = 0.2264 # GeV s = 2 * m_p * (Tp + 2 * m_p) # center of mass energy gamma = np.sqrt(Mres ** 2 * (Mres ** 2 + Gres ** 2)) K = np.sqrt(8) * Mres * Gres * gamma K /= np.pi * np.sqrt(Mres ** 2 + gamma) fBW = m_p * K fBW /= ( (np.sqrt(s) - m_p) ** 2 - Mres ** 2 ) ** 2 + Mres ** 2 * Gres ** 2 mu = np.sqrt( (s - m_pi ** 2 - 4 * m_p ** 2) ** 2 - 16 * m_pi ** 2 * m_p ** 2 ) mu /= 2 * m_pi * np.sqrt(s) sigma0 = 7.66e-3 # mb sigma1pi = sigma0 * mu ** 1.95 * (1 + mu + mu ** 5) * fBW ** 1.86 # two pion production sigma2pi = 5.7 # mb sigma2pi /= 1 + np.exp(-9.3 * (Tp - 1.4)) E2pith = 0.56 # GeV sigma2pi[np.where(Tp < E2pith)] = 0.0 return (sigma1pi + sigma2pi) * 1e-27
[ "def", "_sigma_pi_loE", "(", "self", ",", "Tp", ")", ":", "m_p", "=", "self", ".", "_m_p", "m_pi", "=", "self", ".", "_m_pi", "Mres", "=", "1.1883", "# GeV", "Gres", "=", "0.2264", "# GeV", "s", "=", "2", "*", "m_p", "*", "(", "Tp", "+", "2", "...
inclusive cross section for Tth < Tp < 2 GeV Fit from experimental data
[ "inclusive", "cross", "section", "for", "Tth", "<", "Tp", "<", "2", "GeV", "Fit", "from", "experimental", "data" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1304-L1339
train
32,398
zblz/naima
naima/radiative.py
PionDecay._sigma_pi_midE
def _sigma_pi_midE(self, Tp): """ Geant 4.10.0 model for 2 GeV < Tp < 5 GeV """ m_p = self._m_p Qp = (Tp - self._Tth) / m_p multip = -6e-3 + 0.237 * Qp - 0.023 * Qp ** 2 return self._sigma_inel(Tp) * multip
python
def _sigma_pi_midE(self, Tp): """ Geant 4.10.0 model for 2 GeV < Tp < 5 GeV """ m_p = self._m_p Qp = (Tp - self._Tth) / m_p multip = -6e-3 + 0.237 * Qp - 0.023 * Qp ** 2 return self._sigma_inel(Tp) * multip
[ "def", "_sigma_pi_midE", "(", "self", ",", "Tp", ")", ":", "m_p", "=", "self", ".", "_m_p", "Qp", "=", "(", "Tp", "-", "self", ".", "_Tth", ")", "/", "m_p", "multip", "=", "-", "6e-3", "+", "0.237", "*", "Qp", "-", "0.023", "*", "Qp", "**", "...
Geant 4.10.0 model for 2 GeV < Tp < 5 GeV
[ "Geant", "4", ".", "10", ".", "0", "model", "for", "2", "GeV", "<", "Tp", "<", "5", "GeV" ]
d6a6781d73bf58fd8269e8b0e3b70be22723cd5b
https://github.com/zblz/naima/blob/d6a6781d73bf58fd8269e8b0e3b70be22723cd5b/naima/radiative.py#L1341-L1348
train
32,399