body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
1bb263384b821b0815017e0fbf2c8fac8beb5e10026eb3c94bec6890af03e7cf | def __init__(self, url: str, tags: Optional[dict]=None, auth: Optional[emitter.BasicAuth]=None, version: Optional[str]=None):
'\n Create new Loki logging handler.\n\n Arguments:\n url: Endpoint used to send log entries to Loki (e.g. `https://my-loki-instance/loki/api/v1/push`).\n tags: Default tags added to every log record.\n auth: Optional tuple with username and password for basic HTTP authentication.\n version: Version of Loki emitter to use.\n\n '
super().__init__()
if ((version is None) and (const.emitter_ver == '0')):
msg = ('Loki /api/prom/push endpoint is in the depreciation process starting from version 0.4.0.', "Explicitly set the emitter version to '0' if you want to use the old endpoint.", "Or specify '1' if you have Loki version> = 0.4.0.", 'When the old API is removed from Loki, the handler will use the new version by default.')
warnings.warn(' '.join(msg), DeprecationWarning)
version = (version or const.emitter_ver)
if (version not in self.emitters):
raise ValueError('Unknown emitter version: {0}'.format(version))
self.emitter = self.emitters[version](url, tags, auth) | Create new Loki logging handler.
Arguments:
url: Endpoint used to send log entries to Loki (e.g. `https://my-loki-instance/loki/api/v1/push`).
tags: Default tags added to every log record.
auth: Optional tuple with username and password for basic HTTP authentication.
version: Version of Loki emitter to use. | logging_loki/handlers.py | __init__ | muze-app/python-logging-loki | 71 | python | def __init__(self, url: str, tags: Optional[dict]=None, auth: Optional[emitter.BasicAuth]=None, version: Optional[str]=None):
'\n Create new Loki logging handler.\n\n Arguments:\n url: Endpoint used to send log entries to Loki (e.g. `https://my-loki-instance/loki/api/v1/push`).\n tags: Default tags added to every log record.\n auth: Optional tuple with username and password for basic HTTP authentication.\n version: Version of Loki emitter to use.\n\n '
super().__init__()
if ((version is None) and (const.emitter_ver == '0')):
msg = ('Loki /api/prom/push endpoint is in the depreciation process starting from version 0.4.0.', "Explicitly set the emitter version to '0' if you want to use the old endpoint.", "Or specify '1' if you have Loki version> = 0.4.0.", 'When the old API is removed from Loki, the handler will use the new version by default.')
warnings.warn(' '.join(msg), DeprecationWarning)
version = (version or const.emitter_ver)
if (version not in self.emitters):
raise ValueError('Unknown emitter version: {0}'.format(version))
self.emitter = self.emitters[version](url, tags, auth) | def __init__(self, url: str, tags: Optional[dict]=None, auth: Optional[emitter.BasicAuth]=None, version: Optional[str]=None):
'\n Create new Loki logging handler.\n\n Arguments:\n url: Endpoint used to send log entries to Loki (e.g. `https://my-loki-instance/loki/api/v1/push`).\n tags: Default tags added to every log record.\n auth: Optional tuple with username and password for basic HTTP authentication.\n version: Version of Loki emitter to use.\n\n '
super().__init__()
if ((version is None) and (const.emitter_ver == '0')):
msg = ('Loki /api/prom/push endpoint is in the depreciation process starting from version 0.4.0.', "Explicitly set the emitter version to '0' if you want to use the old endpoint.", "Or specify '1' if you have Loki version> = 0.4.0.", 'When the old API is removed from Loki, the handler will use the new version by default.')
warnings.warn(' '.join(msg), DeprecationWarning)
version = (version or const.emitter_ver)
if (version not in self.emitters):
raise ValueError('Unknown emitter version: {0}'.format(version))
self.emitter = self.emitters[version](url, tags, auth)<|docstring|>Create new Loki logging handler.
Arguments:
url: Endpoint used to send log entries to Loki (e.g. `https://my-loki-instance/loki/api/v1/push`).
tags: Default tags added to every log record.
auth: Optional tuple with username and password for basic HTTP authentication.
version: Version of Loki emitter to use.<|endoftext|> |
1df513d6642bde677b25a5e5ca57a64337105562c10a0c31a702650350bb22c6 | def handleError(self, record):
'Close emitter and let default handler take actions on error.'
self.emitter.close()
super().handleError(record) | Close emitter and let default handler take actions on error. | logging_loki/handlers.py | handleError | muze-app/python-logging-loki | 71 | python | def handleError(self, record):
self.emitter.close()
super().handleError(record) | def handleError(self, record):
self.emitter.close()
super().handleError(record)<|docstring|>Close emitter and let default handler take actions on error.<|endoftext|> |
bb10a3a5ad1c41a4156ab07247ef4a0e4d937dd335b99c808da7991b5d5688f9 | def emit(self, record: logging.LogRecord):
'Send log record to Loki.'
try:
self.emitter(record, self.format(record))
except Exception:
self.handleError(record) | Send log record to Loki. | logging_loki/handlers.py | emit | muze-app/python-logging-loki | 71 | python | def emit(self, record: logging.LogRecord):
try:
self.emitter(record, self.format(record))
except Exception:
self.handleError(record) | def emit(self, record: logging.LogRecord):
try:
self.emitter(record, self.format(record))
except Exception:
self.handleError(record)<|docstring|>Send log record to Loki.<|endoftext|> |
2d5ef0a3e18a43781c2061331cd78f2962ad536aff97dd15367bde395a3c2937 | def randomString(stringLength=10):
'Generate a random string of fixed length '
letters = string.ascii_lowercase
return (str(int(time.time())) + ''.join((random.choice(letters) for i in range(stringLength)))) | Generate a random string of fixed length | python/utils/imagetotext_v2.py | randomString | barkavi87/anuvaad-corpus | 2 | python | def randomString(stringLength=10):
' '
letters = string.ascii_lowercase
return (str(int(time.time())) + .join((random.choice(letters) for i in range(stringLength)))) | def randomString(stringLength=10):
' '
letters = string.ascii_lowercase
return (str(int(time.time())) + .join((random.choice(letters) for i in range(stringLength))))<|docstring|>Generate a random string of fixed length<|endoftext|> |
0a2710207deb0510aea5326e7c8f27204137483a29253467a2af7636f62d5143 | def cg(Ax, b, max_iter=100, epsilon=1e-05):
' Conjugate Gradient\n Args:\n Ax: function, takes list of tensors as input\n b: list of tensors\n Returns:\n x_star: list of tensors\n '
x_last = [torch.zeros_like(bb) for bb in b]
r_last = [torch.zeros_like(bb).copy_(bb) for bb in b]
p_last = [torch.zeros_like(rr).copy_(rr) for rr in r_last]
for ii in range(max_iter):
Ap = Ax(p_last)
Ap_vec = cat_list_to_tensor(Ap)
p_last_vec = cat_list_to_tensor(p_last)
r_last_vec = cat_list_to_tensor(r_last)
rTr = torch.sum((r_last_vec * r_last_vec))
pAp = torch.sum((p_last_vec * Ap_vec))
alpha = (rTr / pAp)
x = [(xx + (alpha * pp)) for (xx, pp) in zip(x_last, p_last)]
r = [(rr - (alpha * pp)) for (rr, pp) in zip(r_last, Ap)]
r_vec = cat_list_to_tensor(r)
if (float(torch.norm(r_vec)) < epsilon):
break
beta = (torch.sum((r_vec * r_vec)) / rTr)
p = [(rr + (beta * pp)) for (rr, pp) in zip(r, p_last)]
x_last = x
p_last = p
r_last = r
return x_last | Conjugate Gradient
Args:
Ax: function, takes list of tensors as input
b: list of tensors
Returns:
x_star: list of tensors | hypergrad/CG_torch.py | cg | Arnav0400/hypertorch | 90 | python | def cg(Ax, b, max_iter=100, epsilon=1e-05):
' Conjugate Gradient\n Args:\n Ax: function, takes list of tensors as input\n b: list of tensors\n Returns:\n x_star: list of tensors\n '
x_last = [torch.zeros_like(bb) for bb in b]
r_last = [torch.zeros_like(bb).copy_(bb) for bb in b]
p_last = [torch.zeros_like(rr).copy_(rr) for rr in r_last]
for ii in range(max_iter):
Ap = Ax(p_last)
Ap_vec = cat_list_to_tensor(Ap)
p_last_vec = cat_list_to_tensor(p_last)
r_last_vec = cat_list_to_tensor(r_last)
rTr = torch.sum((r_last_vec * r_last_vec))
pAp = torch.sum((p_last_vec * Ap_vec))
alpha = (rTr / pAp)
x = [(xx + (alpha * pp)) for (xx, pp) in zip(x_last, p_last)]
r = [(rr - (alpha * pp)) for (rr, pp) in zip(r_last, Ap)]
r_vec = cat_list_to_tensor(r)
if (float(torch.norm(r_vec)) < epsilon):
break
beta = (torch.sum((r_vec * r_vec)) / rTr)
p = [(rr + (beta * pp)) for (rr, pp) in zip(r, p_last)]
x_last = x
p_last = p
r_last = r
return x_last | def cg(Ax, b, max_iter=100, epsilon=1e-05):
' Conjugate Gradient\n Args:\n Ax: function, takes list of tensors as input\n b: list of tensors\n Returns:\n x_star: list of tensors\n '
x_last = [torch.zeros_like(bb) for bb in b]
r_last = [torch.zeros_like(bb).copy_(bb) for bb in b]
p_last = [torch.zeros_like(rr).copy_(rr) for rr in r_last]
for ii in range(max_iter):
Ap = Ax(p_last)
Ap_vec = cat_list_to_tensor(Ap)
p_last_vec = cat_list_to_tensor(p_last)
r_last_vec = cat_list_to_tensor(r_last)
rTr = torch.sum((r_last_vec * r_last_vec))
pAp = torch.sum((p_last_vec * Ap_vec))
alpha = (rTr / pAp)
x = [(xx + (alpha * pp)) for (xx, pp) in zip(x_last, p_last)]
r = [(rr - (alpha * pp)) for (rr, pp) in zip(r_last, Ap)]
r_vec = cat_list_to_tensor(r)
if (float(torch.norm(r_vec)) < epsilon):
break
beta = (torch.sum((r_vec * r_vec)) / rTr)
p = [(rr + (beta * pp)) for (rr, pp) in zip(r, p_last)]
x_last = x
p_last = p
r_last = r
return x_last<|docstring|>Conjugate Gradient
Args:
Ax: function, takes list of tensors as input
b: list of tensors
Returns:
x_star: list of tensors<|endoftext|> |
a349b3e2f13028260b06c868e64d7e2811a1c019d1c5ba9a58e16c1839dacf36 | def walk(self):
'Walk over the message tree, yielding each subpart.\n\n The walk is performed in depth-first order. This method is a\n generator.\n '
(yield self)
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
(yield subsubpart) | Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator. | Lib/email/Iterators.py | walk | deadsnakes/python2.4 | 33 | python | def walk(self):
'Walk over the message tree, yielding each subpart.\n\n The walk is performed in depth-first order. This method is a\n generator.\n '
(yield self)
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
(yield subsubpart) | def walk(self):
'Walk over the message tree, yielding each subpart.\n\n The walk is performed in depth-first order. This method is a\n generator.\n '
(yield self)
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
(yield subsubpart)<|docstring|>Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.<|endoftext|> |
05d7824854c310548ce9ca1b868041ca704d323acfa2f4e01dc400615f026389 | def body_line_iterator(msg, decode=False):
'Iterate over the parts, returning string payloads line-by-line.\n\n Optional decode (default False) is passed through to .get_payload().\n '
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
(yield line) | Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload(). | Lib/email/Iterators.py | body_line_iterator | deadsnakes/python2.4 | 33 | python | def body_line_iterator(msg, decode=False):
'Iterate over the parts, returning string payloads line-by-line.\n\n Optional decode (default False) is passed through to .get_payload().\n '
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
(yield line) | def body_line_iterator(msg, decode=False):
'Iterate over the parts, returning string payloads line-by-line.\n\n Optional decode (default False) is passed through to .get_payload().\n '
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, basestring):
for line in StringIO(payload):
(yield line)<|docstring|>Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().<|endoftext|> |
011eaa45578658ceeb2945fdc0572beb72284a9d400d9e53e2d5dc3eb16ae743 | def typed_subpart_iterator(msg, maintype='text', subtype=None):
'Iterate over the subparts with a given MIME type.\n\n Use `maintype\' as the main MIME type to match against; this defaults to\n "text". Optional `subtype\' is the MIME subtype to match against; if\n omitted, only the main type is matched.\n '
for subpart in msg.walk():
if (subpart.get_content_maintype() == maintype):
if ((subtype is None) or (subpart.get_content_subtype() == subtype)):
(yield subpart) | Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched. | Lib/email/Iterators.py | typed_subpart_iterator | deadsnakes/python2.4 | 33 | python | def typed_subpart_iterator(msg, maintype='text', subtype=None):
'Iterate over the subparts with a given MIME type.\n\n Use `maintype\' as the main MIME type to match against; this defaults to\n "text". Optional `subtype\' is the MIME subtype to match against; if\n omitted, only the main type is matched.\n '
for subpart in msg.walk():
if (subpart.get_content_maintype() == maintype):
if ((subtype is None) or (subpart.get_content_subtype() == subtype)):
(yield subpart) | def typed_subpart_iterator(msg, maintype='text', subtype=None):
'Iterate over the subparts with a given MIME type.\n\n Use `maintype\' as the main MIME type to match against; this defaults to\n "text". Optional `subtype\' is the MIME subtype to match against; if\n omitted, only the main type is matched.\n '
for subpart in msg.walk():
if (subpart.get_content_maintype() == maintype):
if ((subtype is None) or (subpart.get_content_subtype() == subtype)):
(yield subpart)<|docstring|>Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.<|endoftext|> |
fc3e0db1dd09504120eb8dc7aa6340489c31c1082e09286e61a79d4bff3c5e0a | def _structure(msg, fp=None, level=0, include_default=False):
'A handy debugging aid'
if (fp is None):
fp = sys.stdout
tab = (' ' * (level * 4))
((print >> fp), (tab + msg.get_content_type()))
if include_default:
((print >> fp), ('[%s]' % msg.get_default_type()))
else:
(print >> fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, (level + 1), include_default) | A handy debugging aid | Lib/email/Iterators.py | _structure | deadsnakes/python2.4 | 33 | python | def _structure(msg, fp=None, level=0, include_default=False):
if (fp is None):
fp = sys.stdout
tab = (' ' * (level * 4))
((print >> fp), (tab + msg.get_content_type()))
if include_default:
((print >> fp), ('[%s]' % msg.get_default_type()))
else:
(print >> fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, (level + 1), include_default) | def _structure(msg, fp=None, level=0, include_default=False):
if (fp is None):
fp = sys.stdout
tab = (' ' * (level * 4))
((print >> fp), (tab + msg.get_content_type()))
if include_default:
((print >> fp), ('[%s]' % msg.get_default_type()))
else:
(print >> fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, (level + 1), include_default)<|docstring|>A handy debugging aid<|endoftext|> |
5aaf2444eb5c46132da06f97b1afa9043147a37b1cc495b93120d2373e78748d | def __init__(self, data, license_plate):
'Initialize a RitAssist device, also a vehicle.'
self._data = data
self.attributes = {}
self.license_plate = license_plate
self.identifier = None
self.make = None
self.model = None
self.active = False
self.odo = 0
self.latitude = 0
self.longitude = 0
self.altitude = 0
self.speed = 0
self.last_seen = None
self.equipment_id = None
self.malfunction_light = False
self.fuel_level = (- 1)
self.coolant_temperature = 0
self.power_voltage = 0
self.current_maximum_speed = 0
self.current_address = None | Initialize a RitAssist device, also a vehicle. | ritassist/device.py | __init__ | depl0y/ritassist-py | 0 | python | def __init__(self, data, license_plate):
self._data = data
self.attributes = {}
self.license_plate = license_plate
self.identifier = None
self.make = None
self.model = None
self.active = False
self.odo = 0
self.latitude = 0
self.longitude = 0
self.altitude = 0
self.speed = 0
self.last_seen = None
self.equipment_id = None
self.malfunction_light = False
self.fuel_level = (- 1)
self.coolant_temperature = 0
self.power_voltage = 0
self.current_maximum_speed = 0
self.current_address = None | def __init__(self, data, license_plate):
self._data = data
self.attributes = {}
self.license_plate = license_plate
self.identifier = None
self.make = None
self.model = None
self.active = False
self.odo = 0
self.latitude = 0
self.longitude = 0
self.altitude = 0
self.speed = 0
self.last_seen = None
self.equipment_id = None
self.malfunction_light = False
self.fuel_level = (- 1)
self.coolant_temperature = 0
self.power_voltage = 0
self.current_maximum_speed = 0
self.current_address = None<|docstring|>Initialize a RitAssist device, also a vehicle.<|endoftext|> |
85814f0695f2af59d65716b7aac3ff7ad8f2205bff3419971edf50ebb6fb4864 | @property
def plate_as_id(self):
'Format the license plate so it can be used as identifier.'
return self.license_plate.replace('-', '') | Format the license plate so it can be used as identifier. | ritassist/device.py | plate_as_id | depl0y/ritassist-py | 0 | python | @property
def plate_as_id(self):
return self.license_plate.replace('-', ) | @property
def plate_as_id(self):
return self.license_plate.replace('-', )<|docstring|>Format the license plate so it can be used as identifier.<|endoftext|> |
8882cce484d1d5181013d31b8c1b785b91e88bc52c5e5f75a1dce46a225810fa | @property
def state_attributes(self):
'Return all attributes of the vehicle.'
address_attributes = None
if (self.current_address is not None):
address_attributes = self.current_address.state_attributes()
return {'id': self.identifier, 'make': self.make, 'model': self.model, 'license_plate': self.license_plate, 'active': self.active, 'odo': self.odo, 'latitude': self.latitude, 'longitude': self.longitude, 'altitude': self.altitude, 'speed': self.speed, 'last_seen': self.last_seen, 'friendly_name': self.license_plate, 'equipment_id': self.equipment_id, 'fuel_level': self.fuel_level, 'malfunction_light': self.malfunction_light, 'coolant_temperature': self.coolant_temperature, 'power_voltage': self.power_voltage, 'current_max_speed': self.current_maximum_speed, 'current_address': address_attributes} | Return all attributes of the vehicle. | ritassist/device.py | state_attributes | depl0y/ritassist-py | 0 | python | @property
def state_attributes(self):
address_attributes = None
if (self.current_address is not None):
address_attributes = self.current_address.state_attributes()
return {'id': self.identifier, 'make': self.make, 'model': self.model, 'license_plate': self.license_plate, 'active': self.active, 'odo': self.odo, 'latitude': self.latitude, 'longitude': self.longitude, 'altitude': self.altitude, 'speed': self.speed, 'last_seen': self.last_seen, 'friendly_name': self.license_plate, 'equipment_id': self.equipment_id, 'fuel_level': self.fuel_level, 'malfunction_light': self.malfunction_light, 'coolant_temperature': self.coolant_temperature, 'power_voltage': self.power_voltage, 'current_max_speed': self.current_maximum_speed, 'current_address': address_attributes} | @property
def state_attributes(self):
address_attributes = None
if (self.current_address is not None):
address_attributes = self.current_address.state_attributes()
return {'id': self.identifier, 'make': self.make, 'model': self.model, 'license_plate': self.license_plate, 'active': self.active, 'odo': self.odo, 'latitude': self.latitude, 'longitude': self.longitude, 'altitude': self.altitude, 'speed': self.speed, 'last_seen': self.last_seen, 'friendly_name': self.license_plate, 'equipment_id': self.equipment_id, 'fuel_level': self.fuel_level, 'malfunction_light': self.malfunction_light, 'coolant_temperature': self.coolant_temperature, 'power_voltage': self.power_voltage, 'current_max_speed': self.current_maximum_speed, 'current_address': address_attributes}<|docstring|>Return all attributes of the vehicle.<|endoftext|> |
f13d6d9e3540efcc924c321ec90e0ebf4ace9d5c27ad1ef77e6390ee383aa2ce | def get_trips(self, authentication_info, start, end):
'Get trips for this device between start and end.'
import requests
if ((authentication_info is None) or (not authentication_info.is_valid())):
return []
data_url = 'https://api.fleetgo.com/api/trips/GetTrips'
query = f'?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True'
header = authentication_info.create_header()
response = requests.get((data_url + query), headers=header)
trips = response.json()
result = []
for trip_json in trips:
trip = Trip(trip_json)
result.append(trip)
return result | Get trips for this device between start and end. | ritassist/device.py | get_trips | depl0y/ritassist-py | 0 | python | def get_trips(self, authentication_info, start, end):
import requests
if ((authentication_info is None) or (not authentication_info.is_valid())):
return []
data_url = 'https://api.fleetgo.com/api/trips/GetTrips'
query = f'?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True'
header = authentication_info.create_header()
response = requests.get((data_url + query), headers=header)
trips = response.json()
result = []
for trip_json in trips:
trip = Trip(trip_json)
result.append(trip)
return result | def get_trips(self, authentication_info, start, end):
import requests
if ((authentication_info is None) or (not authentication_info.is_valid())):
return []
data_url = 'https://api.fleetgo.com/api/trips/GetTrips'
query = f'?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True'
header = authentication_info.create_header()
response = requests.get((data_url + query), headers=header)
trips = response.json()
result = []
for trip_json in trips:
trip = Trip(trip_json)
result.append(trip)
return result<|docstring|>Get trips for this device between start and end.<|endoftext|> |
b69c0350ca519c9b5bbf5a5a822ea3cad959f7591ec5d7d1b575a751b290508b | def get_extra_vehicle_info(self, authentication_info):
'Get extra data from the API.'
import requests
base_url = 'https://app.fleetgo.com/GenericServiceJSONP.ashx'
query = '?f=CheckExtraVehicleInfo&token={token}&equipmentId={identifier}&lastHash=null&padding=false'
parameters = {'token': authentication_info.access_token, 'identifier': str(self.identifier)}
response = requests.get((base_url + query.format(**parameters)))
json = response.json()
self.malfunction_light = json['MalfunctionIndicatorLight']
self.fuel_level = json['FuelLevel']
self.coolant_temperature = json['EngineCoolantTemperature']
self.power_voltage = json['PowerVoltage'] | Get extra data from the API. | ritassist/device.py | get_extra_vehicle_info | depl0y/ritassist-py | 0 | python | def get_extra_vehicle_info(self, authentication_info):
import requests
base_url = 'https://app.fleetgo.com/GenericServiceJSONP.ashx'
query = '?f=CheckExtraVehicleInfo&token={token}&equipmentId={identifier}&lastHash=null&padding=false'
parameters = {'token': authentication_info.access_token, 'identifier': str(self.identifier)}
response = requests.get((base_url + query.format(**parameters)))
json = response.json()
self.malfunction_light = json['MalfunctionIndicatorLight']
self.fuel_level = json['FuelLevel']
self.coolant_temperature = json['EngineCoolantTemperature']
self.power_voltage = json['PowerVoltage'] | def get_extra_vehicle_info(self, authentication_info):
import requests
base_url = 'https://app.fleetgo.com/GenericServiceJSONP.ashx'
query = '?f=CheckExtraVehicleInfo&token={token}&equipmentId={identifier}&lastHash=null&padding=false'
parameters = {'token': authentication_info.access_token, 'identifier': str(self.identifier)}
response = requests.get((base_url + query.format(**parameters)))
json = response.json()
self.malfunction_light = json['MalfunctionIndicatorLight']
self.fuel_level = json['FuelLevel']
self.coolant_temperature = json['EngineCoolantTemperature']
self.power_voltage = json['PowerVoltage']<|docstring|>Get extra data from the API.<|endoftext|> |
d0072c6dfc9d09febaa87b0ea7c4ccccd0d700d16a8917f4cadae0dd2bcceaad | def update_from_json(self, json_device):
'Set all attributes based on API response.'
self.identifier = json_device['Id']
self.license_plate = json_device['EquipmentHeader']['SerialNumber']
self.make = json_device['EquipmentHeader']['Make']
self.model = json_device['EquipmentHeader']['Model']
self.equipment_id = json_device['EquipmentHeader']['EquipmentID']
self.active = json_device['EngineRunning']
self.odo = json_device['Odometer']
self.latitude = json_device['Location']['Latitude']
self.longitude = json_device['Location']['Longitude']
self.altitude = json_device['Location']['Altitude']
self.speed = json_device['Speed']
self.last_seen = json_device['Location']['DateTime'] | Set all attributes based on API response. | ritassist/device.py | update_from_json | depl0y/ritassist-py | 0 | python | def update_from_json(self, json_device):
self.identifier = json_device['Id']
self.license_plate = json_device['EquipmentHeader']['SerialNumber']
self.make = json_device['EquipmentHeader']['Make']
self.model = json_device['EquipmentHeader']['Model']
self.equipment_id = json_device['EquipmentHeader']['EquipmentID']
self.active = json_device['EngineRunning']
self.odo = json_device['Odometer']
self.latitude = json_device['Location']['Latitude']
self.longitude = json_device['Location']['Longitude']
self.altitude = json_device['Location']['Altitude']
self.speed = json_device['Speed']
self.last_seen = json_device['Location']['DateTime'] | def update_from_json(self, json_device):
self.identifier = json_device['Id']
self.license_plate = json_device['EquipmentHeader']['SerialNumber']
self.make = json_device['EquipmentHeader']['Make']
self.model = json_device['EquipmentHeader']['Model']
self.equipment_id = json_device['EquipmentHeader']['EquipmentID']
self.active = json_device['EngineRunning']
self.odo = json_device['Odometer']
self.latitude = json_device['Location']['Latitude']
self.longitude = json_device['Location']['Longitude']
self.altitude = json_device['Location']['Altitude']
self.speed = json_device['Speed']
self.last_seen = json_device['Location']['DateTime']<|docstring|>Set all attributes based on API response.<|endoftext|> |
69474d745fbea55b70029059df06c875f815f2e42ec6e9112af3d47b733048f3 | def render_network(surface, network, values):
'\n Zeichnet die Minimap und den Netzwerkgraphen \n \n Argumente:\n surface: ein pygame.Surface der Groesse 750 x 180 Pixel.\n Darauf soll der Graph und die Minimap gezeichnet werden.\n network: das eigen implementierte Netzwerk (in network.py), dessen Graph gezeichnet werden soll.\n values: eine Liste von 27x18 = 486 Werten, welche die aktuelle diskrete Spielsituation darstellen\n die Werte haben folgende Bedeutung:\n 1 steht fuer begehbaren Block\n -1 steht fuer einen Gegner\n 0 leerer Raum\n Die Spielfigur befindet sich immer ca. bei der Position (10, 9) und (10, 10).\n '
colors = {1: (255, 255, 255), (- 1): (255, 0, 0)}
pygame.draw.rect(surface, (128, 128, 128, 128), (0, 0, (27 * TILESIZE), (18 * TILESIZE)))
for y in range(18):
for x in range(27):
if (values[((y * 27) + x)] != 0):
color = colors[values[((y * 27) + x)]]
surface.fill(color, ((TILESIZE * x), (TILESIZE * y), TILESIZE, TILESIZE))
pygame.draw.rect(surface, (0, 0, 0), ((TILESIZE * x), (TILESIZE * y), TILESIZE, TILESIZE), 1)
node_dict = {}
possible_position = [((x * TILESIZE), (y * TILESIZE)) for x in range(28, 60) for y in range(0, 18)]
for node in network.genome.hidden_nodes_dict:
position = random.choice(possible_position)
possible_position.remove(position)
node_dict[node] = position
y_output_node = 4
for output_node in network.genome.output_nodes_dict:
node_dict[output_node] = ((65 * TILESIZE), (y_output_node * TILESIZE))
y_output_node += 5
for _ in node_dict:
x = node_dict[_][0]
y = node_dict[_][1]
surface.fill(colors[1], (x, y, TILESIZE, TILESIZE))
pygame.draw.rect(surface, (0, 0, 0), (x, y, TILESIZE, TILESIZE), 1)
for y in range(18):
for x in range(27):
node_dict[f'in_{((y * 27) + x)}'] = [(x * TILESIZE), (y * TILESIZE)]
connection_dict = {}
line_colors = {1: (0, 201, 87), (- 1): (255, 0, 0), 0: (255, 255, 0)}
hidden_output_nodes = {**network.genome.hidden_nodes_dict, **network.genome.output_nodes_dict}
for node in hidden_output_nodes:
connection_dict[node] = []
for link in hidden_output_nodes[node].links:
if (link[0].node_type == 'input'):
pygame.draw.rect(surface, line_colors[1], (node_dict[link[0].node_name][0], node_dict[link[0].node_name][1], TILESIZE, TILESIZE), 1)
if (link[1] == 1):
connection_dict[node].append((link[0].node_name, line_colors[1]))
elif (link[1] == (- 1)):
connection_dict[node].append((link[0].node_name, line_colors[(- 1)]))
else:
connection_dict[node].append((link[0].node_name, line_colors[0]))
for node in connection_dict:
for i in range(len(connection_dict[node])):
pygame.draw.line(surface, connection_dict[node][i][1], ((node_dict[connection_dict[node][i][0]][0] + (TILESIZE / 2)), (node_dict[connection_dict[node][i][0]][1] + (TILESIZE / 2))), ((node_dict[node][0] + (TILESIZE / 2)), (node_dict[node][1] + (TILESIZE / 2))), 1) | Zeichnet die Minimap und den Netzwerkgraphen
Argumente:
surface: ein pygame.Surface der Groesse 750 x 180 Pixel.
Darauf soll der Graph und die Minimap gezeichnet werden.
network: das eigen implementierte Netzwerk (in network.py), dessen Graph gezeichnet werden soll.
values: eine Liste von 27x18 = 486 Werten, welche die aktuelle diskrete Spielsituation darstellen
die Werte haben folgende Bedeutung:
1 steht fuer begehbaren Block
-1 steht fuer einen Gegner
0 leerer Raum
Die Spielfigur befindet sich immer ca. bei der Position (10, 9) und (10, 10). | Gadakeco_Code/src/neat/networkrenderer.py | render_network | YueNing/gadakeco-ml | 3 | python | def render_network(surface, network, values):
'\n Zeichnet die Minimap und den Netzwerkgraphen \n \n Argumente:\n surface: ein pygame.Surface der Groesse 750 x 180 Pixel.\n Darauf soll der Graph und die Minimap gezeichnet werden.\n network: das eigen implementierte Netzwerk (in network.py), dessen Graph gezeichnet werden soll.\n values: eine Liste von 27x18 = 486 Werten, welche die aktuelle diskrete Spielsituation darstellen\n die Werte haben folgende Bedeutung:\n 1 steht fuer begehbaren Block\n -1 steht fuer einen Gegner\n 0 leerer Raum\n Die Spielfigur befindet sich immer ca. bei der Position (10, 9) und (10, 10).\n '
colors = {1: (255, 255, 255), (- 1): (255, 0, 0)}
pygame.draw.rect(surface, (128, 128, 128, 128), (0, 0, (27 * TILESIZE), (18 * TILESIZE)))
for y in range(18):
for x in range(27):
if (values[((y * 27) + x)] != 0):
color = colors[values[((y * 27) + x)]]
surface.fill(color, ((TILESIZE * x), (TILESIZE * y), TILESIZE, TILESIZE))
pygame.draw.rect(surface, (0, 0, 0), ((TILESIZE * x), (TILESIZE * y), TILESIZE, TILESIZE), 1)
node_dict = {}
possible_position = [((x * TILESIZE), (y * TILESIZE)) for x in range(28, 60) for y in range(0, 18)]
for node in network.genome.hidden_nodes_dict:
position = random.choice(possible_position)
possible_position.remove(position)
node_dict[node] = position
y_output_node = 4
for output_node in network.genome.output_nodes_dict:
node_dict[output_node] = ((65 * TILESIZE), (y_output_node * TILESIZE))
y_output_node += 5
for _ in node_dict:
x = node_dict[_][0]
y = node_dict[_][1]
surface.fill(colors[1], (x, y, TILESIZE, TILESIZE))
pygame.draw.rect(surface, (0, 0, 0), (x, y, TILESIZE, TILESIZE), 1)
for y in range(18):
for x in range(27):
node_dict[f'in_{((y * 27) + x)}'] = [(x * TILESIZE), (y * TILESIZE)]
connection_dict = {}
line_colors = {1: (0, 201, 87), (- 1): (255, 0, 0), 0: (255, 255, 0)}
hidden_output_nodes = {**network.genome.hidden_nodes_dict, **network.genome.output_nodes_dict}
for node in hidden_output_nodes:
connection_dict[node] = []
for link in hidden_output_nodes[node].links:
if (link[0].node_type == 'input'):
pygame.draw.rect(surface, line_colors[1], (node_dict[link[0].node_name][0], node_dict[link[0].node_name][1], TILESIZE, TILESIZE), 1)
if (link[1] == 1):
connection_dict[node].append((link[0].node_name, line_colors[1]))
elif (link[1] == (- 1)):
connection_dict[node].append((link[0].node_name, line_colors[(- 1)]))
else:
connection_dict[node].append((link[0].node_name, line_colors[0]))
for node in connection_dict:
for i in range(len(connection_dict[node])):
pygame.draw.line(surface, connection_dict[node][i][1], ((node_dict[connection_dict[node][i][0]][0] + (TILESIZE / 2)), (node_dict[connection_dict[node][i][0]][1] + (TILESIZE / 2))), ((node_dict[node][0] + (TILESIZE / 2)), (node_dict[node][1] + (TILESIZE / 2))), 1) | def render_network(surface, network, values):
'\n Zeichnet die Minimap und den Netzwerkgraphen \n \n Argumente:\n surface: ein pygame.Surface der Groesse 750 x 180 Pixel.\n Darauf soll der Graph und die Minimap gezeichnet werden.\n network: das eigen implementierte Netzwerk (in network.py), dessen Graph gezeichnet werden soll.\n values: eine Liste von 27x18 = 486 Werten, welche die aktuelle diskrete Spielsituation darstellen\n die Werte haben folgende Bedeutung:\n 1 steht fuer begehbaren Block\n -1 steht fuer einen Gegner\n 0 leerer Raum\n Die Spielfigur befindet sich immer ca. bei der Position (10, 9) und (10, 10).\n '
colors = {1: (255, 255, 255), (- 1): (255, 0, 0)}
pygame.draw.rect(surface, (128, 128, 128, 128), (0, 0, (27 * TILESIZE), (18 * TILESIZE)))
for y in range(18):
for x in range(27):
if (values[((y * 27) + x)] != 0):
color = colors[values[((y * 27) + x)]]
surface.fill(color, ((TILESIZE * x), (TILESIZE * y), TILESIZE, TILESIZE))
pygame.draw.rect(surface, (0, 0, 0), ((TILESIZE * x), (TILESIZE * y), TILESIZE, TILESIZE), 1)
node_dict = {}
possible_position = [((x * TILESIZE), (y * TILESIZE)) for x in range(28, 60) for y in range(0, 18)]
for node in network.genome.hidden_nodes_dict:
position = random.choice(possible_position)
possible_position.remove(position)
node_dict[node] = position
y_output_node = 4
for output_node in network.genome.output_nodes_dict:
node_dict[output_node] = ((65 * TILESIZE), (y_output_node * TILESIZE))
y_output_node += 5
for _ in node_dict:
x = node_dict[_][0]
y = node_dict[_][1]
surface.fill(colors[1], (x, y, TILESIZE, TILESIZE))
pygame.draw.rect(surface, (0, 0, 0), (x, y, TILESIZE, TILESIZE), 1)
for y in range(18):
for x in range(27):
node_dict[f'in_{((y * 27) + x)}'] = [(x * TILESIZE), (y * TILESIZE)]
connection_dict = {}
line_colors = {1: (0, 201, 87), (- 1): (255, 0, 0), 0: (255, 255, 0)}
hidden_output_nodes = {**network.genome.hidden_nodes_dict, **network.genome.output_nodes_dict}
for node in hidden_output_nodes:
connection_dict[node] = []
for link in hidden_output_nodes[node].links:
if (link[0].node_type == 'input'):
pygame.draw.rect(surface, line_colors[1], (node_dict[link[0].node_name][0], node_dict[link[0].node_name][1], TILESIZE, TILESIZE), 1)
if (link[1] == 1):
connection_dict[node].append((link[0].node_name, line_colors[1]))
elif (link[1] == (- 1)):
connection_dict[node].append((link[0].node_name, line_colors[(- 1)]))
else:
connection_dict[node].append((link[0].node_name, line_colors[0]))
for node in connection_dict:
for i in range(len(connection_dict[node])):
pygame.draw.line(surface, connection_dict[node][i][1], ((node_dict[connection_dict[node][i][0]][0] + (TILESIZE / 2)), (node_dict[connection_dict[node][i][0]][1] + (TILESIZE / 2))), ((node_dict[node][0] + (TILESIZE / 2)), (node_dict[node][1] + (TILESIZE / 2))), 1)<|docstring|>Zeichnet die Minimap und den Netzwerkgraphen
Argumente:
surface: ein pygame.Surface der Groesse 750 x 180 Pixel.
Darauf soll der Graph und die Minimap gezeichnet werden.
network: das eigen implementierte Netzwerk (in network.py), dessen Graph gezeichnet werden soll.
values: eine Liste von 27x18 = 486 Werten, welche die aktuelle diskrete Spielsituation darstellen
die Werte haben folgende Bedeutung:
1 steht fuer begehbaren Block
-1 steht fuer einen Gegner
0 leerer Raum
Die Spielfigur befindet sich immer ca. bei der Position (10, 9) und (10, 10).<|endoftext|> |
c30a230919ba98ddc36f516262d8095215b8e2f6b6a0d3e4679d15b0dc81c024 | @property
def table_name(self) -> str:
'\n Returns the table name\n\n Returns :\n table_name: A string which has the table name.\n '
return self.model_class.table_name | Returns the table name
Returns :
table_name: A string which has the table name. | vorm/manager.py | table_name | VarthanV/vorm | 2 | python | @property
def table_name(self) -> str:
'\n Returns the table name\n\n Returns :\n table_name: A string which has the table name.\n '
return self.model_class.table_name | @property
def table_name(self) -> str:
'\n Returns the table name\n\n Returns :\n table_name: A string which has the table name.\n '
return self.model_class.table_name<|docstring|>Returns the table name
Returns :
table_name: A string which has the table name.<|endoftext|> |
9f1f3921233a5d5e029eeb740caef7040148507c7bec23020f96ddcee3f49ee7 | @property
def _get_fields(self) -> List[str]:
'\n Returns all the name of co\n '
cursor = self._get_cursor()
cursor.execute('\n SELECT column_name, data_type FROM information_schema.columns WHERE table_name=%s\n ', (self.table_name,))
return [row['column_name'] for row in cursor.fetchall()] | Returns all the name of co | vorm/manager.py | _get_fields | VarthanV/vorm | 2 | python | @property
def _get_fields(self) -> List[str]:
'\n \n '
cursor = self._get_cursor()
cursor.execute('\n SELECT column_name, data_type FROM information_schema.columns WHERE table_name=%s\n ', (self.table_name,))
return [row['column_name'] for row in cursor.fetchall()] | @property
def _get_fields(self) -> List[str]:
'\n \n '
cursor = self._get_cursor()
cursor.execute('\n SELECT column_name, data_type FROM information_schema.columns WHERE table_name=%s\n ', (self.table_name,))
return [row['column_name'] for row in cursor.fetchall()]<|docstring|>Returns all the name of co<|endoftext|> |
9981d3289ba368d7fbb6d434474ce7fdbb38bde61fc77ed023246251b38e3e75 | @classmethod
def create_connection(cls, db_settings: dict):
'\n Creates a db connection to the specified DB driver\n and \n\n Parameters:\n db_settings(dict): The dict which contains the connection \n details of the driver to be connected.\n\n Returns :\n None \n '
cls.db_engine = db_settings.pop('driver')
if (cls.db_engine == 'mysql'):
cls._connect_to_mysql(db_settings)
if (cls.db_engine == 'postgresql'):
cls._connect_to_postgresql(db_settings)
return cls | Creates a db connection to the specified DB driver
and
Parameters:
db_settings(dict): The dict which contains the connection
details of the driver to be connected.
Returns :
None | vorm/manager.py | create_connection | VarthanV/vorm | 2 | python | @classmethod
def create_connection(cls, db_settings: dict):
'\n Creates a db connection to the specified DB driver\n and \n\n Parameters:\n db_settings(dict): The dict which contains the connection \n details of the driver to be connected.\n\n Returns :\n None \n '
cls.db_engine = db_settings.pop('driver')
if (cls.db_engine == 'mysql'):
cls._connect_to_mysql(db_settings)
if (cls.db_engine == 'postgresql'):
cls._connect_to_postgresql(db_settings)
return cls | @classmethod
def create_connection(cls, db_settings: dict):
'\n Creates a db connection to the specified DB driver\n and \n\n Parameters:\n db_settings(dict): The dict which contains the connection \n details of the driver to be connected.\n\n Returns :\n None \n '
cls.db_engine = db_settings.pop('driver')
if (cls.db_engine == 'mysql'):
cls._connect_to_mysql(db_settings)
if (cls.db_engine == 'postgresql'):
cls._connect_to_postgresql(db_settings)
return cls<|docstring|>Creates a db connection to the specified DB driver
and
Parameters:
db_settings(dict): The dict which contains the connection
details of the driver to be connected.
Returns :
None<|endoftext|> |
93addef16f5669ca7220c320c6be00800ac3b79fbbc518c98c295b8f8507ab9f | @classmethod
def _connect_to_mysql(cls, db_settings: dict):
'\n Connects to a MYSQL DB ,It is a internal which will will be used\n by the create_connection method and sets the returned connection to\n the db_connection attribute\n\n Parameters:\n db_settings(dict): The dict which contains the connection \n details of the driver to be connected. \n\n Returns:\n None \n '
try:
connection = connector.connect(**db_settings)
connection.autocommit = True
cls.db_connection = connection
except Exception as e:
raise Exception(e) | Connects to a MYSQL DB ,It is a internal which will will be used
by the create_connection method and sets the returned connection to
the db_connection attribute
Parameters:
db_settings(dict): The dict which contains the connection
details of the driver to be connected.
Returns:
None | vorm/manager.py | _connect_to_mysql | VarthanV/vorm | 2 | python | @classmethod
def _connect_to_mysql(cls, db_settings: dict):
'\n Connects to a MYSQL DB ,It is a internal which will will be used\n by the create_connection method and sets the returned connection to\n the db_connection attribute\n\n Parameters:\n db_settings(dict): The dict which contains the connection \n details of the driver to be connected. \n\n Returns:\n None \n '
try:
connection = connector.connect(**db_settings)
connection.autocommit = True
cls.db_connection = connection
except Exception as e:
raise Exception(e) | @classmethod
def _connect_to_mysql(cls, db_settings: dict):
'\n Connects to a MYSQL DB ,It is a internal which will will be used\n by the create_connection method and sets the returned connection to\n the db_connection attribute\n\n Parameters:\n db_settings(dict): The dict which contains the connection \n details of the driver to be connected. \n\n Returns:\n None \n '
try:
connection = connector.connect(**db_settings)
connection.autocommit = True
cls.db_connection = connection
except Exception as e:
raise Exception(e)<|docstring|>Connects to a MYSQL DB ,It is a internal which will will be used
by the create_connection method and sets the returned connection to
the db_connection attribute
Parameters:
db_settings(dict): The dict which contains the connection
details of the driver to be connected.
Returns:
None<|endoftext|> |
ce0e0fd3f20c7f85ff99754cf94a2afc1cd2dd1bea6f9aad8bf4ab6b2e17b6e9 | @classmethod
def _get_cursor(cls):
'\n Returns the cursor of the current db_connection\n\n Parameters:\n None\n\n Returns:\n cursor(Any) : The cursor object \n '
if (cls.db_engine == cls.MYSQL):
if (not cls.db_connection.is_connected()):
cls.db_connection.reconnect(attempts=2)
return cls.db_connection.cursor(buffered=True, dictionary=True)
return cls.db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) | Returns the cursor of the current db_connection
Parameters:
None
Returns:
cursor(Any) : The cursor object | vorm/manager.py | _get_cursor | VarthanV/vorm | 2 | python | @classmethod
def _get_cursor(cls):
'\n Returns the cursor of the current db_connection\n\n Parameters:\n None\n\n Returns:\n cursor(Any) : The cursor object \n '
if (cls.db_engine == cls.MYSQL):
if (not cls.db_connection.is_connected()):
cls.db_connection.reconnect(attempts=2)
return cls.db_connection.cursor(buffered=True, dictionary=True)
return cls.db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) | @classmethod
def _get_cursor(cls):
'\n Returns the cursor of the current db_connection\n\n Parameters:\n None\n\n Returns:\n cursor(Any) : The cursor object \n '
if (cls.db_engine == cls.MYSQL):
if (not cls.db_connection.is_connected()):
cls.db_connection.reconnect(attempts=2)
return cls.db_connection.cursor(buffered=True, dictionary=True)
return cls.db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)<|docstring|>Returns the cursor of the current db_connection
Parameters:
None
Returns:
cursor(Any) : The cursor object<|endoftext|> |
fd6ebb8e41ae35fd7305942b03b212a4bb1775b3cf5649a63f1b85f2ef69ab0a | @classmethod
def _execute_query(cls, query, variables=None):
'\n Executes the query that is passed by the caller , Gets the cursor\n from the current db_connection , Throws error if there are any that is\n caused by the gluing library\n\n Parameters:\n query(str) : A valid SQL Query\n variable(tuples|None) : The actual variables which are needed to be replaced\n in place of placeholders.\n\n Returns :\n result(Any) \n '
return cls._get_cursor().execute(query, variables) | Executes the query that is passed by the caller , Gets the cursor
from the current db_connection , Throws error if there are any that is
caused by the gluing library
Parameters:
query(str) : A valid SQL Query
variable(tuples|None) : The actual variables which are needed to be replaced
in place of placeholders.
Returns :
result(Any) | vorm/manager.py | _execute_query | VarthanV/vorm | 2 | python | @classmethod
def _execute_query(cls, query, variables=None):
'\n Executes the query that is passed by the caller , Gets the cursor\n from the current db_connection , Throws error if there are any that is\n caused by the gluing library\n\n Parameters:\n query(str) : A valid SQL Query\n variable(tuples|None) : The actual variables which are needed to be replaced\n in place of placeholders.\n\n Returns :\n result(Any) \n '
return cls._get_cursor().execute(query, variables) | @classmethod
def _execute_query(cls, query, variables=None):
'\n Executes the query that is passed by the caller , Gets the cursor\n from the current db_connection , Throws error if there are any that is\n caused by the gluing library\n\n Parameters:\n query(str) : A valid SQL Query\n variable(tuples|None) : The actual variables which are needed to be replaced\n in place of placeholders.\n\n Returns :\n result(Any) \n '
return cls._get_cursor().execute(query, variables)<|docstring|>Executes the query that is passed by the caller , Gets the cursor
from the current db_connection , Throws error if there are any that is
caused by the gluing library
Parameters:
query(str) : A valid SQL Query
variable(tuples|None) : The actual variables which are needed to be replaced
in place of placeholders.
Returns :
result(Any)<|endoftext|> |
09556cba6393f6263ba9f086d65bd84667123ef13b54aa14f7ba239fdc391c4b | @classmethod
def _evaluate_user_conditions(cls, conditions: dict) -> List:
"\n Evaluvates the user conditions and returns it as \n list of tuples\n Eg : {'price__gt':200 , 'os':'android'} - > [('price','>',200),('os','=','android')]\n\n Params:\n Conditions dict\n Returns:\n List[tuple] \n "
conditions_list = []
condition = None
for (k, v) in conditions.items():
if (not isinstance(v, _Constants.KNOWN_CLASSES)):
class_name = v.__class__.__name__.lower()
col_name = f'{class_name}_id'
condition = Condition(col_name, cls.operators_map['eq'], v.id)
else:
val = k.split(_Constants.SEPERATOR)
' Fallback to OP_EQUAL if he didnt mention any operator'
if (len(val) == 1):
operator = cls.operators_map[_Constants.OP_EQUAL]
condition = Condition(val[0], operator, v)
else:
condition = Condition(val[0], cls.operators_map[val[1]], v)
conditions_list.append(condition)
return conditions_list | Evaluvates the user conditions and returns it as
list of tuples
Eg : {'price__gt':200 , 'os':'android'} - > [('price','>',200),('os','=','android')]
Params:
Conditions dict
Returns:
List[tuple] | vorm/manager.py | _evaluate_user_conditions | VarthanV/vorm | 2 | python | @classmethod
def _evaluate_user_conditions(cls, conditions: dict) -> List:
"\n Evaluvates the user conditions and returns it as \n list of tuples\n Eg : {'price__gt':200 , 'os':'android'} - > [('price','>',200),('os','=','android')]\n\n Params:\n Conditions dict\n Returns:\n List[tuple] \n "
conditions_list = []
condition = None
for (k, v) in conditions.items():
if (not isinstance(v, _Constants.KNOWN_CLASSES)):
class_name = v.__class__.__name__.lower()
col_name = f'{class_name}_id'
condition = Condition(col_name, cls.operators_map['eq'], v.id)
else:
val = k.split(_Constants.SEPERATOR)
' Fallback to OP_EQUAL if he didnt mention any operator'
if (len(val) == 1):
operator = cls.operators_map[_Constants.OP_EQUAL]
condition = Condition(val[0], operator, v)
else:
condition = Condition(val[0], cls.operators_map[val[1]], v)
conditions_list.append(condition)
return conditions_list | @classmethod
def _evaluate_user_conditions(cls, conditions: dict) -> List:
"\n Evaluvates the user conditions and returns it as \n list of tuples\n Eg : {'price__gt':200 , 'os':'android'} - > [('price','>',200),('os','=','android')]\n\n Params:\n Conditions dict\n Returns:\n List[tuple] \n "
conditions_list = []
condition = None
for (k, v) in conditions.items():
if (not isinstance(v, _Constants.KNOWN_CLASSES)):
class_name = v.__class__.__name__.lower()
col_name = f'{class_name}_id'
condition = Condition(col_name, cls.operators_map['eq'], v.id)
else:
val = k.split(_Constants.SEPERATOR)
' Fallback to OP_EQUAL if he didnt mention any operator'
if (len(val) == 1):
operator = cls.operators_map[_Constants.OP_EQUAL]
condition = Condition(val[0], operator, v)
else:
condition = Condition(val[0], cls.operators_map[val[1]], v)
conditions_list.append(condition)
return conditions_list<|docstring|>Evaluvates the user conditions and returns it as
list of tuples
Eg : {'price__gt':200 , 'os':'android'} - > [('price','>',200),('os','=','android')]
Params:
Conditions dict
Returns:
List[tuple]<|endoftext|> |
7ef9c601a4262b37a75ef03921c81c5e9a34efc4a8f87f72c6a0975ce3c0a8c5 | @classmethod
def migrate(cls, table):
'\n A wrapper for the CREATE TABLE query of the respective\n DB drivers\n\n Parameters :\n table(BaseModel) : The class which inherits base model and has \n the required fields and their types mentioned.\n Example :\n class Student(BaseModel):\n table_name = "students"\n name = fields.CharField(max_length=250, nullable=False)\n standard = fields.CharField(max_length=100,nullable=False)\n age = fields.IntegerField()\n created_at = fields.DateField()\n\n Returns :\n result(Any) \n '
cls.model_class = table
if (not table.table_name):
raise ValueError('Expected to have a table_name')
_create_sql = cls._get_create_sql(table)
cls._execute_query(query=_create_sql) | A wrapper for the CREATE TABLE query of the respective
DB drivers
Parameters :
table(BaseModel) : The class which inherits base model and has
the required fields and their types mentioned.
Example :
class Student(BaseModel):
table_name = "students"
name = fields.CharField(max_length=250, nullable=False)
standard = fields.CharField(max_length=100,nullable=False)
age = fields.IntegerField()
created_at = fields.DateField()
Returns :
result(Any) | vorm/manager.py | migrate | VarthanV/vorm | 2 | python | @classmethod
def migrate(cls, table):
'\n A wrapper for the CREATE TABLE query of the respective\n DB drivers\n\n Parameters :\n table(BaseModel) : The class which inherits base model and has \n the required fields and their types mentioned.\n Example :\n class Student(BaseModel):\n table_name = "students"\n name = fields.CharField(max_length=250, nullable=False)\n standard = fields.CharField(max_length=100,nullable=False)\n age = fields.IntegerField()\n created_at = fields.DateField()\n\n Returns :\n result(Any) \n '
cls.model_class = table
if (not table.table_name):
raise ValueError('Expected to have a table_name')
_create_sql = cls._get_create_sql(table)
cls._execute_query(query=_create_sql) | @classmethod
def migrate(cls, table):
'\n A wrapper for the CREATE TABLE query of the respective\n DB drivers\n\n Parameters :\n table(BaseModel) : The class which inherits base model and has \n the required fields and their types mentioned.\n Example :\n class Student(BaseModel):\n table_name = "students"\n name = fields.CharField(max_length=250, nullable=False)\n standard = fields.CharField(max_length=100,nullable=False)\n age = fields.IntegerField()\n created_at = fields.DateField()\n\n Returns :\n result(Any) \n '
cls.model_class = table
if (not table.table_name):
raise ValueError('Expected to have a table_name')
_create_sql = cls._get_create_sql(table)
cls._execute_query(query=_create_sql)<|docstring|>A wrapper for the CREATE TABLE query of the respective
DB drivers
Parameters :
table(BaseModel) : The class which inherits base model and has
the required fields and their types mentioned.
Example :
class Student(BaseModel):
table_name = "students"
name = fields.CharField(max_length=250, nullable=False)
standard = fields.CharField(max_length=100,nullable=False)
age = fields.IntegerField()
created_at = fields.DateField()
Returns :
result(Any)<|endoftext|> |
cc35d1620583af39bb985cf96030974ce699c83f27c6f4895a45576b78acb115 | @classmethod
def _get_create_sql(cls, table) -> str:
'\n The function which converts the user defined model class \n in to a tuple form (column_name,SQL Attributes) ,Each\n field is evaluated and its corresponding SQL representations\n are added ,After evaluation a sql query is generated for the same.\n\n Parameters:\n table(BaseModel) - An instance of BaseModel\n\n Returns:\n query(str) - The sql query \n '
(_type, field_name) = cls._get_corresponding_auto_increment_sql_type()
_fields_list = [('`id`', '{type} NOT NULL {field} PRIMARY KEY'.format(type=_type, field=field_name))]
for (name, field) in inspect.getmembers(table):
attr_string = ''
if (name.startswith('_') or (name in _Constants.FIELDS_TO_EXCLUDE_IN_INSPECTION)):
continue
if isinstance(field, fields.ForeignKey):
_col_name = '{}_id'.format(name)
_fields_list.append((_col_name, 'INT'))
_fields_list.append(('FOREIGN KEY({column})'.format(column=_col_name), 'REFERENCES {table_name}({referred_column}) ON DELETE {on_delete}'.format(table_name=field.table.table_name, referred_column='id', on_delete=field.on_delete)))
continue
if isinstance(field, fields.CharField):
if (not field.max_length):
raise ValueError('A char field always requires a max_length property')
else:
attr_string += 'VARCHAR({}) '.format(field.max_length)
if (not isinstance(field, fields.CharField)):
attr_string += '{} '.format(field.field_sql_name)
if field.auto_increment:
attr_string += 'AUTO_INCREMENT '
if (not field.nullable):
attr_string += 'NOT NULL '
if (field.default is not fields.NotProvided):
if isinstance(field, fields.CharField):
attr_string += "DEFAULT '{}'".format(field.default)
elif isinstance(field, fields.IntegerField):
attr_string += 'DEFAULT {}'.format(field.default)
_fields_list.append((cls._escape_column_names(name), attr_string))
print(_fields_list)
_fields_list = [' '.join(x) for x in _fields_list]
return _Constants.CREATE_TABLE_SQL.format(name=table.table_name, fields=', '.join(_fields_list)) | The function which converts the user defined model class
in to a tuple form (column_name,SQL Attributes) ,Each
field is evaluated and its corresponding SQL representations
are added ,After evaluation a sql query is generated for the same.
Parameters:
table(BaseModel) - An instance of BaseModel
Returns:
query(str) - The sql query | vorm/manager.py | _get_create_sql | VarthanV/vorm | 2 | python | @classmethod
def _get_create_sql(cls, table) -> str:
'\n The function which converts the user defined model class \n in to a tuple form (column_name,SQL Attributes) ,Each\n field is evaluated and its corresponding SQL representations\n are added ,After evaluation a sql query is generated for the same.\n\n Parameters:\n table(BaseModel) - An instance of BaseModel\n\n Returns:\n query(str) - The sql query \n '
(_type, field_name) = cls._get_corresponding_auto_increment_sql_type()
_fields_list = [('`id`', '{type} NOT NULL {field} PRIMARY KEY'.format(type=_type, field=field_name))]
for (name, field) in inspect.getmembers(table):
attr_string =
if (name.startswith('_') or (name in _Constants.FIELDS_TO_EXCLUDE_IN_INSPECTION)):
continue
if isinstance(field, fields.ForeignKey):
_col_name = '{}_id'.format(name)
_fields_list.append((_col_name, 'INT'))
_fields_list.append(('FOREIGN KEY({column})'.format(column=_col_name), 'REFERENCES {table_name}({referred_column}) ON DELETE {on_delete}'.format(table_name=field.table.table_name, referred_column='id', on_delete=field.on_delete)))
continue
if isinstance(field, fields.CharField):
if (not field.max_length):
raise ValueError('A char field always requires a max_length property')
else:
attr_string += 'VARCHAR({}) '.format(field.max_length)
if (not isinstance(field, fields.CharField)):
attr_string += '{} '.format(field.field_sql_name)
if field.auto_increment:
attr_string += 'AUTO_INCREMENT '
if (not field.nullable):
attr_string += 'NOT NULL '
if (field.default is not fields.NotProvided):
if isinstance(field, fields.CharField):
attr_string += "DEFAULT '{}'".format(field.default)
elif isinstance(field, fields.IntegerField):
attr_string += 'DEFAULT {}'.format(field.default)
_fields_list.append((cls._escape_column_names(name), attr_string))
print(_fields_list)
_fields_list = [' '.join(x) for x in _fields_list]
return _Constants.CREATE_TABLE_SQL.format(name=table.table_name, fields=', '.join(_fields_list)) | @classmethod
def _get_create_sql(cls, table) -> str:
'\n The function which converts the user defined model class \n in to a tuple form (column_name,SQL Attributes) ,Each\n field is evaluated and its corresponding SQL representations\n are added ,After evaluation a sql query is generated for the same.\n\n Parameters:\n table(BaseModel) - An instance of BaseModel\n\n Returns:\n query(str) - The sql query \n '
(_type, field_name) = cls._get_corresponding_auto_increment_sql_type()
_fields_list = [('`id`', '{type} NOT NULL {field} PRIMARY KEY'.format(type=_type, field=field_name))]
for (name, field) in inspect.getmembers(table):
attr_string =
if (name.startswith('_') or (name in _Constants.FIELDS_TO_EXCLUDE_IN_INSPECTION)):
continue
if isinstance(field, fields.ForeignKey):
_col_name = '{}_id'.format(name)
_fields_list.append((_col_name, 'INT'))
_fields_list.append(('FOREIGN KEY({column})'.format(column=_col_name), 'REFERENCES {table_name}({referred_column}) ON DELETE {on_delete}'.format(table_name=field.table.table_name, referred_column='id', on_delete=field.on_delete)))
continue
if isinstance(field, fields.CharField):
if (not field.max_length):
raise ValueError('A char field always requires a max_length property')
else:
attr_string += 'VARCHAR({}) '.format(field.max_length)
if (not isinstance(field, fields.CharField)):
attr_string += '{} '.format(field.field_sql_name)
if field.auto_increment:
attr_string += 'AUTO_INCREMENT '
if (not field.nullable):
attr_string += 'NOT NULL '
if (field.default is not fields.NotProvided):
if isinstance(field, fields.CharField):
attr_string += "DEFAULT '{}'".format(field.default)
elif isinstance(field, fields.IntegerField):
attr_string += 'DEFAULT {}'.format(field.default)
_fields_list.append((cls._escape_column_names(name), attr_string))
print(_fields_list)
_fields_list = [' '.join(x) for x in _fields_list]
return _Constants.CREATE_TABLE_SQL.format(name=table.table_name, fields=', '.join(_fields_list))<|docstring|>The function which converts the user defined model class
in to a tuple form (column_name,SQL Attributes) ,Each
field is evaluated and its corresponding SQL representations
are added ,After evaluation a sql query is generated for the same.
Parameters:
table(BaseModel) - An instance of BaseModel
Returns:
query(str) - The sql query<|endoftext|> |
c11a8f9bdd8cee7ca93f7e203450c7afaf4c3aab398214cfc55659e64d0138ad | def _return_conditions_as_sql_string(self, conditions: List) -> str:
'\n Returns the users condtion as string joined with AND clause\n\n Params: \n conditions(List) : The list of user conditions as tuple\n\n Returns:\n sql_string(str) : The sql equivalent string \n '
return ' AND '.join(['{} {} {}'.format(self._escape_column_names(i.column), i.operator, self._get_escaped_value(i.value)) for i in conditions]) | Returns the users condtion as string joined with AND clause
Params:
conditions(List) : The list of user conditions as tuple
Returns:
sql_string(str) : The sql equivalent string | vorm/manager.py | _return_conditions_as_sql_string | VarthanV/vorm | 2 | python | def _return_conditions_as_sql_string(self, conditions: List) -> str:
'\n Returns the users condtion as string joined with AND clause\n\n Params: \n conditions(List) : The list of user conditions as tuple\n\n Returns:\n sql_string(str) : The sql equivalent string \n '
return ' AND '.join(['{} {} {}'.format(self._escape_column_names(i.column), i.operator, self._get_escaped_value(i.value)) for i in conditions]) | def _return_conditions_as_sql_string(self, conditions: List) -> str:
'\n Returns the users condtion as string joined with AND clause\n\n Params: \n conditions(List) : The list of user conditions as tuple\n\n Returns:\n sql_string(str) : The sql equivalent string \n '
return ' AND '.join(['{} {} {}'.format(self._escape_column_names(i.column), i.operator, self._get_escaped_value(i.value)) for i in conditions])<|docstring|>Returns the users condtion as string joined with AND clause
Params:
conditions(List) : The list of user conditions as tuple
Returns:
sql_string(str) : The sql equivalent string<|endoftext|> |
1fd94aab56513bc845d2fafb04b5a6ed8fb3cda2507dcf2cb196cd8d39753483 | def _dict_to_model_class(self, d, table):
'\n Converts the given dict to the specified model_class\n\n Params:\n d(dict) : The dict representation of the class\n table(BaseModel | Any) : The class which we want the values to be spread\n\n Returns :\n table : The table with the values of the dict spreaded \n '
return table(**d) | Converts the given dict to the specified model_class
Params:
d(dict) : The dict representation of the class
table(BaseModel | Any) : The class which we want the values to be spread
Returns :
table : The table with the values of the dict spreaded | vorm/manager.py | _dict_to_model_class | VarthanV/vorm | 2 | python | def _dict_to_model_class(self, d, table):
'\n Converts the given dict to the specified model_class\n\n Params:\n d(dict) : The dict representation of the class\n table(BaseModel | Any) : The class which we want the values to be spread\n\n Returns :\n table : The table with the values of the dict spreaded \n '
return table(**d) | def _dict_to_model_class(self, d, table):
'\n Converts the given dict to the specified model_class\n\n Params:\n d(dict) : The dict representation of the class\n table(BaseModel | Any) : The class which we want the values to be spread\n\n Returns :\n table : The table with the values of the dict spreaded \n '
return table(**d)<|docstring|>Converts the given dict to the specified model_class
Params:
d(dict) : The dict representation of the class
table(BaseModel | Any) : The class which we want the values to be spread
Returns :
table : The table with the values of the dict spreaded<|endoftext|> |
537082e11cf876b8db043dabda5b07aecb0f41b79df02202756339bedc865e0f | def _return_foreign_keys_from_a_table(self, model_class, query_dict):
'\n Finds foreign_key fields from a table ,queries it\n and attaches to the found attribute in the model class.\n '
for (name, field) in inspect.getmembers(model_class):
if isinstance(field, fields.ForeignKey):
column_name = '{}_id'.format(name)
result_set = field.table.objects.where(id__eq=query_dict[column_name])
setattr(model_class, name, result_set)
return model_class | Finds foreign_key fields from a table ,queries it
and attaches to the found attribute in the model class. | vorm/manager.py | _return_foreign_keys_from_a_table | VarthanV/vorm | 2 | python | def _return_foreign_keys_from_a_table(self, model_class, query_dict):
'\n Finds foreign_key fields from a table ,queries it\n and attaches to the found attribute in the model class.\n '
for (name, field) in inspect.getmembers(model_class):
if isinstance(field, fields.ForeignKey):
column_name = '{}_id'.format(name)
result_set = field.table.objects.where(id__eq=query_dict[column_name])
setattr(model_class, name, result_set)
return model_class | def _return_foreign_keys_from_a_table(self, model_class, query_dict):
'\n Finds foreign_key fields from a table ,queries it\n and attaches to the found attribute in the model class.\n '
for (name, field) in inspect.getmembers(model_class):
if isinstance(field, fields.ForeignKey):
column_name = '{}_id'.format(name)
result_set = field.table.objects.where(id__eq=query_dict[column_name])
setattr(model_class, name, result_set)
return model_class<|docstring|>Finds foreign_key fields from a table ,queries it
and attaches to the found attribute in the model class.<|endoftext|> |
f0797c33590b14d08a908a099cfc901a366efe994df38666e53be7a616825f87 | def where(self, fetch_relations=False, limit=None, **kwargs) -> List:
"\n A wrapper for the SQL's WHERE clause , You mention the constraints for the\n attributes , It will convert them in equivalent SQL query and returns a instance \n of the model class\n\n Eg : first_employee = Employee.objects.get(id=1)\n\n Params:\n kwargs :The constraints that you want to query the DB with\n fetch_relations(bool) : Defaults to false, If you want to fetch the foreign keys\n you can set it to True\n limit(int): The limit of the query set\n\n Returns :\n The model class \n "
cur2 = self._get_cursor()
print('kwargs is', kwargs)
if bool(kwargs):
condition_list = self._evaluate_user_conditions(kwargs)
_sql_query = _Constants.SELECT_WHERE_SQL.format(name=self.table_name, fields='*', query=self._return_conditions_as_sql_string(condition_list))
else:
_sql_query = _Constants.SELECT_ALL_SQL.format(fields='*', name=self.table_name)
if limit:
_sql_query += ' LIMIT {limit} ;'.format(limit=limit)
else:
_sql_query += ' ;'
cur2.execute(_sql_query)
rows = cur2.fetchall()
result = list()
for i in rows:
result_class = self._dict_to_model_class(i, self.model_class)
if fetch_relations:
result_class = self._return_foreign_keys_from_a_table(result_class, i)
result.append(result_class)
return result | A wrapper for the SQL's WHERE clause , You mention the constraints for the
attributes , It will convert them in equivalent SQL query and returns a instance
of the model class
Eg : first_employee = Employee.objects.get(id=1)
Params:
kwargs :The constraints that you want to query the DB with
fetch_relations(bool) : Defaults to false, If you want to fetch the foreign keys
you can set it to True
limit(int): The limit of the query set
Returns :
The model class | vorm/manager.py | where | VarthanV/vorm | 2 | python | def where(self, fetch_relations=False, limit=None, **kwargs) -> List:
"\n A wrapper for the SQL's WHERE clause , You mention the constraints for the\n attributes , It will convert them in equivalent SQL query and returns a instance \n of the model class\n\n Eg : first_employee = Employee.objects.get(id=1)\n\n Params:\n kwargs :The constraints that you want to query the DB with\n fetch_relations(bool) : Defaults to false, If you want to fetch the foreign keys\n you can set it to True\n limit(int): The limit of the query set\n\n Returns :\n The model class \n "
cur2 = self._get_cursor()
print('kwargs is', kwargs)
if bool(kwargs):
condition_list = self._evaluate_user_conditions(kwargs)
_sql_query = _Constants.SELECT_WHERE_SQL.format(name=self.table_name, fields='*', query=self._return_conditions_as_sql_string(condition_list))
else:
_sql_query = _Constants.SELECT_ALL_SQL.format(fields='*', name=self.table_name)
if limit:
_sql_query += ' LIMIT {limit} ;'.format(limit=limit)
else:
_sql_query += ' ;'
cur2.execute(_sql_query)
rows = cur2.fetchall()
result = list()
for i in rows:
result_class = self._dict_to_model_class(i, self.model_class)
if fetch_relations:
result_class = self._return_foreign_keys_from_a_table(result_class, i)
result.append(result_class)
return result | def where(self, fetch_relations=False, limit=None, **kwargs) -> List:
"\n A wrapper for the SQL's WHERE clause , You mention the constraints for the\n attributes , It will convert them in equivalent SQL query and returns a instance \n of the model class\n\n Eg : first_employee = Employee.objects.get(id=1)\n\n Params:\n kwargs :The constraints that you want to query the DB with\n fetch_relations(bool) : Defaults to false, If you want to fetch the foreign keys\n you can set it to True\n limit(int): The limit of the query set\n\n Returns :\n The model class \n "
cur2 = self._get_cursor()
print('kwargs is', kwargs)
if bool(kwargs):
condition_list = self._evaluate_user_conditions(kwargs)
_sql_query = _Constants.SELECT_WHERE_SQL.format(name=self.table_name, fields='*', query=self._return_conditions_as_sql_string(condition_list))
else:
_sql_query = _Constants.SELECT_ALL_SQL.format(fields='*', name=self.table_name)
if limit:
_sql_query += ' LIMIT {limit} ;'.format(limit=limit)
else:
_sql_query += ' ;'
cur2.execute(_sql_query)
rows = cur2.fetchall()
result = list()
for i in rows:
result_class = self._dict_to_model_class(i, self.model_class)
if fetch_relations:
result_class = self._return_foreign_keys_from_a_table(result_class, i)
result.append(result_class)
return result<|docstring|>A wrapper for the SQL's WHERE clause , You mention the constraints for the
attributes , It will convert them in equivalent SQL query and returns a instance
of the model class
Eg : first_employee = Employee.objects.get(id=1)
Params:
kwargs :The constraints that you want to query the DB with
fetch_relations(bool) : Defaults to false, If you want to fetch the foreign keys
you can set it to True
limit(int): The limit of the query set
Returns :
The model class<|endoftext|> |
de2a99602bf55e4e94fc4de46bdfccd4cd5845ce72941c498068ab651c3f7efe | def get_one(self, fetch_relations=False, **kwargs):
'\n Calls the where method and returns the first result from \n the query_set\n '
result = self.where(limit=1, fetch_relations=fetch_relations, **kwargs)
if (not len(result)):
return None
return result[0] | Calls the where method and returns the first result from
the query_set | vorm/manager.py | get_one | VarthanV/vorm | 2 | python | def get_one(self, fetch_relations=False, **kwargs):
'\n Calls the where method and returns the first result from \n the query_set\n '
result = self.where(limit=1, fetch_relations=fetch_relations, **kwargs)
if (not len(result)):
return None
return result[0] | def get_one(self, fetch_relations=False, **kwargs):
'\n Calls the where method and returns the first result from \n the query_set\n '
result = self.where(limit=1, fetch_relations=fetch_relations, **kwargs)
if (not len(result)):
return None
return result[0]<|docstring|>Calls the where method and returns the first result from
the query_set<|endoftext|> |
bfb96410314777d8925831d51ca25156d90aa9c3b8748a7413726d3ce06fba55 | def insert(self, **kwargs):
"\n Inserts a record into the database by converting the model class into its\n equivalent SQL query\n\n Eg : student = Student.objects.insert(name='Vishnu Varthan' ,class='12C')\n "
fields = list()
values = list()
for (k, v) in kwargs.items():
key = k
value = v
if (not isinstance(v, _Constants.KNOWN_CLASSES)):
key = '{}_id'.format(k)
value = v.id
fields.append(key)
values.append(value)
_sql_query = _Constants.INSERT_SQL.format(name=self._escape_column_names(self.table_name), fields=' , '.join([self._escape_column_names(i).format(i) for i in fields]), placeholders=' , '.join([self._get_escaped_value(i) for i in values]))
err = self._get_cursor().execute(_sql_query)
if ((not err) and (not kwargs.get('id'))):
cur = self._get_cursor()
cur.execute(self._get_last_inserted_sql())
last_inserted_id = cur.fetchone()
kwargs['id'] = last_inserted_id['id']
return self._dict_to_model_class(kwargs, self.model_class) | Inserts a record into the database by converting the model class into its
equivalent SQL query
Eg : student = Student.objects.insert(name='Vishnu Varthan' ,class='12C') | vorm/manager.py | insert | VarthanV/vorm | 2 | python | def insert(self, **kwargs):
"\n Inserts a record into the database by converting the model class into its\n equivalent SQL query\n\n Eg : student = Student.objects.insert(name='Vishnu Varthan' ,class='12C')\n "
fields = list()
values = list()
for (k, v) in kwargs.items():
key = k
value = v
if (not isinstance(v, _Constants.KNOWN_CLASSES)):
key = '{}_id'.format(k)
value = v.id
fields.append(key)
values.append(value)
_sql_query = _Constants.INSERT_SQL.format(name=self._escape_column_names(self.table_name), fields=' , '.join([self._escape_column_names(i).format(i) for i in fields]), placeholders=' , '.join([self._get_escaped_value(i) for i in values]))
err = self._get_cursor().execute(_sql_query)
if ((not err) and (not kwargs.get('id'))):
cur = self._get_cursor()
cur.execute(self._get_last_inserted_sql())
last_inserted_id = cur.fetchone()
kwargs['id'] = last_inserted_id['id']
return self._dict_to_model_class(kwargs, self.model_class) | def insert(self, **kwargs):
"\n Inserts a record into the database by converting the model class into its\n equivalent SQL query\n\n Eg : student = Student.objects.insert(name='Vishnu Varthan' ,class='12C')\n "
fields = list()
values = list()
for (k, v) in kwargs.items():
key = k
value = v
if (not isinstance(v, _Constants.KNOWN_CLASSES)):
key = '{}_id'.format(k)
value = v.id
fields.append(key)
values.append(value)
_sql_query = _Constants.INSERT_SQL.format(name=self._escape_column_names(self.table_name), fields=' , '.join([self._escape_column_names(i).format(i) for i in fields]), placeholders=' , '.join([self._get_escaped_value(i) for i in values]))
err = self._get_cursor().execute(_sql_query)
if ((not err) and (not kwargs.get('id'))):
cur = self._get_cursor()
cur.execute(self._get_last_inserted_sql())
last_inserted_id = cur.fetchone()
kwargs['id'] = last_inserted_id['id']
return self._dict_to_model_class(kwargs, self.model_class)<|docstring|>Inserts a record into the database by converting the model class into its
equivalent SQL query
Eg : student = Student.objects.insert(name='Vishnu Varthan' ,class='12C')<|endoftext|> |
21fc58c5407d15d7d0802d4952217226ed76932b63f81b3935ef674cc41ca471 | def update(self, new_data: dict, **kwargs):
"\n Updates the specified table in the database by evaluvating the conditions \n we specified\n p = Pizza.objects.update(new_data={'name':'Paneer pizza'},id=1)\n SQL Query : UPDATE pizza SET `name` = 'Paneer pizza' WHERE `id` = 1\n\n Params:\n new_data(dict) : A dict which contains the new values , column_name\n being the key and the new value for the column being the value for\n the key\n conditions(kwargs) : Conditions for the row to be updated\n\n Returns :\n model_class \n "
new_data_list = ', '.join([f'{self._escape_column_names(field_name)} = {self._get_escaped_value(value)}' for (field_name, value) in new_data.items()])
condition_list = self._evaluate_user_conditions(kwargs)
condition_string = self._return_conditions_as_sql_string(condition_list)
_sql_query = _Constants.UPDATE_SQL.format(name=self.table_name, new_data=new_data_list, conditions=condition_string)
cur = self._get_cursor()
cur.execute(_sql_query)
for (k, v) in new_data.items():
kwargs[k] = v
return self.get_one(**kwargs) | Updates the specified table in the database by evaluvating the conditions
we specified
p = Pizza.objects.update(new_data={'name':'Paneer pizza'},id=1)
SQL Query : UPDATE pizza SET `name` = 'Paneer pizza' WHERE `id` = 1
Params:
new_data(dict) : A dict which contains the new values , column_name
being the key and the new value for the column being the value for
the key
conditions(kwargs) : Conditions for the row to be updated
Returns :
model_class | vorm/manager.py | update | VarthanV/vorm | 2 | python | def update(self, new_data: dict, **kwargs):
"\n Updates the specified table in the database by evaluvating the conditions \n we specified\n p = Pizza.objects.update(new_data={'name':'Paneer pizza'},id=1)\n SQL Query : UPDATE pizza SET `name` = 'Paneer pizza' WHERE `id` = 1\n\n Params:\n new_data(dict) : A dict which contains the new values , column_name\n being the key and the new value for the column being the value for\n the key\n conditions(kwargs) : Conditions for the row to be updated\n\n Returns :\n model_class \n "
new_data_list = ', '.join([f'{self._escape_column_names(field_name)} = {self._get_escaped_value(value)}' for (field_name, value) in new_data.items()])
condition_list = self._evaluate_user_conditions(kwargs)
condition_string = self._return_conditions_as_sql_string(condition_list)
_sql_query = _Constants.UPDATE_SQL.format(name=self.table_name, new_data=new_data_list, conditions=condition_string)
cur = self._get_cursor()
cur.execute(_sql_query)
for (k, v) in new_data.items():
kwargs[k] = v
return self.get_one(**kwargs) | def update(self, new_data: dict, **kwargs):
"\n Updates the specified table in the database by evaluvating the conditions \n we specified\n p = Pizza.objects.update(new_data={'name':'Paneer pizza'},id=1)\n SQL Query : UPDATE pizza SET `name` = 'Paneer pizza' WHERE `id` = 1\n\n Params:\n new_data(dict) : A dict which contains the new values , column_name\n being the key and the new value for the column being the value for\n the key\n conditions(kwargs) : Conditions for the row to be updated\n\n Returns :\n model_class \n "
new_data_list = ', '.join([f'{self._escape_column_names(field_name)} = {self._get_escaped_value(value)}' for (field_name, value) in new_data.items()])
condition_list = self._evaluate_user_conditions(kwargs)
condition_string = self._return_conditions_as_sql_string(condition_list)
_sql_query = _Constants.UPDATE_SQL.format(name=self.table_name, new_data=new_data_list, conditions=condition_string)
cur = self._get_cursor()
cur.execute(_sql_query)
for (k, v) in new_data.items():
kwargs[k] = v
return self.get_one(**kwargs)<|docstring|>Updates the specified table in the database by evaluvating the conditions
we specified
p = Pizza.objects.update(new_data={'name':'Paneer pizza'},id=1)
SQL Query : UPDATE pizza SET `name` = 'Paneer pizza' WHERE `id` = 1
Params:
new_data(dict) : A dict which contains the new values , column_name
being the key and the new value for the column being the value for
the key
conditions(kwargs) : Conditions for the row to be updated
Returns :
model_class<|endoftext|> |
d094a51e3e0a56d10b7335fadb665f5d31c00330921c4d8249132ead884c4d85 | def delete(self, **kwargs):
'\n Delete a record from database\n '
condition_list = self._evaluate_user_conditions(kwargs)
_sql_query = _Constants.DELETE_ALL_SQL.format(name=self.table_name, conditions=self._return_conditions_as_sql_string(condition_list))
cur = self._get_cursor()
cur.execute(_sql_query) | Delete a record from database | vorm/manager.py | delete | VarthanV/vorm | 2 | python | def delete(self, **kwargs):
'\n \n '
condition_list = self._evaluate_user_conditions(kwargs)
_sql_query = _Constants.DELETE_ALL_SQL.format(name=self.table_name, conditions=self._return_conditions_as_sql_string(condition_list))
cur = self._get_cursor()
cur.execute(_sql_query) | def delete(self, **kwargs):
'\n \n '
condition_list = self._evaluate_user_conditions(kwargs)
_sql_query = _Constants.DELETE_ALL_SQL.format(name=self.table_name, conditions=self._return_conditions_as_sql_string(condition_list))
cur = self._get_cursor()
cur.execute(_sql_query)<|docstring|>Delete a record from database<|endoftext|> |
69f6d6339e1f5895c77de5e4d6fc1cb6e13478840f0643d6c2700478b3c986a3 | def main():
'Entry point of the program.'
coloredlogs.install(level='INFO')
rabbit = Client('hms_agenda', 'haum', ['agenda.*'])
rabbit.connect()
bot = AgendaParser(rabbit)
@topic('agenda.query')
def query_callback(client, topic, message):
bot.parse_command(client, topic, message)
rabbit.listeners.append(query_callback)
try:
rabbit.start_consuming()
except KeyboardInterrupt:
get_logger().critical('Got a keyboard interrupt')
finally:
rabbit.disconnect() | Entry point of the program. | hms_agenda/main.py | main | haum/hms_agenda | 0 | python | def main():
coloredlogs.install(level='INFO')
rabbit = Client('hms_agenda', 'haum', ['agenda.*'])
rabbit.connect()
bot = AgendaParser(rabbit)
@topic('agenda.query')
def query_callback(client, topic, message):
bot.parse_command(client, topic, message)
rabbit.listeners.append(query_callback)
try:
rabbit.start_consuming()
except KeyboardInterrupt:
get_logger().critical('Got a keyboard interrupt')
finally:
rabbit.disconnect() | def main():
coloredlogs.install(level='INFO')
rabbit = Client('hms_agenda', 'haum', ['agenda.*'])
rabbit.connect()
bot = AgendaParser(rabbit)
@topic('agenda.query')
def query_callback(client, topic, message):
bot.parse_command(client, topic, message)
rabbit.listeners.append(query_callback)
try:
rabbit.start_consuming()
except KeyboardInterrupt:
get_logger().critical('Got a keyboard interrupt')
finally:
rabbit.disconnect()<|docstring|>Entry point of the program.<|endoftext|> |
6f15c4f54ada234f77d1db6e033fecbd6c2fc79448b438956080864c1f1fe75b | def create_model_FLP(hubs, clients, cost_matrix_2):
'\n Model to minimize the number of hubs to open following the classical facility location problem.\n Maybe we will not need this part\n :param hubs:\n :param clients:\n :param cost_matrix_2:\n :return: hub that are open.\n ' | Model to minimize the number of hubs to open following the classical facility location problem.
Maybe we will not need this part
:param hubs:
:param clients:
:param cost_matrix_2:
:return: hub that are open. | src/LP/solver/model_4.py | create_model_FLP | AymanABDELHAMID/2E-CVRP | 0 | python | def create_model_FLP(hubs, clients, cost_matrix_2):
'\n Model to minimize the number of hubs to open following the classical facility location problem.\n Maybe we will not need this part\n :param hubs:\n :param clients:\n :param cost_matrix_2:\n :return: hub that are open.\n ' | def create_model_FLP(hubs, clients, cost_matrix_2):
'\n Model to minimize the number of hubs to open following the classical facility location problem.\n Maybe we will not need this part\n :param hubs:\n :param clients:\n :param cost_matrix_2:\n :return: hub that are open.\n '<|docstring|>Model to minimize the number of hubs to open following the classical facility location problem.
Maybe we will not need this part
:param hubs:
:param clients:
:param cost_matrix_2:
:return: hub that are open.<|endoftext|> |
63e6ffedbef988f1bc398f2ee09d8cbc735d776433561055eb9ccbcf3ad08016 | def create_model(hubs, clients, cost_matrix_1, cost_matrix_2):
'\n Model to assign robots to clients respecting:\n 1. Robot maximum capacity\n 2. Robot maximum distance\n 3. Client Time Windows\n :param hubs:\n :param clients:\n :param cost_matrix_1:\n :param cost_matrix_2:\n :return: robot assignment and demand for each hub\n '
start_time = time.time()
model = gp.Model('RAP')
n_c = len(clients)
R_max = len(clients)
R = list(range(R_max))
C = [c.name for c in clients]
D_c = [c.demand for c in clients]
st = {c.name: c.st for c in clients}
t1 = {c.name: c.tw1 for c in clients}
t2 = {c.name: c.tw2 for c in clients}
Loc_c = [c.loc for c in clients]
Loc_c_dict = {c.name: c.loc for c in clients}
L = list(set([l[0] for l in cost_matrix_2.keys()]))
H = list(range(len(hubs)))
R_cap = 50
R_dist = 200
c_hub = 50
c_robot = 10
op_time = (max(t2.values()) + 15)
o = model.addVars(H, vtype=GRB.BINARY, name='o')
x = model.addVars(C, R, H, vtype=GRB.BINARY, name='x')
rob = model.addVars(R, H, vtype=GRB.BINARY, name='rob')
y = model.addVars(L, L, R, vtype=GRB.BINARY, name='y')
t = model.addVars(L, ub=op_time, name='t')
xa = model.addVars(C, name='xa')
xb = model.addVars(C, name='xb')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for r in R for h in H)) == 1) for c in C), name='client_must_be_served')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for r in R)) <= o[h]) for c in C for h in H), name='no_hub_no_robot')
model.addConstrs(((rob[(r, h)] <= o[h]) for r in R for h in H), name='no_hub_no_robot_2')
model.addConstrs(((x.sum(c, '*', h) <= 1) for c in C for h in H), name='one_robot')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for h in H for r in R)) == 1) for c in C), name='Just_one_hub')
model.addConstrs(((gp.quicksum(((D_c[int(c)] * x[(c, r, h)]) for c in C for h in H)) <= (R_cap * rob.sum(r, '*'))) for r in R), name='robot_cap')
model.addConstrs(((gp.quicksum(((cost_matrix_2[(i, j)] * y[(i, j, r)]) for i in L for j in L if (i != j))) <= R_dist) for r in R), name='robot_dist')
model.addConstrs(((gp.quicksum((y[(int(j), int(i), r)] for j in L if (int(j) != int(i)))) == gp.quicksum((x[(i, r, h)] for h in H))) for r in R for i in C), name='Tour1')
model.addConstrs(((gp.quicksum((y[(int(i), int(j), r)] for j in L if (int(j) != int(i)))) == gp.quicksum((x[(i, r, h)] for h in H))) for r in R for i in C), name='Tour2')
model.addConstrs((((y[(int(j), int(i), r)] + y[(int(i), int(j), r)]) <= 1) for r in R for i in L for j in L if (int(j) != int(i))), name='Tour3')
model.addConstrs(((gp.quicksum((y[(int(c), (n_c + h), r)] for c in C)) == rob[(r, h)]) for r in R for h in H), name='sameHub1_y')
model.addConstrs(((gp.quicksum((y[((n_c + h), int(c), r)] for c in C)) == rob[(r, h)]) for r in R for h in H), name='sameHub2_y')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for c in C)) <= ((n_c + 1) * rob[(r, h)])) for r in R for h in H), name='sameHub1_x')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for c in C)) <= ((n_c + 1) * rob[(r, h)])) for r in R for h in H), name='sameHub2_x')
model.addConstrs(((gp.quicksum((rob[(r, h)] for h in H)) <= 1) for r in R), name='one_hub_per_robot')
V_cap = 300
total_demand = math.fsum((c.demand for c in clients))
t = math.ceil((total_demand / V_cap))
V = list(range(t))
c_truck = 50
c_truck_distance = 5
DH = list(range((len(hubs) + 1)))
z = model.addVars(V, DH, DH, vtype=GRB.BINARY, name='z')
w = model.addVars(V, H, vtype=GRB.BINARY, name='w')
model.addConstrs(((gp.quicksum((z[(v, h1, h2)] for v in V for h1 in DH if (h1 != h2))) == o[h2]) for h2 in DH[:(- 1)]), name='if_truck_then_hub')
model.addConstrs(((gp.quicksum((z[(v, h1, h2)] for v in V for h1 in DH if (h1 != h2))) == gp.quicksum((z[(v, h2, h1)] for v in V for h1 in DH if (h1 != h2)))) for h2 in DH), name='trucks3')
model.addConstrs(((gp.quicksum((z[(v, dh1, dh2)] for v in V for dh1 in DH if (dh1 != dh2))) <= 1) for dh2 in DH), name='no_subtours_1')
model.addConstrs(((gp.quicksum((z[(v, dh2, dh1)] for v in V for dh1 in DH if (dh1 != dh2))) <= 1) for dh2 in DH), name='no_subtours_2')
model.addConstrs((((z[(v, dh2, dh1)] + z[(v, dh1, dh2)]) <= 1) for v in V for dh1 in DH for dh2 in DH if (dh1 != dh2)), name='no_subtours_3')
D_h = model.addVars(H, name='D_h')
model.addConstrs(((D_h[h] == gp.quicksum(((D_c[int(c)] * x[(c, r, h)]) for c in C for r in R))) for h in H), name='total_demand_per_hub')
model.addConstrs(((gp.quicksum(((D_h[h] * z[(v, i, h)]) for i in DH)) <= V_cap) for h in H for v in V), name='truck_capacity')
cost_robot_var = gp.quicksum(((cost_matrix_2[(c1, c2)] * y[(c1, c2, r)]) for c1 in L for c2 in L if (c1 != c2) for r in R))
cost_robot_fixed = gp.quicksum(((c_robot * rob[(r, h)]) for r in R for h in H))
cost_hub_fixed = gp.quicksum(((c_hub * o[h]) for h in H))
cost_truck_var = gp.quicksum((((cost_matrix_1[(dh1, dh2)] * c_truck_distance) * z[(v, dh1, dh2)]) for v in V for dh1 in DH for dh2 in DH if (dh1 != dh2)))
cost_truck_fixed = (c_truck * t)
model.setObjective(((((cost_robot_var + cost_robot_fixed) + cost_hub_fixed) + cost_truck_fixed) + cost_truck_var), GRB.MINIMIZE)
model.write('../output/lp_model/RAP_TRP_v1.lp')
model.optimize()
print(('Optimal solution found with total cost: %g' % model.objVal))
for v in model.getVars():
if (v.x >= 0.5):
print(('%s %g' % (v.varName, v.x)))
print(('Executed in %s Minutes' % ((time.time() - start_time) / 60)))
print('Analyzing Solutions...')
print('1. Robots:')
robots = {r: [] for r in R}
active_robots = []
for r in R:
robot = ''
hub = ''
clients_served = []
clients_served_real = []
for h in H:
for c in C:
if (x[(c, r, h)].X > 0.5):
robot = str((r + 1))
hub = str((h + 1))
clients_served.append((int(c) + 1))
clients_served_real.append(int(c))
if robot:
print('Robot {}, will be serving from hub {}.'.format(robot, hub))
print('The robot will serve the following clients: {}'.format(clients_served))
active_robots.append(r)
if hub:
clients_served_real.append(((n_c + int(hub)) - 1))
robots[r] = clients_served_real
print('2. Robot tours:')
tours = {r: [] for r in R}
for r in R:
links = list()
for i in L:
for j in L:
if (y[(i, j, r)].X > 0.5):
links.append((i, j))
tours[r] = links
print('Links visited by each robot: ')
print(tours)
import matplotlib.pyplot as plt
import networkx as nx
G = nx.DiGraph()
list_nodes = list(range(1, len(L)))
G.add_nodes_from(list_nodes)
nodes_clients = {int(c.name): c.loc for c in clients}
nodes_hubs = {(n_c + h): hubs[h] for h in H}
node_pos = {**nodes_clients, **nodes_hubs}
for r in active_robots:
red_edges = [(i, j) for i in L for j in L if (y[(i, j, r)].x > 0.5)]
for i in L:
for j in L:
if (y[(i, j, r)].x > 0.5):
G.add_edge(i, j)
node_col = [('white' if (not (node in robots[r])) else 'red') for node in G.nodes()]
edge_col = [('black' if (not (edge in red_edges)) else 'red') for edge in G.edges()]
nx.draw_networkx(G, node_pos, node_color=node_col, node_size=450)
nx.draw_networkx_edges(G, node_pos, edge_color=edge_col)
plt.axis('off')
G.remove_edges_from(list(G.edges()))
plt.savefig('../output/plots/tour-robot_Ca1-3-30_{}.png'.format((r + 1)))
plt.clf() | Model to assign robots to clients respecting:
1. Robot maximum capacity
2. Robot maximum distance
3. Client Time Windows
:param hubs:
:param clients:
:param cost_matrix_1:
:param cost_matrix_2:
:return: robot assignment and demand for each hub | src/LP/solver/model_4.py | create_model | AymanABDELHAMID/2E-CVRP | 0 | python | def create_model(hubs, clients, cost_matrix_1, cost_matrix_2):
'\n Model to assign robots to clients respecting:\n 1. Robot maximum capacity\n 2. Robot maximum distance\n 3. Client Time Windows\n :param hubs:\n :param clients:\n :param cost_matrix_1:\n :param cost_matrix_2:\n :return: robot assignment and demand for each hub\n '
start_time = time.time()
model = gp.Model('RAP')
n_c = len(clients)
R_max = len(clients)
R = list(range(R_max))
C = [c.name for c in clients]
D_c = [c.demand for c in clients]
st = {c.name: c.st for c in clients}
t1 = {c.name: c.tw1 for c in clients}
t2 = {c.name: c.tw2 for c in clients}
Loc_c = [c.loc for c in clients]
Loc_c_dict = {c.name: c.loc for c in clients}
L = list(set([l[0] for l in cost_matrix_2.keys()]))
H = list(range(len(hubs)))
R_cap = 50
R_dist = 200
c_hub = 50
c_robot = 10
op_time = (max(t2.values()) + 15)
o = model.addVars(H, vtype=GRB.BINARY, name='o')
x = model.addVars(C, R, H, vtype=GRB.BINARY, name='x')
rob = model.addVars(R, H, vtype=GRB.BINARY, name='rob')
y = model.addVars(L, L, R, vtype=GRB.BINARY, name='y')
t = model.addVars(L, ub=op_time, name='t')
xa = model.addVars(C, name='xa')
xb = model.addVars(C, name='xb')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for r in R for h in H)) == 1) for c in C), name='client_must_be_served')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for r in R)) <= o[h]) for c in C for h in H), name='no_hub_no_robot')
model.addConstrs(((rob[(r, h)] <= o[h]) for r in R for h in H), name='no_hub_no_robot_2')
model.addConstrs(((x.sum(c, '*', h) <= 1) for c in C for h in H), name='one_robot')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for h in H for r in R)) == 1) for c in C), name='Just_one_hub')
model.addConstrs(((gp.quicksum(((D_c[int(c)] * x[(c, r, h)]) for c in C for h in H)) <= (R_cap * rob.sum(r, '*'))) for r in R), name='robot_cap')
model.addConstrs(((gp.quicksum(((cost_matrix_2[(i, j)] * y[(i, j, r)]) for i in L for j in L if (i != j))) <= R_dist) for r in R), name='robot_dist')
model.addConstrs(((gp.quicksum((y[(int(j), int(i), r)] for j in L if (int(j) != int(i)))) == gp.quicksum((x[(i, r, h)] for h in H))) for r in R for i in C), name='Tour1')
model.addConstrs(((gp.quicksum((y[(int(i), int(j), r)] for j in L if (int(j) != int(i)))) == gp.quicksum((x[(i, r, h)] for h in H))) for r in R for i in C), name='Tour2')
model.addConstrs((((y[(int(j), int(i), r)] + y[(int(i), int(j), r)]) <= 1) for r in R for i in L for j in L if (int(j) != int(i))), name='Tour3')
model.addConstrs(((gp.quicksum((y[(int(c), (n_c + h), r)] for c in C)) == rob[(r, h)]) for r in R for h in H), name='sameHub1_y')
model.addConstrs(((gp.quicksum((y[((n_c + h), int(c), r)] for c in C)) == rob[(r, h)]) for r in R for h in H), name='sameHub2_y')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for c in C)) <= ((n_c + 1) * rob[(r, h)])) for r in R for h in H), name='sameHub1_x')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for c in C)) <= ((n_c + 1) * rob[(r, h)])) for r in R for h in H), name='sameHub2_x')
model.addConstrs(((gp.quicksum((rob[(r, h)] for h in H)) <= 1) for r in R), name='one_hub_per_robot')
V_cap = 300
total_demand = math.fsum((c.demand for c in clients))
t = math.ceil((total_demand / V_cap))
V = list(range(t))
c_truck = 50
c_truck_distance = 5
DH = list(range((len(hubs) + 1)))
z = model.addVars(V, DH, DH, vtype=GRB.BINARY, name='z')
w = model.addVars(V, H, vtype=GRB.BINARY, name='w')
model.addConstrs(((gp.quicksum((z[(v, h1, h2)] for v in V for h1 in DH if (h1 != h2))) == o[h2]) for h2 in DH[:(- 1)]), name='if_truck_then_hub')
model.addConstrs(((gp.quicksum((z[(v, h1, h2)] for v in V for h1 in DH if (h1 != h2))) == gp.quicksum((z[(v, h2, h1)] for v in V for h1 in DH if (h1 != h2)))) for h2 in DH), name='trucks3')
model.addConstrs(((gp.quicksum((z[(v, dh1, dh2)] for v in V for dh1 in DH if (dh1 != dh2))) <= 1) for dh2 in DH), name='no_subtours_1')
model.addConstrs(((gp.quicksum((z[(v, dh2, dh1)] for v in V for dh1 in DH if (dh1 != dh2))) <= 1) for dh2 in DH), name='no_subtours_2')
model.addConstrs((((z[(v, dh2, dh1)] + z[(v, dh1, dh2)]) <= 1) for v in V for dh1 in DH for dh2 in DH if (dh1 != dh2)), name='no_subtours_3')
D_h = model.addVars(H, name='D_h')
model.addConstrs(((D_h[h] == gp.quicksum(((D_c[int(c)] * x[(c, r, h)]) for c in C for r in R))) for h in H), name='total_demand_per_hub')
model.addConstrs(((gp.quicksum(((D_h[h] * z[(v, i, h)]) for i in DH)) <= V_cap) for h in H for v in V), name='truck_capacity')
cost_robot_var = gp.quicksum(((cost_matrix_2[(c1, c2)] * y[(c1, c2, r)]) for c1 in L for c2 in L if (c1 != c2) for r in R))
cost_robot_fixed = gp.quicksum(((c_robot * rob[(r, h)]) for r in R for h in H))
cost_hub_fixed = gp.quicksum(((c_hub * o[h]) for h in H))
cost_truck_var = gp.quicksum((((cost_matrix_1[(dh1, dh2)] * c_truck_distance) * z[(v, dh1, dh2)]) for v in V for dh1 in DH for dh2 in DH if (dh1 != dh2)))
cost_truck_fixed = (c_truck * t)
model.setObjective(((((cost_robot_var + cost_robot_fixed) + cost_hub_fixed) + cost_truck_fixed) + cost_truck_var), GRB.MINIMIZE)
model.write('../output/lp_model/RAP_TRP_v1.lp')
model.optimize()
print(('Optimal solution found with total cost: %g' % model.objVal))
for v in model.getVars():
if (v.x >= 0.5):
print(('%s %g' % (v.varName, v.x)))
print(('Executed in %s Minutes' % ((time.time() - start_time) / 60)))
print('Analyzing Solutions...')
print('1. Robots:')
robots = {r: [] for r in R}
active_robots = []
for r in R:
robot =
hub =
clients_served = []
clients_served_real = []
for h in H:
for c in C:
if (x[(c, r, h)].X > 0.5):
robot = str((r + 1))
hub = str((h + 1))
clients_served.append((int(c) + 1))
clients_served_real.append(int(c))
if robot:
print('Robot {}, will be serving from hub {}.'.format(robot, hub))
print('The robot will serve the following clients: {}'.format(clients_served))
active_robots.append(r)
if hub:
clients_served_real.append(((n_c + int(hub)) - 1))
robots[r] = clients_served_real
print('2. Robot tours:')
tours = {r: [] for r in R}
for r in R:
links = list()
for i in L:
for j in L:
if (y[(i, j, r)].X > 0.5):
links.append((i, j))
tours[r] = links
print('Links visited by each robot: ')
print(tours)
import matplotlib.pyplot as plt
import networkx as nx
G = nx.DiGraph()
list_nodes = list(range(1, len(L)))
G.add_nodes_from(list_nodes)
nodes_clients = {int(c.name): c.loc for c in clients}
nodes_hubs = {(n_c + h): hubs[h] for h in H}
node_pos = {**nodes_clients, **nodes_hubs}
for r in active_robots:
red_edges = [(i, j) for i in L for j in L if (y[(i, j, r)].x > 0.5)]
for i in L:
for j in L:
if (y[(i, j, r)].x > 0.5):
G.add_edge(i, j)
node_col = [('white' if (not (node in robots[r])) else 'red') for node in G.nodes()]
edge_col = [('black' if (not (edge in red_edges)) else 'red') for edge in G.edges()]
nx.draw_networkx(G, node_pos, node_color=node_col, node_size=450)
nx.draw_networkx_edges(G, node_pos, edge_color=edge_col)
plt.axis('off')
G.remove_edges_from(list(G.edges()))
plt.savefig('../output/plots/tour-robot_Ca1-3-30_{}.png'.format((r + 1)))
plt.clf() | def create_model(hubs, clients, cost_matrix_1, cost_matrix_2):
'\n Model to assign robots to clients respecting:\n 1. Robot maximum capacity\n 2. Robot maximum distance\n 3. Client Time Windows\n :param hubs:\n :param clients:\n :param cost_matrix_1:\n :param cost_matrix_2:\n :return: robot assignment and demand for each hub\n '
start_time = time.time()
model = gp.Model('RAP')
n_c = len(clients)
R_max = len(clients)
R = list(range(R_max))
C = [c.name for c in clients]
D_c = [c.demand for c in clients]
st = {c.name: c.st for c in clients}
t1 = {c.name: c.tw1 for c in clients}
t2 = {c.name: c.tw2 for c in clients}
Loc_c = [c.loc for c in clients]
Loc_c_dict = {c.name: c.loc for c in clients}
L = list(set([l[0] for l in cost_matrix_2.keys()]))
H = list(range(len(hubs)))
R_cap = 50
R_dist = 200
c_hub = 50
c_robot = 10
op_time = (max(t2.values()) + 15)
o = model.addVars(H, vtype=GRB.BINARY, name='o')
x = model.addVars(C, R, H, vtype=GRB.BINARY, name='x')
rob = model.addVars(R, H, vtype=GRB.BINARY, name='rob')
y = model.addVars(L, L, R, vtype=GRB.BINARY, name='y')
t = model.addVars(L, ub=op_time, name='t')
xa = model.addVars(C, name='xa')
xb = model.addVars(C, name='xb')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for r in R for h in H)) == 1) for c in C), name='client_must_be_served')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for r in R)) <= o[h]) for c in C for h in H), name='no_hub_no_robot')
model.addConstrs(((rob[(r, h)] <= o[h]) for r in R for h in H), name='no_hub_no_robot_2')
model.addConstrs(((x.sum(c, '*', h) <= 1) for c in C for h in H), name='one_robot')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for h in H for r in R)) == 1) for c in C), name='Just_one_hub')
model.addConstrs(((gp.quicksum(((D_c[int(c)] * x[(c, r, h)]) for c in C for h in H)) <= (R_cap * rob.sum(r, '*'))) for r in R), name='robot_cap')
model.addConstrs(((gp.quicksum(((cost_matrix_2[(i, j)] * y[(i, j, r)]) for i in L for j in L if (i != j))) <= R_dist) for r in R), name='robot_dist')
model.addConstrs(((gp.quicksum((y[(int(j), int(i), r)] for j in L if (int(j) != int(i)))) == gp.quicksum((x[(i, r, h)] for h in H))) for r in R for i in C), name='Tour1')
model.addConstrs(((gp.quicksum((y[(int(i), int(j), r)] for j in L if (int(j) != int(i)))) == gp.quicksum((x[(i, r, h)] for h in H))) for r in R for i in C), name='Tour2')
model.addConstrs((((y[(int(j), int(i), r)] + y[(int(i), int(j), r)]) <= 1) for r in R for i in L for j in L if (int(j) != int(i))), name='Tour3')
model.addConstrs(((gp.quicksum((y[(int(c), (n_c + h), r)] for c in C)) == rob[(r, h)]) for r in R for h in H), name='sameHub1_y')
model.addConstrs(((gp.quicksum((y[((n_c + h), int(c), r)] for c in C)) == rob[(r, h)]) for r in R for h in H), name='sameHub2_y')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for c in C)) <= ((n_c + 1) * rob[(r, h)])) for r in R for h in H), name='sameHub1_x')
model.addConstrs(((gp.quicksum((x[(c, r, h)] for c in C)) <= ((n_c + 1) * rob[(r, h)])) for r in R for h in H), name='sameHub2_x')
model.addConstrs(((gp.quicksum((rob[(r, h)] for h in H)) <= 1) for r in R), name='one_hub_per_robot')
V_cap = 300
total_demand = math.fsum((c.demand for c in clients))
t = math.ceil((total_demand / V_cap))
V = list(range(t))
c_truck = 50
c_truck_distance = 5
DH = list(range((len(hubs) + 1)))
z = model.addVars(V, DH, DH, vtype=GRB.BINARY, name='z')
w = model.addVars(V, H, vtype=GRB.BINARY, name='w')
model.addConstrs(((gp.quicksum((z[(v, h1, h2)] for v in V for h1 in DH if (h1 != h2))) == o[h2]) for h2 in DH[:(- 1)]), name='if_truck_then_hub')
model.addConstrs(((gp.quicksum((z[(v, h1, h2)] for v in V for h1 in DH if (h1 != h2))) == gp.quicksum((z[(v, h2, h1)] for v in V for h1 in DH if (h1 != h2)))) for h2 in DH), name='trucks3')
model.addConstrs(((gp.quicksum((z[(v, dh1, dh2)] for v in V for dh1 in DH if (dh1 != dh2))) <= 1) for dh2 in DH), name='no_subtours_1')
model.addConstrs(((gp.quicksum((z[(v, dh2, dh1)] for v in V for dh1 in DH if (dh1 != dh2))) <= 1) for dh2 in DH), name='no_subtours_2')
model.addConstrs((((z[(v, dh2, dh1)] + z[(v, dh1, dh2)]) <= 1) for v in V for dh1 in DH for dh2 in DH if (dh1 != dh2)), name='no_subtours_3')
D_h = model.addVars(H, name='D_h')
model.addConstrs(((D_h[h] == gp.quicksum(((D_c[int(c)] * x[(c, r, h)]) for c in C for r in R))) for h in H), name='total_demand_per_hub')
model.addConstrs(((gp.quicksum(((D_h[h] * z[(v, i, h)]) for i in DH)) <= V_cap) for h in H for v in V), name='truck_capacity')
cost_robot_var = gp.quicksum(((cost_matrix_2[(c1, c2)] * y[(c1, c2, r)]) for c1 in L for c2 in L if (c1 != c2) for r in R))
cost_robot_fixed = gp.quicksum(((c_robot * rob[(r, h)]) for r in R for h in H))
cost_hub_fixed = gp.quicksum(((c_hub * o[h]) for h in H))
cost_truck_var = gp.quicksum((((cost_matrix_1[(dh1, dh2)] * c_truck_distance) * z[(v, dh1, dh2)]) for v in V for dh1 in DH for dh2 in DH if (dh1 != dh2)))
cost_truck_fixed = (c_truck * t)
model.setObjective(((((cost_robot_var + cost_robot_fixed) + cost_hub_fixed) + cost_truck_fixed) + cost_truck_var), GRB.MINIMIZE)
model.write('../output/lp_model/RAP_TRP_v1.lp')
model.optimize()
print(('Optimal solution found with total cost: %g' % model.objVal))
for v in model.getVars():
if (v.x >= 0.5):
print(('%s %g' % (v.varName, v.x)))
print(('Executed in %s Minutes' % ((time.time() - start_time) / 60)))
print('Analyzing Solutions...')
print('1. Robots:')
robots = {r: [] for r in R}
active_robots = []
for r in R:
robot =
hub =
clients_served = []
clients_served_real = []
for h in H:
for c in C:
if (x[(c, r, h)].X > 0.5):
robot = str((r + 1))
hub = str((h + 1))
clients_served.append((int(c) + 1))
clients_served_real.append(int(c))
if robot:
print('Robot {}, will be serving from hub {}.'.format(robot, hub))
print('The robot will serve the following clients: {}'.format(clients_served))
active_robots.append(r)
if hub:
clients_served_real.append(((n_c + int(hub)) - 1))
robots[r] = clients_served_real
print('2. Robot tours:')
tours = {r: [] for r in R}
for r in R:
links = list()
for i in L:
for j in L:
if (y[(i, j, r)].X > 0.5):
links.append((i, j))
tours[r] = links
print('Links visited by each robot: ')
print(tours)
import matplotlib.pyplot as plt
import networkx as nx
G = nx.DiGraph()
list_nodes = list(range(1, len(L)))
G.add_nodes_from(list_nodes)
nodes_clients = {int(c.name): c.loc for c in clients}
nodes_hubs = {(n_c + h): hubs[h] for h in H}
node_pos = {**nodes_clients, **nodes_hubs}
for r in active_robots:
red_edges = [(i, j) for i in L for j in L if (y[(i, j, r)].x > 0.5)]
for i in L:
for j in L:
if (y[(i, j, r)].x > 0.5):
G.add_edge(i, j)
node_col = [('white' if (not (node in robots[r])) else 'red') for node in G.nodes()]
edge_col = [('black' if (not (edge in red_edges)) else 'red') for edge in G.edges()]
nx.draw_networkx(G, node_pos, node_color=node_col, node_size=450)
nx.draw_networkx_edges(G, node_pos, edge_color=edge_col)
plt.axis('off')
G.remove_edges_from(list(G.edges()))
plt.savefig('../output/plots/tour-robot_Ca1-3-30_{}.png'.format((r + 1)))
plt.clf()<|docstring|>Model to assign robots to clients respecting:
1. Robot maximum capacity
2. Robot maximum distance
3. Client Time Windows
:param hubs:
:param clients:
:param cost_matrix_1:
:param cost_matrix_2:
:return: robot assignment and demand for each hub<|endoftext|> |
5dd65257e7192cdd6c4bc4d3564a5feda6c984b7615125b189d449ba70a7f3c4 | def create_model_TRP(depot, hubs, cost_matrix_1):
'\n Model tp optimize truck routing from central depot to hubs rspecting:\n 1. Hubs demands\n 2. Truck maximum capacity\n :param depot:\n :param hubs:\n :param cost_matrix_1:\n :return:\n ' | Model tp optimize truck routing from central depot to hubs rspecting:
1. Hubs demands
2. Truck maximum capacity
:param depot:
:param hubs:
:param cost_matrix_1:
:return: | src/LP/solver/model_4.py | create_model_TRP | AymanABDELHAMID/2E-CVRP | 0 | python | def create_model_TRP(depot, hubs, cost_matrix_1):
'\n Model tp optimize truck routing from central depot to hubs rspecting:\n 1. Hubs demands\n 2. Truck maximum capacity\n :param depot:\n :param hubs:\n :param cost_matrix_1:\n :return:\n ' | def create_model_TRP(depot, hubs, cost_matrix_1):
'\n Model tp optimize truck routing from central depot to hubs rspecting:\n 1. Hubs demands\n 2. Truck maximum capacity\n :param depot:\n :param hubs:\n :param cost_matrix_1:\n :return:\n '<|docstring|>Model tp optimize truck routing from central depot to hubs rspecting:
1. Hubs demands
2. Truck maximum capacity
:param depot:
:param hubs:
:param cost_matrix_1:
:return:<|endoftext|> |
5bc935b83a0817943f6beacd6a11599423f22bb9cf8c3cf7eec9bce523b80e9a | @property
def locale(self) -> str:
'The locale for this item.'
return self._data[ItemListData].locale | The locale for this item. | cassiopeia/core/staticdata/item.py | locale | LukePeltier/cassiopeia | 437 | python | @property
def locale(self) -> str:
return self._data[ItemListData].locale | @property
def locale(self) -> str:
return self._data[ItemListData].locale<|docstring|>The locale for this item.<|endoftext|> |
7fd89cc6ba7e7fbc3492571b45e16bbf295e535e38bb50756e794cdbaed2bc30 | @property
def included_data(self) -> Set[str]:
"A set of tags to return additional information for this item when it's loaded."
return self._data[ItemListData].includedData | A set of tags to return additional information for this item when it's loaded. | cassiopeia/core/staticdata/item.py | included_data | LukePeltier/cassiopeia | 437 | python | @property
def included_data(self) -> Set[str]:
return self._data[ItemListData].includedData | @property
def included_data(self) -> Set[str]:
return self._data[ItemListData].includedData<|docstring|>A set of tags to return additional information for this item when it's loaded.<|endoftext|> |
19c0a60e76f632b028883b4ef75a2cd7103fa922d64d685aae6f99efa48e0772 | @lazy_property
def region(self) -> Region:
'The region for this item.'
return Region(self._data[ItemData].region) | The region for this item. | cassiopeia/core/staticdata/item.py | region | LukePeltier/cassiopeia | 437 | python | @lazy_property
def region(self) -> Region:
return Region(self._data[ItemData].region) | @lazy_property
def region(self) -> Region:
return Region(self._data[ItemData].region)<|docstring|>The region for this item.<|endoftext|> |
ac6567be441e23c675734d3765f9a9eb3400aa840c20d61a6cbb8bcbd02015e1 | @lazy_property
def platform(self) -> Platform:
'The platform for this item.'
return self.region.platform | The platform for this item. | cassiopeia/core/staticdata/item.py | platform | LukePeltier/cassiopeia | 437 | python | @lazy_property
def platform(self) -> Platform:
return self.region.platform | @lazy_property
def platform(self) -> Platform:
return self.region.platform<|docstring|>The platform for this item.<|endoftext|> |
3d9dc7f15a38ba1f74df1f3defb3bc166f46f716d901e5aadb2d8c9ce53fe2e1 | @property
def version(self) -> str:
'The version for this item.'
try:
return self._data[ItemData].version
except AttributeError:
version = get_latest_version(region=self.region, endpoint='item')
self(version=version)
return self._data[ItemData].version | The version for this item. | cassiopeia/core/staticdata/item.py | version | LukePeltier/cassiopeia | 437 | python | @property
def version(self) -> str:
try:
return self._data[ItemData].version
except AttributeError:
version = get_latest_version(region=self.region, endpoint='item')
self(version=version)
return self._data[ItemData].version | @property
def version(self) -> str:
try:
return self._data[ItemData].version
except AttributeError:
version = get_latest_version(region=self.region, endpoint='item')
self(version=version)
return self._data[ItemData].version<|docstring|>The version for this item.<|endoftext|> |
80a6e806d1d9358e5858b063ba058a8b7b4fc80b6a710833d690ffe67fb14eed | @property
def locale(self) -> str:
'The locale for this item.'
return (self._data[ItemData].locale or self.region.default_locale) | The locale for this item. | cassiopeia/core/staticdata/item.py | locale | LukePeltier/cassiopeia | 437 | python | @property
def locale(self) -> str:
return (self._data[ItemData].locale or self.region.default_locale) | @property
def locale(self) -> str:
return (self._data[ItemData].locale or self.region.default_locale)<|docstring|>The locale for this item.<|endoftext|> |
259363c0d48f6d46f80d6b31a63ee7355f18da89f73db7a0b94e7ad9b7b8b3f0 | @property
def included_data(self) -> Set[str]:
"A set of tags to return additonal information for this item when it's loaded."
return self._data[ItemData].includedData | A set of tags to return additonal information for this item when it's loaded. | cassiopeia/core/staticdata/item.py | included_data | LukePeltier/cassiopeia | 437 | python | @property
def included_data(self) -> Set[str]:
return self._data[ItemData].includedData | @property
def included_data(self) -> Set[str]:
return self._data[ItemData].includedData<|docstring|>A set of tags to return additonal information for this item when it's loaded.<|endoftext|> |
1338592d5bd5185c911d3817d58a13f9491f4a7b570e2e3b7715f0fe5536c181 | @CassiopeiaGhost.property(ItemData)
@ghost_load_on
def id(self) -> int:
"The item's ID."
return self._data[ItemData].id | The item's ID. | cassiopeia/core/staticdata/item.py | id | LukePeltier/cassiopeia | 437 | python | @CassiopeiaGhost.property(ItemData)
@ghost_load_on
def id(self) -> int:
return self._data[ItemData].id | @CassiopeiaGhost.property(ItemData)
@ghost_load_on
def id(self) -> int:
return self._data[ItemData].id<|docstring|>The item's ID.<|endoftext|> |
9ce607199df94ac9cc5c76ecb8f88a31fe59c1f6826d126d05e7eee3c53943bd | @CassiopeiaGhost.property(ItemData)
@ghost_load_on
@lazy
def image(self) -> Image:
'The image information for this item.'
image = Image.from_data(self._data[ItemData].image)
image(version=self.version)
return image | The image information for this item. | cassiopeia/core/staticdata/item.py | image | LukePeltier/cassiopeia | 437 | python | @CassiopeiaGhost.property(ItemData)
@ghost_load_on
@lazy
def image(self) -> Image:
image = Image.from_data(self._data[ItemData].image)
image(version=self.version)
return image | @CassiopeiaGhost.property(ItemData)
@ghost_load_on
@lazy
def image(self) -> Image:
image = Image.from_data(self._data[ItemData].image)
image(version=self.version)
return image<|docstring|>The image information for this item.<|endoftext|> |
f3299fa50873c297780fe0ea9a6d518ea55a8a8fa65b7b5479af6e77ab162d7d | def real_hoft(self, Fp=None, Fc=None):
"\n Returns the real-valued h(t) that would be produced in a single instrument.\n Translates epoch as needed.\n Based on 'hoft' in lalsimutils.py\n "
htC = self.complex_hoft(force_T=(1.0 / self.P.deltaF), deltaT=self.P.deltaT)
TDlen = htC.data.length
if rosDebug:
print('Size sanity check ', TDlen, (1 / (self.P.deltaF * self.P.deltaT)))
print(' Raw complex magnitude , ', np.max(htC.data.data))
hp = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hc = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hT = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hp.data.data = np.real(htC.data.data)
hc.data.data = np.imag(htC.data.data)
if ((Fp != None) and (Fc != None)):
hp.data.data *= Fp
hc.data.data *= Fc
hp = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
elif (self.P.radec == False):
fp = lalsimutils.Fplus(self.P.theta, self.P.phi, self.P.psi)
fc = lalsimutils.Fcross(self.P.theta, self.P.phi, self.P.psi)
hp.data.data *= fp
hc.data.data *= fc
hp.data.data = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
else:
hp.epoch = (hp.epoch + self.P.tref)
hc.epoch = (hc.epoch + self.P.tref)
if rosDebug:
print(' Real h(t) before detector weighting, ', np.max(hp.data.data), np.max(hc.data.data))
hoft = lalsim.SimDetectorStrainREAL8TimeSeries(hp, hc, self.P.phi, self.P.theta, self.P.psi, lalsim.DetectorPrefixToLALDetector(str(self.P.detector)))
hoft = lal.CutREAL8TimeSeries(hoft, 0, hp.data.length)
if rosDebug:
print('Size before and after detector weighting ', hp.data.length, hoft.data.length)
if rosDebug:
print(' Real h_{IFO}(t) generated, pre-taper : max strain =', np.max(hoft.data.data))
if (self.P.taper != lalsimutils.lsu_TAPER_NONE):
lalsim.SimInspiralREAL8WaveTaper(hoft.data, self.P.taper)
if (self.P.deltaF is not None):
TDlen = int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT))
print('Size sanity check 2 ', int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT)), hoft.data.length)
assert (TDlen >= hoft.data.length)
npts = hoft.data.length
hoft = lal.ResizeREAL8TimeSeries(hoft, 0, TDlen)
hoft.data.data[npts:TDlen] = 0
if rosDebug:
print(' Real h_{IFO}(t) generated : max strain =', np.max(hoft.data.data))
return hoft | Returns the real-valued h(t) that would be produced in a single instrument.
Translates epoch as needed.
Based on 'hoft' in lalsimutils.py | MonteCarloMarginalizeCode/Code/RIFT/physics/EOBTidalExternal.py | real_hoft | liz-champion/research-projects-RIT | 8 | python | def real_hoft(self, Fp=None, Fc=None):
"\n Returns the real-valued h(t) that would be produced in a single instrument.\n Translates epoch as needed.\n Based on 'hoft' in lalsimutils.py\n "
htC = self.complex_hoft(force_T=(1.0 / self.P.deltaF), deltaT=self.P.deltaT)
TDlen = htC.data.length
if rosDebug:
print('Size sanity check ', TDlen, (1 / (self.P.deltaF * self.P.deltaT)))
print(' Raw complex magnitude , ', np.max(htC.data.data))
hp = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hc = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hT = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hp.data.data = np.real(htC.data.data)
hc.data.data = np.imag(htC.data.data)
if ((Fp != None) and (Fc != None)):
hp.data.data *= Fp
hc.data.data *= Fc
hp = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
elif (self.P.radec == False):
fp = lalsimutils.Fplus(self.P.theta, self.P.phi, self.P.psi)
fc = lalsimutils.Fcross(self.P.theta, self.P.phi, self.P.psi)
hp.data.data *= fp
hc.data.data *= fc
hp.data.data = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
else:
hp.epoch = (hp.epoch + self.P.tref)
hc.epoch = (hc.epoch + self.P.tref)
if rosDebug:
print(' Real h(t) before detector weighting, ', np.max(hp.data.data), np.max(hc.data.data))
hoft = lalsim.SimDetectorStrainREAL8TimeSeries(hp, hc, self.P.phi, self.P.theta, self.P.psi, lalsim.DetectorPrefixToLALDetector(str(self.P.detector)))
hoft = lal.CutREAL8TimeSeries(hoft, 0, hp.data.length)
if rosDebug:
print('Size before and after detector weighting ', hp.data.length, hoft.data.length)
if rosDebug:
print(' Real h_{IFO}(t) generated, pre-taper : max strain =', np.max(hoft.data.data))
if (self.P.taper != lalsimutils.lsu_TAPER_NONE):
lalsim.SimInspiralREAL8WaveTaper(hoft.data, self.P.taper)
if (self.P.deltaF is not None):
TDlen = int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT))
print('Size sanity check 2 ', int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT)), hoft.data.length)
assert (TDlen >= hoft.data.length)
npts = hoft.data.length
hoft = lal.ResizeREAL8TimeSeries(hoft, 0, TDlen)
hoft.data.data[npts:TDlen] = 0
if rosDebug:
print(' Real h_{IFO}(t) generated : max strain =', np.max(hoft.data.data))
return hoft | def real_hoft(self, Fp=None, Fc=None):
"\n Returns the real-valued h(t) that would be produced in a single instrument.\n Translates epoch as needed.\n Based on 'hoft' in lalsimutils.py\n "
htC = self.complex_hoft(force_T=(1.0 / self.P.deltaF), deltaT=self.P.deltaT)
TDlen = htC.data.length
if rosDebug:
print('Size sanity check ', TDlen, (1 / (self.P.deltaF * self.P.deltaT)))
print(' Raw complex magnitude , ', np.max(htC.data.data))
hp = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hc = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hT = lal.CreateREAL8TimeSeries('h(t)', htC.epoch, 0.0, self.P.deltaT, lalsimutils.lsu_DimensionlessUnit, TDlen)
hp.data.data = np.real(htC.data.data)
hc.data.data = np.imag(htC.data.data)
if ((Fp != None) and (Fc != None)):
hp.data.data *= Fp
hc.data.data *= Fc
hp = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
elif (self.P.radec == False):
fp = lalsimutils.Fplus(self.P.theta, self.P.phi, self.P.psi)
fc = lalsimutils.Fcross(self.P.theta, self.P.phi, self.P.psi)
hp.data.data *= fp
hc.data.data *= fc
hp.data.data = lal.AddREAL8TimeSeries(hp, hc)
hoft = hp
else:
hp.epoch = (hp.epoch + self.P.tref)
hc.epoch = (hc.epoch + self.P.tref)
if rosDebug:
print(' Real h(t) before detector weighting, ', np.max(hp.data.data), np.max(hc.data.data))
hoft = lalsim.SimDetectorStrainREAL8TimeSeries(hp, hc, self.P.phi, self.P.theta, self.P.psi, lalsim.DetectorPrefixToLALDetector(str(self.P.detector)))
hoft = lal.CutREAL8TimeSeries(hoft, 0, hp.data.length)
if rosDebug:
print('Size before and after detector weighting ', hp.data.length, hoft.data.length)
if rosDebug:
print(' Real h_{IFO}(t) generated, pre-taper : max strain =', np.max(hoft.data.data))
if (self.P.taper != lalsimutils.lsu_TAPER_NONE):
lalsim.SimInspiralREAL8WaveTaper(hoft.data, self.P.taper)
if (self.P.deltaF is not None):
TDlen = int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT))
print('Size sanity check 2 ', int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT)), hoft.data.length)
assert (TDlen >= hoft.data.length)
npts = hoft.data.length
hoft = lal.ResizeREAL8TimeSeries(hoft, 0, TDlen)
hoft.data.data[npts:TDlen] = 0
if rosDebug:
print(' Real h_{IFO}(t) generated : max strain =', np.max(hoft.data.data))
return hoft<|docstring|>Returns the real-valued h(t) that would be produced in a single instrument.
Translates epoch as needed.
Based on 'hoft' in lalsimutils.py<|endoftext|> |
303473c0911e8050b785d89448635da349a10d6daa7fd9eae28c99d62d2775d7 | def non_herm_hoff(self):
"\n Returns the 2-sided h(f) associated with the real-valued h(t) seen in a real instrument.\n Translates epoch as needed.\n Based on 'non_herm_hoff' in lalsimutils.py\n "
htR = self.real_hoft()
if (self.P.deltaF == None):
TDlen = nextPow2(htR.data.length)
htR = lal.ResizeREAL8TimeSeries(htR, 0, TDlen)
else:
TDlen = int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT))
assert (TDlen == htR.data.length)
fwdplan = lal.CreateForwardCOMPLEX16FFTPlan(htR.data.length, 0)
htC = lal.CreateCOMPLEX16TimeSeries('hoft', htR.epoch, htR.f0, htR.deltaT, htR.sampleUnits, htR.data.length)
htC.data.data[:htR.data.length] = htR.data.data
hf = lal.CreateCOMPLEX16FrequencySeries('Template h(f)', htR.epoch, htR.f0, ((1.0 / htR.deltaT) / htR.data.length), lalsimutils.lsu_HertzUnit, htR.data.length)
lal.COMPLEX16TimeFreqFFT(hf, htC, fwdplan)
return hf | Returns the 2-sided h(f) associated with the real-valued h(t) seen in a real instrument.
Translates epoch as needed.
Based on 'non_herm_hoff' in lalsimutils.py | MonteCarloMarginalizeCode/Code/RIFT/physics/EOBTidalExternal.py | non_herm_hoff | liz-champion/research-projects-RIT | 8 | python | def non_herm_hoff(self):
"\n Returns the 2-sided h(f) associated with the real-valued h(t) seen in a real instrument.\n Translates epoch as needed.\n Based on 'non_herm_hoff' in lalsimutils.py\n "
htR = self.real_hoft()
if (self.P.deltaF == None):
TDlen = nextPow2(htR.data.length)
htR = lal.ResizeREAL8TimeSeries(htR, 0, TDlen)
else:
TDlen = int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT))
assert (TDlen == htR.data.length)
fwdplan = lal.CreateForwardCOMPLEX16FFTPlan(htR.data.length, 0)
htC = lal.CreateCOMPLEX16TimeSeries('hoft', htR.epoch, htR.f0, htR.deltaT, htR.sampleUnits, htR.data.length)
htC.data.data[:htR.data.length] = htR.data.data
hf = lal.CreateCOMPLEX16FrequencySeries('Template h(f)', htR.epoch, htR.f0, ((1.0 / htR.deltaT) / htR.data.length), lalsimutils.lsu_HertzUnit, htR.data.length)
lal.COMPLEX16TimeFreqFFT(hf, htC, fwdplan)
return hf | def non_herm_hoff(self):
"\n Returns the 2-sided h(f) associated with the real-valued h(t) seen in a real instrument.\n Translates epoch as needed.\n Based on 'non_herm_hoff' in lalsimutils.py\n "
htR = self.real_hoft()
if (self.P.deltaF == None):
TDlen = nextPow2(htR.data.length)
htR = lal.ResizeREAL8TimeSeries(htR, 0, TDlen)
else:
TDlen = int((((1.0 / self.P.deltaF) * 1.0) / self.P.deltaT))
assert (TDlen == htR.data.length)
fwdplan = lal.CreateForwardCOMPLEX16FFTPlan(htR.data.length, 0)
htC = lal.CreateCOMPLEX16TimeSeries('hoft', htR.epoch, htR.f0, htR.deltaT, htR.sampleUnits, htR.data.length)
htC.data.data[:htR.data.length] = htR.data.data
hf = lal.CreateCOMPLEX16FrequencySeries('Template h(f)', htR.epoch, htR.f0, ((1.0 / htR.deltaT) / htR.data.length), lalsimutils.lsu_HertzUnit, htR.data.length)
lal.COMPLEX16TimeFreqFFT(hf, htC, fwdplan)
return hf<|docstring|>Returns the 2-sided h(f) associated with the real-valued h(t) seen in a real instrument.
Translates epoch as needed.
Based on 'non_herm_hoff' in lalsimutils.py<|endoftext|> |
cd6c2417073e202a278342a6f6d0cb37ffdb4b950257eaae9f84aed273a5900d | def estimateDurationSec(self):
'\n estimateDuration uses the ACTUAL UNITS IN THE WAVEFORM, which are already in sec\n '
return np.real((self.waveform_modes_complex[(2, 2)][((- 1), 0)] - self.waveform_modes_complex[(2, 2)][(0, 0)])) | estimateDuration uses the ACTUAL UNITS IN THE WAVEFORM, which are already in sec | MonteCarloMarginalizeCode/Code/RIFT/physics/EOBTidalExternal.py | estimateDurationSec | liz-champion/research-projects-RIT | 8 | python | def estimateDurationSec(self):
'\n \n '
return np.real((self.waveform_modes_complex[(2, 2)][((- 1), 0)] - self.waveform_modes_complex[(2, 2)][(0, 0)])) | def estimateDurationSec(self):
'\n \n '
return np.real((self.waveform_modes_complex[(2, 2)][((- 1), 0)] - self.waveform_modes_complex[(2, 2)][(0, 0)]))<|docstring|>estimateDuration uses the ACTUAL UNITS IN THE WAVEFORM, which are already in sec<|endoftext|> |
14e195ea0bc67a56012a0115340fe780292310fd25b5f2c8ce95234686c16c19 | def hlmoft(self, force_T=False, deltaT=(1.0 / 16384), time_over_M_zero=0.0, taper_start_time=True):
'\n hlmoft uses stored interpolated values for hlm(t) generated via the standard cleaning process, scaling them \n to physical units for use in injection code.\n\n If the time window is sufficiently short, the result is NOT tapered (!!) -- no additional tapering is applied\n\n The code will ALWAYS have zero padding on the end -- half of the buffer is zero padding!\n This can cause loss of frequency content if you are not careful\n '
hlmT = {}
m_total_s = ((MsunInSec * (self.P.m1 + self.P.m2)) / lal.MSUN_SI)
distance_s = (self.P.dist / lal.C_SI)
T_estimated = np.real((self.waveform_modes_complex[(2, 2)][((- 1), 0)] - self.waveform_modes_complex[(2, 2)][(0, 0)]))
npts = 0
n_crit = 0
if (not force_T):
npts_estimated = int((T_estimated / deltaT))
npts = lalsimutils.nextPow2(npts_estimated)
else:
npts = int((force_T / deltaT))
print(' Forcing length T=', force_T, ' length ', npts)
T_buffer_required = (npts * deltaT)
print(' EOB internal: Estimated time window (sec) ', T_estimated, ' versus buffer duration ', T_buffer_required)
print(' EOB internal: Requested size vs buffer size', npts, len(self.waveform_modes_complex[(2, 2)]))
if ((T_buffer_required / 2) > T_estimated):
tvals = ((np.arange(npts) * deltaT) + float(self.waveform_modes_complex[(2, 2)][(0, 0)]))
t_crit = float((- self.waveform_modes_complex[(2, 2)][(0, 0)]))
n_crit = int((t_crit / deltaT))
else:
print(' EOB internal: Warning LOSSY conversion to insure half of data is zeros ')
tvals = (((T_buffer_required / 2) + ((((- npts) + 1) + np.arange(npts)) * deltaT)) + np.real(self.waveform_modes_complex[(2, 2)][((- 1), 0)]))
t_crit = ((T_buffer_required / 2) - np.real(self.waveform_modes_complex[(2, 2)][((- 1), 0)]))
n_crit = int((t_crit / deltaT))
if rosDebug:
print(' time range being sampled ', [min(tvals), max(tvals)], ' corresponding to dimensionless range', [(min(tvals) / m_total_s), (max(tvals) / m_total_s)])
print(' estimated peak sample at ', n_crit)
for mode in self.waveform_modes.keys():
amp_vals = ((m_total_s / distance_s) * self.waveform_modes_complex_interpolated_amplitude[mode](tvals))
phase_vals = self.waveform_modes_complex_interpolated_phase[mode](tvals)
phase_vals = lalsimutils.unwind_phase(phase_vals)
if rosDebug:
print(' Mode ', mode, ' physical strain max, indx,', np.max(amp_vals), np.argmax(amp_vals))
wfmTS = lal.CreateCOMPLEX16TimeSeries('h', lal.LIGOTimeGPS(0.0), 0.0, deltaT, lalsimutils.lsu_DimensionlessUnit, npts)
wfmTS.data.data = (amp_vals * np.exp((1j * phase_vals)))
if taper_start_time:
tTaper = 1
nTaper = int((tTaper / deltaT))
hoft_window = lal.CreateTukeyREAL8Window((nTaper * 2), 0.8)
factorTaper = hoft_window.data.data[0:nTaper]
wfmTS.data.data[:nTaper] *= factorTaper
hlmT[mode] = wfmTS
epoch_crit = float((- t_crit))
print(' EOB internal: zero epoch sample location', n_crit, np.argmax(np.abs(hlmT[(2, 2)].data.data)))
for mode in hlmT:
hlmT[mode].epoch = epoch_crit
return hlmT | hlmoft uses stored interpolated values for hlm(t) generated via the standard cleaning process, scaling them
to physical units for use in injection code.
If the time window is sufficiently short, the result is NOT tapered (!!) -- no additional tapering is applied
The code will ALWAYS have zero padding on the end -- half of the buffer is zero padding!
This can cause loss of frequency content if you are not careful | MonteCarloMarginalizeCode/Code/RIFT/physics/EOBTidalExternal.py | hlmoft | liz-champion/research-projects-RIT | 8 | python | def hlmoft(self, force_T=False, deltaT=(1.0 / 16384), time_over_M_zero=0.0, taper_start_time=True):
'\n hlmoft uses stored interpolated values for hlm(t) generated via the standard cleaning process, scaling them \n to physical units for use in injection code.\n\n If the time window is sufficiently short, the result is NOT tapered (!!) -- no additional tapering is applied\n\n The code will ALWAYS have zero padding on the end -- half of the buffer is zero padding!\n This can cause loss of frequency content if you are not careful\n '
hlmT = {}
m_total_s = ((MsunInSec * (self.P.m1 + self.P.m2)) / lal.MSUN_SI)
distance_s = (self.P.dist / lal.C_SI)
T_estimated = np.real((self.waveform_modes_complex[(2, 2)][((- 1), 0)] - self.waveform_modes_complex[(2, 2)][(0, 0)]))
npts = 0
n_crit = 0
if (not force_T):
npts_estimated = int((T_estimated / deltaT))
npts = lalsimutils.nextPow2(npts_estimated)
else:
npts = int((force_T / deltaT))
print(' Forcing length T=', force_T, ' length ', npts)
T_buffer_required = (npts * deltaT)
print(' EOB internal: Estimated time window (sec) ', T_estimated, ' versus buffer duration ', T_buffer_required)
print(' EOB internal: Requested size vs buffer size', npts, len(self.waveform_modes_complex[(2, 2)]))
if ((T_buffer_required / 2) > T_estimated):
tvals = ((np.arange(npts) * deltaT) + float(self.waveform_modes_complex[(2, 2)][(0, 0)]))
t_crit = float((- self.waveform_modes_complex[(2, 2)][(0, 0)]))
n_crit = int((t_crit / deltaT))
else:
print(' EOB internal: Warning LOSSY conversion to insure half of data is zeros ')
tvals = (((T_buffer_required / 2) + ((((- npts) + 1) + np.arange(npts)) * deltaT)) + np.real(self.waveform_modes_complex[(2, 2)][((- 1), 0)]))
t_crit = ((T_buffer_required / 2) - np.real(self.waveform_modes_complex[(2, 2)][((- 1), 0)]))
n_crit = int((t_crit / deltaT))
if rosDebug:
print(' time range being sampled ', [min(tvals), max(tvals)], ' corresponding to dimensionless range', [(min(tvals) / m_total_s), (max(tvals) / m_total_s)])
print(' estimated peak sample at ', n_crit)
for mode in self.waveform_modes.keys():
amp_vals = ((m_total_s / distance_s) * self.waveform_modes_complex_interpolated_amplitude[mode](tvals))
phase_vals = self.waveform_modes_complex_interpolated_phase[mode](tvals)
phase_vals = lalsimutils.unwind_phase(phase_vals)
if rosDebug:
print(' Mode ', mode, ' physical strain max, indx,', np.max(amp_vals), np.argmax(amp_vals))
wfmTS = lal.CreateCOMPLEX16TimeSeries('h', lal.LIGOTimeGPS(0.0), 0.0, deltaT, lalsimutils.lsu_DimensionlessUnit, npts)
wfmTS.data.data = (amp_vals * np.exp((1j * phase_vals)))
if taper_start_time:
tTaper = 1
nTaper = int((tTaper / deltaT))
hoft_window = lal.CreateTukeyREAL8Window((nTaper * 2), 0.8)
factorTaper = hoft_window.data.data[0:nTaper]
wfmTS.data.data[:nTaper] *= factorTaper
hlmT[mode] = wfmTS
epoch_crit = float((- t_crit))
print(' EOB internal: zero epoch sample location', n_crit, np.argmax(np.abs(hlmT[(2, 2)].data.data)))
for mode in hlmT:
hlmT[mode].epoch = epoch_crit
return hlmT | def hlmoft(self, force_T=False, deltaT=(1.0 / 16384), time_over_M_zero=0.0, taper_start_time=True):
'\n hlmoft uses stored interpolated values for hlm(t) generated via the standard cleaning process, scaling them \n to physical units for use in injection code.\n\n If the time window is sufficiently short, the result is NOT tapered (!!) -- no additional tapering is applied\n\n The code will ALWAYS have zero padding on the end -- half of the buffer is zero padding!\n This can cause loss of frequency content if you are not careful\n '
hlmT = {}
m_total_s = ((MsunInSec * (self.P.m1 + self.P.m2)) / lal.MSUN_SI)
distance_s = (self.P.dist / lal.C_SI)
T_estimated = np.real((self.waveform_modes_complex[(2, 2)][((- 1), 0)] - self.waveform_modes_complex[(2, 2)][(0, 0)]))
npts = 0
n_crit = 0
if (not force_T):
npts_estimated = int((T_estimated / deltaT))
npts = lalsimutils.nextPow2(npts_estimated)
else:
npts = int((force_T / deltaT))
print(' Forcing length T=', force_T, ' length ', npts)
T_buffer_required = (npts * deltaT)
print(' EOB internal: Estimated time window (sec) ', T_estimated, ' versus buffer duration ', T_buffer_required)
print(' EOB internal: Requested size vs buffer size', npts, len(self.waveform_modes_complex[(2, 2)]))
if ((T_buffer_required / 2) > T_estimated):
tvals = ((np.arange(npts) * deltaT) + float(self.waveform_modes_complex[(2, 2)][(0, 0)]))
t_crit = float((- self.waveform_modes_complex[(2, 2)][(0, 0)]))
n_crit = int((t_crit / deltaT))
else:
print(' EOB internal: Warning LOSSY conversion to insure half of data is zeros ')
tvals = (((T_buffer_required / 2) + ((((- npts) + 1) + np.arange(npts)) * deltaT)) + np.real(self.waveform_modes_complex[(2, 2)][((- 1), 0)]))
t_crit = ((T_buffer_required / 2) - np.real(self.waveform_modes_complex[(2, 2)][((- 1), 0)]))
n_crit = int((t_crit / deltaT))
if rosDebug:
print(' time range being sampled ', [min(tvals), max(tvals)], ' corresponding to dimensionless range', [(min(tvals) / m_total_s), (max(tvals) / m_total_s)])
print(' estimated peak sample at ', n_crit)
for mode in self.waveform_modes.keys():
amp_vals = ((m_total_s / distance_s) * self.waveform_modes_complex_interpolated_amplitude[mode](tvals))
phase_vals = self.waveform_modes_complex_interpolated_phase[mode](tvals)
phase_vals = lalsimutils.unwind_phase(phase_vals)
if rosDebug:
print(' Mode ', mode, ' physical strain max, indx,', np.max(amp_vals), np.argmax(amp_vals))
wfmTS = lal.CreateCOMPLEX16TimeSeries('h', lal.LIGOTimeGPS(0.0), 0.0, deltaT, lalsimutils.lsu_DimensionlessUnit, npts)
wfmTS.data.data = (amp_vals * np.exp((1j * phase_vals)))
if taper_start_time:
tTaper = 1
nTaper = int((tTaper / deltaT))
hoft_window = lal.CreateTukeyREAL8Window((nTaper * 2), 0.8)
factorTaper = hoft_window.data.data[0:nTaper]
wfmTS.data.data[:nTaper] *= factorTaper
hlmT[mode] = wfmTS
epoch_crit = float((- t_crit))
print(' EOB internal: zero epoch sample location', n_crit, np.argmax(np.abs(hlmT[(2, 2)].data.data)))
for mode in hlmT:
hlmT[mode].epoch = epoch_crit
return hlmT<|docstring|>hlmoft uses stored interpolated values for hlm(t) generated via the standard cleaning process, scaling them
to physical units for use in injection code.
If the time window is sufficiently short, the result is NOT tapered (!!) -- no additional tapering is applied
The code will ALWAYS have zero padding on the end -- half of the buffer is zero padding!
This can cause loss of frequency content if you are not careful<|endoftext|> |
1cf9cf8b1efb1295b21c5b251670c359323e2a785fabdd631c6def362ffc0931 | def hlmoff(self, force_T=False, deltaT=(1.0 / 16384), time_over_M_zero=0.0):
'\n hlmoff takes fourier transforms of LAL timeseries generated from hlmoft.\n All modes have physical units, appropriate to a physical signal.\n '
hlmF = {}
hlmT = self.hlmoft(force_T=force_T, deltaT=deltaT, time_over_M_zero=time_over_M_zero)
for mode in self.waveform_modes.keys():
wfmTS = hlmT[mode]
wfmFD = lalsimutils.DataFourier(wfmTS)
hlmF[mode] = wfmFD
return hlmF | hlmoff takes fourier transforms of LAL timeseries generated from hlmoft.
All modes have physical units, appropriate to a physical signal. | MonteCarloMarginalizeCode/Code/RIFT/physics/EOBTidalExternal.py | hlmoff | liz-champion/research-projects-RIT | 8 | python | def hlmoff(self, force_T=False, deltaT=(1.0 / 16384), time_over_M_zero=0.0):
'\n hlmoff takes fourier transforms of LAL timeseries generated from hlmoft.\n All modes have physical units, appropriate to a physical signal.\n '
hlmF = {}
hlmT = self.hlmoft(force_T=force_T, deltaT=deltaT, time_over_M_zero=time_over_M_zero)
for mode in self.waveform_modes.keys():
wfmTS = hlmT[mode]
wfmFD = lalsimutils.DataFourier(wfmTS)
hlmF[mode] = wfmFD
return hlmF | def hlmoff(self, force_T=False, deltaT=(1.0 / 16384), time_over_M_zero=0.0):
'\n hlmoff takes fourier transforms of LAL timeseries generated from hlmoft.\n All modes have physical units, appropriate to a physical signal.\n '
hlmF = {}
hlmT = self.hlmoft(force_T=force_T, deltaT=deltaT, time_over_M_zero=time_over_M_zero)
for mode in self.waveform_modes.keys():
wfmTS = hlmT[mode]
wfmFD = lalsimutils.DataFourier(wfmTS)
hlmF[mode] = wfmFD
return hlmF<|docstring|>hlmoff takes fourier transforms of LAL timeseries generated from hlmoft.
All modes have physical units, appropriate to a physical signal.<|endoftext|> |
eb936a045f1cb696f7d74ca502edd079ba84d38d1bbeee1a92b57f56718c5cc3 | def __string__(self):
'\n\n :return: A description of the instance\n :rtype string:\n '
return f'{self.operation()} {self.value}' | :return: A description of the instance
:rtype string: | exercises/CI/linting/resources/src/operations.py | __string__ | lvl-up/cic | 1 | python | def __string__(self):
'\n\n :return: A description of the instance\n :rtype string:\n '
return f'{self.operation()} {self.value}' | def __string__(self):
'\n\n :return: A description of the instance\n :rtype string:\n '
return f'{self.operation()} {self.value}'<|docstring|>:return: A description of the instance
:rtype string:<|endoftext|> |
e17f87bf47219bbf0f71858e1908be21ac517e386562d7f314456a88351c8306 | @abstractmethod
def apply(self, running_total):
'\n Interface method for applying an operation to a given valuee\n ' | Interface method for applying an operation to a given valuee | exercises/CI/linting/resources/src/operations.py | apply | lvl-up/cic | 1 | python | @abstractmethod
def apply(self, running_total):
'\n \n ' | @abstractmethod
def apply(self, running_total):
'\n \n '<|docstring|>Interface method for applying an operation to a given valuee<|endoftext|> |
5150a8ebc6694ff8bffe4bdc7f07400e2fdd7d9c2d0bf93fbd8792b974c91964 | @abstractmethod
def operation(self):
'\n Interface method to declare the operation\n ' | Interface method to declare the operation | exercises/CI/linting/resources/src/operations.py | operation | lvl-up/cic | 1 | python | @abstractmethod
def operation(self):
'\n \n ' | @abstractmethod
def operation(self):
'\n \n '<|docstring|>Interface method to declare the operation<|endoftext|> |
d780a7929e592880c7535dddcf0cc21b751fd8f59f1655c15e199af508365912 | def apply(self, running_total):
'\n Apply this add operation to the given value\n\n :param running_total: The value to which the operation should be applied\n :type running_total: int\n :return: the result having applied the operation\n :rtype :int\n '
return (running_total + self.value) | Apply this add operation to the given value
:param running_total: The value to which the operation should be applied
:type running_total: int
:return: the result having applied the operation
:rtype :int | exercises/CI/linting/resources/src/operations.py | apply | lvl-up/cic | 1 | python | def apply(self, running_total):
'\n Apply this add operation to the given value\n\n :param running_total: The value to which the operation should be applied\n :type running_total: int\n :return: the result having applied the operation\n :rtype :int\n '
return (running_total + self.value) | def apply(self, running_total):
'\n Apply this add operation to the given value\n\n :param running_total: The value to which the operation should be applied\n :type running_total: int\n :return: the result having applied the operation\n :rtype :int\n '
return (running_total + self.value)<|docstring|>Apply this add operation to the given value
:param running_total: The value to which the operation should be applied
:type running_total: int
:return: the result having applied the operation
:rtype :int<|endoftext|> |
fdbb70f53372d435d35cc404d52ca808cd756b629a4c57135f5a7befeeaaadce | def operation(self):
'\n Operation applied by this class of operation\n\n :return: the operation\n :rtype: string\n '
return '+' | Operation applied by this class of operation
:return: the operation
:rtype: string | exercises/CI/linting/resources/src/operations.py | operation | lvl-up/cic | 1 | python | def operation(self):
'\n Operation applied by this class of operation\n\n :return: the operation\n :rtype: string\n '
return '+' | def operation(self):
'\n Operation applied by this class of operation\n\n :return: the operation\n :rtype: string\n '
return '+'<|docstring|>Operation applied by this class of operation
:return: the operation
:rtype: string<|endoftext|> |
ed72fac0b34a914f133827c9210d61c1253373bd219d11ede02406d399a475d3 | def set_box_color(bp, color):
'Set colour for boxplot.'
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color) | Set colour for boxplot. | numerical_experiments/quad_num_exp_SNR_plots.py | set_box_color | Megscammell/Estimating-Direction | 0 | python | def set_box_color(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color) | def set_box_color(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color)<|docstring|>Set colour for boxplot.<|endoftext|> |
fe7618b3c7718c8ce4d64c2ca9371cb68b23a19862ff1049497867dfa3e9f2db | def create_boxplots_ratio_2(arr1, arr2, labels, m, n, lambda_max, title, ticks, no_vars, range_1, range_2, region, function_type, func_evals):
'Create boxplots.'
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T, positions=((np.array(range(len(arr1))) * 2.0) - 0.4))
bpr = plt.boxplot(arr2.T, positions=((np.array(range(len(arr2))) * 2.0) + 0.4))
set_box_color(bpl, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='navy', label=labels[0])
plt.plot([], c='purple', label=labels[1])
plt.xlabel('SNR', size=14)
plt.xticks(np.arange(0, (len(ticks) * 2), 2), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig(('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, no_vars, region, function_type, func_evals))) | Create boxplots. | numerical_experiments/quad_num_exp_SNR_plots.py | create_boxplots_ratio_2 | Megscammell/Estimating-Direction | 0 | python | def create_boxplots_ratio_2(arr1, arr2, labels, m, n, lambda_max, title, ticks, no_vars, range_1, range_2, region, function_type, func_evals):
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T, positions=((np.array(range(len(arr1))) * 2.0) - 0.4))
bpr = plt.boxplot(arr2.T, positions=((np.array(range(len(arr2))) * 2.0) + 0.4))
set_box_color(bpl, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='navy', label=labels[0])
plt.plot([], c='purple', label=labels[1])
plt.xlabel('SNR', size=14)
plt.xticks(np.arange(0, (len(ticks) * 2), 2), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig(('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, no_vars, region, function_type, func_evals))) | def create_boxplots_ratio_2(arr1, arr2, labels, m, n, lambda_max, title, ticks, no_vars, range_1, range_2, region, function_type, func_evals):
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T, positions=((np.array(range(len(arr1))) * 2.0) - 0.4))
bpr = plt.boxplot(arr2.T, positions=((np.array(range(len(arr2))) * 2.0) + 0.4))
set_box_color(bpl, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='navy', label=labels[0])
plt.plot([], c='purple', label=labels[1])
plt.xlabel('SNR', size=14)
plt.xticks(np.arange(0, (len(ticks) * 2), 2), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig(('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, no_vars, region, function_type, func_evals)))<|docstring|>Create boxplots.<|endoftext|> |
5fff2df0ff4d933e20459e4b2d2458ef9525f98b6ec52c43659f08ae96a5e094 | def create_boxplots_ratio_3(arr1, arr2, arr3, labels, m, n, lambda_max, title, ticks, no_vars, range_1, range_2, region, function_type, func_evals):
'Create boxplots.'
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T, positions=((np.array(range(len(arr1))) * 3.0) - 0.6))
bpc = plt.boxplot(arr2.T, positions=(np.array(range(len(arr1))) * 3.0))
bpr = plt.boxplot(arr3.T, positions=((np.array(range(len(arr2))) * 3.0) + 0.6))
set_box_color(bpl, 'green')
set_box_color(bpc, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='green', label=labels[0])
plt.plot([], c='navy', label=labels[1])
plt.plot([], c='purple', label=labels[2])
plt.xlabel('SNR', size=14)
plt.xticks(np.arange(0, (len(ticks) * 3), 3), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig(('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, region, function_type, func_evals))) | Create boxplots. | numerical_experiments/quad_num_exp_SNR_plots.py | create_boxplots_ratio_3 | Megscammell/Estimating-Direction | 0 | python | def create_boxplots_ratio_3(arr1, arr2, arr3, labels, m, n, lambda_max, title, ticks, no_vars, range_1, range_2, region, function_type, func_evals):
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T, positions=((np.array(range(len(arr1))) * 3.0) - 0.6))
bpc = plt.boxplot(arr2.T, positions=(np.array(range(len(arr1))) * 3.0))
bpr = plt.boxplot(arr3.T, positions=((np.array(range(len(arr2))) * 3.0) + 0.6))
set_box_color(bpl, 'green')
set_box_color(bpc, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='green', label=labels[0])
plt.plot([], c='navy', label=labels[1])
plt.plot([], c='purple', label=labels[2])
plt.xlabel('SNR', size=14)
plt.xticks(np.arange(0, (len(ticks) * 3), 3), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig(('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, region, function_type, func_evals))) | def create_boxplots_ratio_3(arr1, arr2, arr3, labels, m, n, lambda_max, title, ticks, no_vars, range_1, range_2, region, function_type, func_evals):
plt.figure(figsize=(5, 5))
plt.ylim(range_1, range_2)
bpl = plt.boxplot(arr1.T, positions=((np.array(range(len(arr1))) * 3.0) - 0.6))
bpc = plt.boxplot(arr2.T, positions=(np.array(range(len(arr1))) * 3.0))
bpr = plt.boxplot(arr3.T, positions=((np.array(range(len(arr2))) * 3.0) + 0.6))
set_box_color(bpl, 'green')
set_box_color(bpc, 'navy')
set_box_color(bpr, 'purple')
plt.plot([], c='green', label=labels[0])
plt.plot([], c='navy', label=labels[1])
plt.plot([], c='purple', label=labels[2])
plt.xlabel('SNR', size=14)
plt.xticks(np.arange(0, (len(ticks) * 3), 3), ticks, size=15)
plt.yticks(fontsize=14)
plt.tight_layout()
plt.savefig(('%s_ratio_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, region, function_type, func_evals)))<|docstring|>Create boxplots.<|endoftext|> |
717cb07275609e904c8f35bd2b2c4e485aa715d740208ec3e47285c70c741542 | def create_scatter_plot(arr1, arr2, arr1_title, arr2_title, labels_legend, title, m, n, lambda_max, no_vars, max_num, region, function_type, func_evals):
'Create scatter plots.'
plt.figure(figsize=(7, 5))
plt.ylim((- 0.1), max_num)
plt.xlim((- 0.1), max_num)
plt.xlabel(arr1_title, size=14)
plt.ylabel(arr2_title, size=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
color_list = [sns.xkcd_rgb['pale red'], sns.xkcd_rgb['medium blue'], sns.xkcd_rgb['medium purple'], sns.xkcd_rgb['medium green'], sns.xkcd_rgb['pale orange'], sns.xkcd_rgb['pale pink']]
for j in range(arr1.shape[0]):
plt.scatter(arr1[j], arr2[j], marker='*', color=color_list[j])
plt.plot([], c=color_list[j], label=labels_legend[j])
plt.legend(bbox_to_anchor=(0.99, 1.025), loc='upper left', prop={'size': 14})
plt.plot([0, max_num], [0, max_num], color='black')
plt.tight_layout()
plt.savefig(('%s_scatter_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, region, function_type, func_evals))) | Create scatter plots. | numerical_experiments/quad_num_exp_SNR_plots.py | create_scatter_plot | Megscammell/Estimating-Direction | 0 | python | def create_scatter_plot(arr1, arr2, arr1_title, arr2_title, labels_legend, title, m, n, lambda_max, no_vars, max_num, region, function_type, func_evals):
plt.figure(figsize=(7, 5))
plt.ylim((- 0.1), max_num)
plt.xlim((- 0.1), max_num)
plt.xlabel(arr1_title, size=14)
plt.ylabel(arr2_title, size=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
color_list = [sns.xkcd_rgb['pale red'], sns.xkcd_rgb['medium blue'], sns.xkcd_rgb['medium purple'], sns.xkcd_rgb['medium green'], sns.xkcd_rgb['pale orange'], sns.xkcd_rgb['pale pink']]
for j in range(arr1.shape[0]):
plt.scatter(arr1[j], arr2[j], marker='*', color=color_list[j])
plt.plot([], c=color_list[j], label=labels_legend[j])
plt.legend(bbox_to_anchor=(0.99, 1.025), loc='upper left', prop={'size': 14})
plt.plot([0, max_num], [0, max_num], color='black')
plt.tight_layout()
plt.savefig(('%s_scatter_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, region, function_type, func_evals))) | def create_scatter_plot(arr1, arr2, arr1_title, arr2_title, labels_legend, title, m, n, lambda_max, no_vars, max_num, region, function_type, func_evals):
plt.figure(figsize=(7, 5))
plt.ylim((- 0.1), max_num)
plt.xlim((- 0.1), max_num)
plt.xlabel(arr1_title, size=14)
plt.ylabel(arr2_title, size=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
color_list = [sns.xkcd_rgb['pale red'], sns.xkcd_rgb['medium blue'], sns.xkcd_rgb['medium purple'], sns.xkcd_rgb['medium green'], sns.xkcd_rgb['pale orange'], sns.xkcd_rgb['pale pink']]
for j in range(arr1.shape[0]):
plt.scatter(arr1[j], arr2[j], marker='*', color=color_list[j])
plt.plot([], c=color_list[j], label=labels_legend[j])
plt.legend(bbox_to_anchor=(0.99, 1.025), loc='upper left', prop={'size': 14})
plt.plot([0, max_num], [0, max_num], color='black')
plt.tight_layout()
plt.savefig(('%s_scatter_m_%s_n_%s_lambda_max_%s_%s_%s_%s_%s.png' % (title, m, n, lambda_max, no_vars, region, function_type, func_evals)))<|docstring|>Create scatter plots.<|endoftext|> |
c9a9be1661fc099930f5edccfcece46a55b6dab34788f87ccf5564cbfbb6138b | def row_sum_odd_numbers(n):
'\n Given the triangle of consecutive odd numbers\n '
result = 0
start = ((n * n) - (n - 1))
end = (start + (n * 2))
for i in range(start, end, 2):
result += i
return result | Given the triangle of consecutive odd numbers | python/7kyu/sum_of_odd_numbers.py | row_sum_odd_numbers | momchilantonov/codewars | 0 | python | def row_sum_odd_numbers(n):
'\n \n '
result = 0
start = ((n * n) - (n - 1))
end = (start + (n * 2))
for i in range(start, end, 2):
result += i
return result | def row_sum_odd_numbers(n):
'\n \n '
result = 0
start = ((n * n) - (n - 1))
end = (start + (n * 2))
for i in range(start, end, 2):
result += i
return result<|docstring|>Given the triangle of consecutive odd numbers<|endoftext|> |
dad452e83ccdc3148b6aaa61c1b105af1e1d45b3dd3692520ed5c8c8786bd89d | def __init__(self, featured_entry):
'Initialize the commit operation.'
super().__init__()
self._featured_entry = featured_entry | Initialize the commit operation. | invenio_communities/communities/services/uow.py | __init__ | mb-wali/invenio-communities | 0 | python | def __init__(self, featured_entry):
super().__init__()
self._featured_entry = featured_entry | def __init__(self, featured_entry):
super().__init__()
self._featured_entry = featured_entry<|docstring|>Initialize the commit operation.<|endoftext|> |
178175ddd61830ce1b01ae069c548d9665332352c2780028ea48fe16b2e7b268 | def on_register(self, uow):
'Add to db session.'
db.session.add(self._featured_entry) | Add to db session. | invenio_communities/communities/services/uow.py | on_register | mb-wali/invenio-communities | 0 | python | def on_register(self, uow):
db.session.add(self._featured_entry) | def on_register(self, uow):
db.session.add(self._featured_entry)<|docstring|>Add to db session.<|endoftext|> |
cf0dee866351938fe5c700762810b538b31ee372e3f8efbca5daed8eebed0be7 | def __init__(self, featured_entry):
'Initialize the delete operation.'
super().__init__()
self._featured_entry = featured_entry | Initialize the delete operation. | invenio_communities/communities/services/uow.py | __init__ | mb-wali/invenio-communities | 0 | python | def __init__(self, featured_entry):
super().__init__()
self._featured_entry = featured_entry | def __init__(self, featured_entry):
super().__init__()
self._featured_entry = featured_entry<|docstring|>Initialize the delete operation.<|endoftext|> |
994483827941aad9056c697066b78ab7601ba62a7cba8f69ff9749f534f5291f | def on_register(self, uow):
'Delete entry.'
db.session.delete(self._featured_entry) | Delete entry. | invenio_communities/communities/services/uow.py | on_register | mb-wali/invenio-communities | 0 | python | def on_register(self, uow):
db.session.delete(self._featured_entry) | def on_register(self, uow):
db.session.delete(self._featured_entry)<|docstring|>Delete entry.<|endoftext|> |
f5111ea26b00f458e387c98b855692cd7ce46364cbab92b852a4adf869f761e3 | def mesh2tri(nodes, elements):
'Generate a matplotlib.tri.Triangulation object from the mesh\n \n Parameters\n ----------\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n `number coordX coordY BCX BCY`\n elements : ndarray (int)\n Array with the node number for the nodes that correspond to each\n element.\n \n Returns\n -------\n tri : Triangulation\n An unstructured triangular grid consisting of npoints points\n and ntri triangles.\n \n '
x = nodes[(:, 1)]
y = nodes[(:, 2)]
triangs = []
for el in elements:
if (el[1] == 1):
triangs.append(el[[3, 4, 5]])
triangs.append(el[[5, 6, 3]])
if (el[1] == 2):
triangs.append(el[[3, 6, 8]])
triangs.append(el[[6, 7, 8]])
triangs.append(el[[6, 4, 7]])
triangs.append(el[[7, 5, 8]])
if (el[1] == 3):
triangs.append(el[3:])
tri = Triangulation(x, y, np.array(triangs))
return tri | Generate a matplotlib.tri.Triangulation object from the mesh
Parameters
----------
nodes : ndarray (float)
Array with number and nodes coordinates:
`number coordX coordY BCX BCY`
elements : ndarray (int)
Array with the node number for the nodes that correspond to each
element.
Returns
-------
tri : Triangulation
An unstructured triangular grid consisting of npoints points
and ntri triangles. | solidspydyn/postprocesor.py | mesh2tri | jgomezc1/SOLIDSPy_DYN | 1 | python | def mesh2tri(nodes, elements):
'Generate a matplotlib.tri.Triangulation object from the mesh\n \n Parameters\n ----------\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n `number coordX coordY BCX BCY`\n elements : ndarray (int)\n Array with the node number for the nodes that correspond to each\n element.\n \n Returns\n -------\n tri : Triangulation\n An unstructured triangular grid consisting of npoints points\n and ntri triangles.\n \n '
x = nodes[(:, 1)]
y = nodes[(:, 2)]
triangs = []
for el in elements:
if (el[1] == 1):
triangs.append(el[[3, 4, 5]])
triangs.append(el[[5, 6, 3]])
if (el[1] == 2):
triangs.append(el[[3, 6, 8]])
triangs.append(el[[6, 7, 8]])
triangs.append(el[[6, 4, 7]])
triangs.append(el[[7, 5, 8]])
if (el[1] == 3):
triangs.append(el[3:])
tri = Triangulation(x, y, np.array(triangs))
return tri | def mesh2tri(nodes, elements):
'Generate a matplotlib.tri.Triangulation object from the mesh\n \n Parameters\n ----------\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n `number coordX coordY BCX BCY`\n elements : ndarray (int)\n Array with the node number for the nodes that correspond to each\n element.\n \n Returns\n -------\n tri : Triangulation\n An unstructured triangular grid consisting of npoints points\n and ntri triangles.\n \n '
x = nodes[(:, 1)]
y = nodes[(:, 2)]
triangs = []
for el in elements:
if (el[1] == 1):
triangs.append(el[[3, 4, 5]])
triangs.append(el[[5, 6, 3]])
if (el[1] == 2):
triangs.append(el[[3, 6, 8]])
triangs.append(el[[6, 7, 8]])
triangs.append(el[[6, 4, 7]])
triangs.append(el[[7, 5, 8]])
if (el[1] == 3):
triangs.append(el[3:])
tri = Triangulation(x, y, np.array(triangs))
return tri<|docstring|>Generate a matplotlib.tri.Triangulation object from the mesh
Parameters
----------
nodes : ndarray (float)
Array with number and nodes coordinates:
`number coordX coordY BCX BCY`
elements : ndarray (int)
Array with the node number for the nodes that correspond to each
element.
Returns
-------
tri : Triangulation
An unstructured triangular grid consisting of npoints points
and ntri triangles.<|endoftext|> |
632a13e5d0aefaf51f3f87198ae9b79abab537f74b4d976a1b862ace2a2ca963 | def plot_disp(UC, nodes, elements, plt_type='contourf', levels=12, savefigs=False, title='Solution:'):
'Plot the nodal displacement using a triangulation\n\n Parameters\n ----------\n UC : ndarray (float)\n Array with the displacements.\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n `number coordX coordY BCX BCY`\n elements : ndarray (int)\n Array with the node number for the nodes that correspond to each\n element.\n\n '
tri = mesh2tri(nodes, elements)
tri_plot(tri, UC[(:, 0)], title='$u_x$', figtitle=(title + 'Horizontal displacement'), levels=levels, plt_type=plt_type, savefigs=savefigs, filename='ux_sol.pdf')
tri_plot(tri, UC[(:, 1)], title='$u_y$', figtitle=(title + 'Vertical displacement'), levels=levels, plt_type=plt_type, savefigs=savefigs, filename='uy_sol.pdf') | Plot the nodal displacement using a triangulation
Parameters
----------
UC : ndarray (float)
Array with the displacements.
nodes : ndarray (float)
Array with number and nodes coordinates:
`number coordX coordY BCX BCY`
elements : ndarray (int)
Array with the node number for the nodes that correspond to each
element. | solidspydyn/postprocesor.py | plot_disp | jgomezc1/SOLIDSPy_DYN | 1 | python | def plot_disp(UC, nodes, elements, plt_type='contourf', levels=12, savefigs=False, title='Solution:'):
'Plot the nodal displacement using a triangulation\n\n Parameters\n ----------\n UC : ndarray (float)\n Array with the displacements.\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n `number coordX coordY BCX BCY`\n elements : ndarray (int)\n Array with the node number for the nodes that correspond to each\n element.\n\n '
tri = mesh2tri(nodes, elements)
tri_plot(tri, UC[(:, 0)], title='$u_x$', figtitle=(title + 'Horizontal displacement'), levels=levels, plt_type=plt_type, savefigs=savefigs, filename='ux_sol.pdf')
tri_plot(tri, UC[(:, 1)], title='$u_y$', figtitle=(title + 'Vertical displacement'), levels=levels, plt_type=plt_type, savefigs=savefigs, filename='uy_sol.pdf') | def plot_disp(UC, nodes, elements, plt_type='contourf', levels=12, savefigs=False, title='Solution:'):
'Plot the nodal displacement using a triangulation\n\n Parameters\n ----------\n UC : ndarray (float)\n Array with the displacements.\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n `number coordX coordY BCX BCY`\n elements : ndarray (int)\n Array with the node number for the nodes that correspond to each\n element.\n\n '
tri = mesh2tri(nodes, elements)
tri_plot(tri, UC[(:, 0)], title='$u_x$', figtitle=(title + 'Horizontal displacement'), levels=levels, plt_type=plt_type, savefigs=savefigs, filename='ux_sol.pdf')
tri_plot(tri, UC[(:, 1)], title='$u_y$', figtitle=(title + 'Vertical displacement'), levels=levels, plt_type=plt_type, savefigs=savefigs, filename='uy_sol.pdf')<|docstring|>Plot the nodal displacement using a triangulation
Parameters
----------
UC : ndarray (float)
Array with the displacements.
nodes : ndarray (float)
Array with number and nodes coordinates:
`number coordX coordY BCX BCY`
elements : ndarray (int)
Array with the node number for the nodes that correspond to each
element.<|endoftext|> |
f11b8df1587148d962ee9d492f6d1510696f197574099a528583960ff2e3ffd3 | def grafmat(k):
'Plot stiffness matrix sparsity\n\n Parameters\n ----------\n k : ndarray (int)\n Stiffness matrix of the system.\n\n '
plt.figure('Stiffness matrix')
plt.spy(k)
plt.title('Stiffness matrix')
plt.ylabel('$i$ index', size=10)
plt.xlabel('$j$ index', size=10) | Plot stiffness matrix sparsity
Parameters
----------
k : ndarray (int)
Stiffness matrix of the system. | solidspydyn/postprocesor.py | grafmat | jgomezc1/SOLIDSPy_DYN | 1 | python | def grafmat(k):
'Plot stiffness matrix sparsity\n\n Parameters\n ----------\n k : ndarray (int)\n Stiffness matrix of the system.\n\n '
plt.figure('Stiffness matrix')
plt.spy(k)
plt.title('Stiffness matrix')
plt.ylabel('$i$ index', size=10)
plt.xlabel('$j$ index', size=10) | def grafmat(k):
'Plot stiffness matrix sparsity\n\n Parameters\n ----------\n k : ndarray (int)\n Stiffness matrix of the system.\n\n '
plt.figure('Stiffness matrix')
plt.spy(k)
plt.title('Stiffness matrix')
plt.ylabel('$i$ index', size=10)
plt.xlabel('$j$ index', size=10)<|docstring|>Plot stiffness matrix sparsity
Parameters
----------
k : ndarray (int)
Stiffness matrix of the system.<|endoftext|> |
6e95c4766c9394111a3002212d2c6084a18599b174b5a181c09b12a227db8cfd | def complete_disp(IBC, nodes, UG):
'\n Fill the displacement vectors with imposed and computed values.\n \n IBC : ndarray (int)\n IBC (Indicator of Boundary Conditions) indicates if the nodes\n has any type of boundary conditions applied to it.\n UG : ndarray (float)\n Array with the computed displacements.\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n \n Returns\n -------\n UC : ndarray (float)\n Array with the displacements.\n\n '
nn = nodes.shape[0]
UC = np.zeros([nn, 2], dtype=np.float)
for i in range(nn):
for j in range(2):
kk = IBC[(i, j)]
if (kk == (- 1)):
UC[(i, j)] = 0.0
else:
UC[(i, j)] = UG[kk]
return UC | Fill the displacement vectors with imposed and computed values.
IBC : ndarray (int)
IBC (Indicator of Boundary Conditions) indicates if the nodes
has any type of boundary conditions applied to it.
UG : ndarray (float)
Array with the computed displacements.
nodes : ndarray (float)
Array with number and nodes coordinates:
Returns
-------
UC : ndarray (float)
Array with the displacements. | solidspydyn/postprocesor.py | complete_disp | jgomezc1/SOLIDSPy_DYN | 1 | python | def complete_disp(IBC, nodes, UG):
'\n Fill the displacement vectors with imposed and computed values.\n \n IBC : ndarray (int)\n IBC (Indicator of Boundary Conditions) indicates if the nodes\n has any type of boundary conditions applied to it.\n UG : ndarray (float)\n Array with the computed displacements.\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n \n Returns\n -------\n UC : ndarray (float)\n Array with the displacements.\n\n '
nn = nodes.shape[0]
UC = np.zeros([nn, 2], dtype=np.float)
for i in range(nn):
for j in range(2):
kk = IBC[(i, j)]
if (kk == (- 1)):
UC[(i, j)] = 0.0
else:
UC[(i, j)] = UG[kk]
return UC | def complete_disp(IBC, nodes, UG):
'\n Fill the displacement vectors with imposed and computed values.\n \n IBC : ndarray (int)\n IBC (Indicator of Boundary Conditions) indicates if the nodes\n has any type of boundary conditions applied to it.\n UG : ndarray (float)\n Array with the computed displacements.\n nodes : ndarray (float)\n Array with number and nodes coordinates:\n \n Returns\n -------\n UC : ndarray (float)\n Array with the displacements.\n\n '
nn = nodes.shape[0]
UC = np.zeros([nn, 2], dtype=np.float)
for i in range(nn):
for j in range(2):
kk = IBC[(i, j)]
if (kk == (- 1)):
UC[(i, j)] = 0.0
else:
UC[(i, j)] = UG[kk]
return UC<|docstring|>Fill the displacement vectors with imposed and computed values.
IBC : ndarray (int)
IBC (Indicator of Boundary Conditions) indicates if the nodes
has any type of boundary conditions applied to it.
UG : ndarray (float)
Array with the computed displacements.
nodes : ndarray (float)
Array with number and nodes coordinates:
Returns
-------
UC : ndarray (float)
Array with the displacements.<|endoftext|> |
fa4b1e678fd3030834a957cec83d269a023e63bbb2ee254b6e6ea45b9987f610 | def eigvals(A, tol=1e-06):
'Eigenvalues and eigenvectors for a 2x2 symmetric matrix/tensor\n \n Parameters\n ----------\n A : ndarray\n Symmetric matrix.\n tol : float (optional)\n Tolerance for considering a matrix diagonal.\n\n Returns\n -------\n eig1 : float\n First eigenvalue.\n eig2 : float\n Second eigenvalue.\n vec1 : ndarray\n First eigenvector.\n vec2 : ndarray\n Second eigenvector\n \n Examples\n --------\n \n >>> A = np.array([[5, 6],\n ... [6, 9]])\n >>> eig1, eig2, vec1, vec2 = eigvals(A)\n >>> np.allclose(eig1, 7 + 2*np.sqrt(10))\n True\n >>> np.allclose(eig2, 7 - 2*np.sqrt(10))\n True\n >>> np.allclose(vec1, np.array([-0.584710284663765, -0.8112421851755609]))\n True\n >>> np.allclose(vec2, np.array([-0.8112421851755609,0.584710284663765]))\n True\n \n '
if (np.abs(A).max() < tol):
eig1 = 0.0
eig2 = 0.0
vec1 = np.array([np.NaN, np.NaN])
vec2 = np.array([np.NaN, np.NaN])
elif ((abs(A[(0, 1)]) / np.abs(A).max()) < tol):
eig1 = A[(0, 0)]
eig2 = A[(1, 1)]
vec1 = np.array([1, 0])
vec2 = np.array([0, 1])
else:
tr = (A[(0, 0)] + A[(1, 1)])
det = ((A[(0, 0)] * A[(1, 1)]) - (A[(0, 1)] ** 2))
eig1 = (0.5 * (tr - np.sqrt(((tr ** 2) - (4 * det)))))
eig2 = (0.5 * (tr + np.sqrt(((tr ** 2) - (4 * det)))))
vec1 = np.array([(A[(0, 0)] - eig2), A[(0, 1)]])
vec1 = (vec1 / np.sqrt(((vec1[0] ** 2) + (vec1[1] ** 2))))
vec2 = np.array([(- vec1[1]), vec1[0]])
if (abs(eig2) > abs(eig1)):
(eig2, eig1) = (eig1, eig2)
(vec2, vec1) = (vec1, vec2)
return (eig1, eig2, vec1, vec2) | Eigenvalues and eigenvectors for a 2x2 symmetric matrix/tensor
Parameters
----------
A : ndarray
Symmetric matrix.
tol : float (optional)
Tolerance for considering a matrix diagonal.
Returns
-------
eig1 : float
First eigenvalue.
eig2 : float
Second eigenvalue.
vec1 : ndarray
First eigenvector.
vec2 : ndarray
Second eigenvector
Examples
--------
>>> A = np.array([[5, 6],
... [6, 9]])
>>> eig1, eig2, vec1, vec2 = eigvals(A)
>>> np.allclose(eig1, 7 + 2*np.sqrt(10))
True
>>> np.allclose(eig2, 7 - 2*np.sqrt(10))
True
>>> np.allclose(vec1, np.array([-0.584710284663765, -0.8112421851755609]))
True
>>> np.allclose(vec2, np.array([-0.8112421851755609,0.584710284663765]))
True | solidspydyn/postprocesor.py | eigvals | jgomezc1/SOLIDSPy_DYN | 1 | python | def eigvals(A, tol=1e-06):
'Eigenvalues and eigenvectors for a 2x2 symmetric matrix/tensor\n \n Parameters\n ----------\n A : ndarray\n Symmetric matrix.\n tol : float (optional)\n Tolerance for considering a matrix diagonal.\n\n Returns\n -------\n eig1 : float\n First eigenvalue.\n eig2 : float\n Second eigenvalue.\n vec1 : ndarray\n First eigenvector.\n vec2 : ndarray\n Second eigenvector\n \n Examples\n --------\n \n >>> A = np.array([[5, 6],\n ... [6, 9]])\n >>> eig1, eig2, vec1, vec2 = eigvals(A)\n >>> np.allclose(eig1, 7 + 2*np.sqrt(10))\n True\n >>> np.allclose(eig2, 7 - 2*np.sqrt(10))\n True\n >>> np.allclose(vec1, np.array([-0.584710284663765, -0.8112421851755609]))\n True\n >>> np.allclose(vec2, np.array([-0.8112421851755609,0.584710284663765]))\n True\n \n '
if (np.abs(A).max() < tol):
eig1 = 0.0
eig2 = 0.0
vec1 = np.array([np.NaN, np.NaN])
vec2 = np.array([np.NaN, np.NaN])
elif ((abs(A[(0, 1)]) / np.abs(A).max()) < tol):
eig1 = A[(0, 0)]
eig2 = A[(1, 1)]
vec1 = np.array([1, 0])
vec2 = np.array([0, 1])
else:
tr = (A[(0, 0)] + A[(1, 1)])
det = ((A[(0, 0)] * A[(1, 1)]) - (A[(0, 1)] ** 2))
eig1 = (0.5 * (tr - np.sqrt(((tr ** 2) - (4 * det)))))
eig2 = (0.5 * (tr + np.sqrt(((tr ** 2) - (4 * det)))))
vec1 = np.array([(A[(0, 0)] - eig2), A[(0, 1)]])
vec1 = (vec1 / np.sqrt(((vec1[0] ** 2) + (vec1[1] ** 2))))
vec2 = np.array([(- vec1[1]), vec1[0]])
if (abs(eig2) > abs(eig1)):
(eig2, eig1) = (eig1, eig2)
(vec2, vec1) = (vec1, vec2)
return (eig1, eig2, vec1, vec2) | def eigvals(A, tol=1e-06):
'Eigenvalues and eigenvectors for a 2x2 symmetric matrix/tensor\n \n Parameters\n ----------\n A : ndarray\n Symmetric matrix.\n tol : float (optional)\n Tolerance for considering a matrix diagonal.\n\n Returns\n -------\n eig1 : float\n First eigenvalue.\n eig2 : float\n Second eigenvalue.\n vec1 : ndarray\n First eigenvector.\n vec2 : ndarray\n Second eigenvector\n \n Examples\n --------\n \n >>> A = np.array([[5, 6],\n ... [6, 9]])\n >>> eig1, eig2, vec1, vec2 = eigvals(A)\n >>> np.allclose(eig1, 7 + 2*np.sqrt(10))\n True\n >>> np.allclose(eig2, 7 - 2*np.sqrt(10))\n True\n >>> np.allclose(vec1, np.array([-0.584710284663765, -0.8112421851755609]))\n True\n >>> np.allclose(vec2, np.array([-0.8112421851755609,0.584710284663765]))\n True\n \n '
if (np.abs(A).max() < tol):
eig1 = 0.0
eig2 = 0.0
vec1 = np.array([np.NaN, np.NaN])
vec2 = np.array([np.NaN, np.NaN])
elif ((abs(A[(0, 1)]) / np.abs(A).max()) < tol):
eig1 = A[(0, 0)]
eig2 = A[(1, 1)]
vec1 = np.array([1, 0])
vec2 = np.array([0, 1])
else:
tr = (A[(0, 0)] + A[(1, 1)])
det = ((A[(0, 0)] * A[(1, 1)]) - (A[(0, 1)] ** 2))
eig1 = (0.5 * (tr - np.sqrt(((tr ** 2) - (4 * det)))))
eig2 = (0.5 * (tr + np.sqrt(((tr ** 2) - (4 * det)))))
vec1 = np.array([(A[(0, 0)] - eig2), A[(0, 1)]])
vec1 = (vec1 / np.sqrt(((vec1[0] ** 2) + (vec1[1] ** 2))))
vec2 = np.array([(- vec1[1]), vec1[0]])
if (abs(eig2) > abs(eig1)):
(eig2, eig1) = (eig1, eig2)
(vec2, vec1) = (vec1, vec2)
return (eig1, eig2, vec1, vec2)<|docstring|>Eigenvalues and eigenvectors for a 2x2 symmetric matrix/tensor
Parameters
----------
A : ndarray
Symmetric matrix.
tol : float (optional)
Tolerance for considering a matrix diagonal.
Returns
-------
eig1 : float
First eigenvalue.
eig2 : float
Second eigenvalue.
vec1 : ndarray
First eigenvector.
vec2 : ndarray
Second eigenvector
Examples
--------
>>> A = np.array([[5, 6],
... [6, 9]])
>>> eig1, eig2, vec1, vec2 = eigvals(A)
>>> np.allclose(eig1, 7 + 2*np.sqrt(10))
True
>>> np.allclose(eig2, 7 - 2*np.sqrt(10))
True
>>> np.allclose(vec1, np.array([-0.584710284663765, -0.8112421851755609]))
True
>>> np.allclose(vec2, np.array([-0.8112421851755609,0.584710284663765]))
True<|endoftext|> |
c8b94d3cffe595a8466ba5f72ac775bfc20cdd9db62bfb04b91d61fab08f26be | def principal_dirs(field):
'Compute the principal directions of a tensor field\n\n Parameters\n ----------\n field : ndarray\n Tensor field. The tensor is written as "vector" using\n Voigt notation.\n\n Returns\n -------\n eigs1 : ndarray\n Array with the first eigenvalues.\n eigs2 : ndarray\n Array with the second eigenvalues.\n vecs1 : ndarray\n Array with the first eigenvectors.\n vecs2 : ndarray\n Array with the Second eigenvector.\n\n '
num = field.shape[0]
eigs1 = np.empty(num)
eigs2 = np.empty(num)
vecs1 = np.empty((num, 2))
vecs2 = np.empty((num, 2))
A = np.zeros((2, 2))
for (cont, tensor) in enumerate(field):
A[(0, 0)] = tensor[0]
A[(1, 1)] = tensor[1]
A[(0, 1)] = tensor[2]
(eig1, eig2, vec1, vec2) = eigvals(A, tol=1e-06)
eigs1[cont] = eig1
eigs2[cont] = eig2
vecs1[(cont, :)] = vec1
vecs2[(cont, :)] = vec2
return (eigs1, eigs2, vecs1, vecs2) | Compute the principal directions of a tensor field
Parameters
----------
field : ndarray
Tensor field. The tensor is written as "vector" using
Voigt notation.
Returns
-------
eigs1 : ndarray
Array with the first eigenvalues.
eigs2 : ndarray
Array with the second eigenvalues.
vecs1 : ndarray
Array with the first eigenvectors.
vecs2 : ndarray
Array with the Second eigenvector. | solidspydyn/postprocesor.py | principal_dirs | jgomezc1/SOLIDSPy_DYN | 1 | python | def principal_dirs(field):
'Compute the principal directions of a tensor field\n\n Parameters\n ----------\n field : ndarray\n Tensor field. The tensor is written as "vector" using\n Voigt notation.\n\n Returns\n -------\n eigs1 : ndarray\n Array with the first eigenvalues.\n eigs2 : ndarray\n Array with the second eigenvalues.\n vecs1 : ndarray\n Array with the first eigenvectors.\n vecs2 : ndarray\n Array with the Second eigenvector.\n\n '
num = field.shape[0]
eigs1 = np.empty(num)
eigs2 = np.empty(num)
vecs1 = np.empty((num, 2))
vecs2 = np.empty((num, 2))
A = np.zeros((2, 2))
for (cont, tensor) in enumerate(field):
A[(0, 0)] = tensor[0]
A[(1, 1)] = tensor[1]
A[(0, 1)] = tensor[2]
(eig1, eig2, vec1, vec2) = eigvals(A, tol=1e-06)
eigs1[cont] = eig1
eigs2[cont] = eig2
vecs1[(cont, :)] = vec1
vecs2[(cont, :)] = vec2
return (eigs1, eigs2, vecs1, vecs2) | def principal_dirs(field):
'Compute the principal directions of a tensor field\n\n Parameters\n ----------\n field : ndarray\n Tensor field. The tensor is written as "vector" using\n Voigt notation.\n\n Returns\n -------\n eigs1 : ndarray\n Array with the first eigenvalues.\n eigs2 : ndarray\n Array with the second eigenvalues.\n vecs1 : ndarray\n Array with the first eigenvectors.\n vecs2 : ndarray\n Array with the Second eigenvector.\n\n '
num = field.shape[0]
eigs1 = np.empty(num)
eigs2 = np.empty(num)
vecs1 = np.empty((num, 2))
vecs2 = np.empty((num, 2))
A = np.zeros((2, 2))
for (cont, tensor) in enumerate(field):
A[(0, 0)] = tensor[0]
A[(1, 1)] = tensor[1]
A[(0, 1)] = tensor[2]
(eig1, eig2, vec1, vec2) = eigvals(A, tol=1e-06)
eigs1[cont] = eig1
eigs2[cont] = eig2
vecs1[(cont, :)] = vec1
vecs2[(cont, :)] = vec2
return (eigs1, eigs2, vecs1, vecs2)<|docstring|>Compute the principal directions of a tensor field
Parameters
----------
field : ndarray
Tensor field. The tensor is written as "vector" using
Voigt notation.
Returns
-------
eigs1 : ndarray
Array with the first eigenvalues.
eigs2 : ndarray
Array with the second eigenvalues.
vecs1 : ndarray
Array with the first eigenvectors.
vecs2 : ndarray
Array with the Second eigenvector.<|endoftext|> |
a77298082eff0aa26540e33734d08737c3cfbacfd5e2175937cf1b222230dc94 | def energy(UG, KG):
'\n Computes the potential energy for the current sln.\n\n Parameters\n ----------\n UG : ndarray (float)\n Array with the computed displacements.\n KG : ndarray (float)\n Global stiffness matrix.\n\n Returns\n -------\n EFE : scalar (float)\n Total energy in the system. :math:`-\\frac{1}{2} U^T K U`\n\n '
f = KG.dot(UG)
EFE = ((- 0.5) * f.dot(UG))
return EFE | Computes the potential energy for the current sln.
Parameters
----------
UG : ndarray (float)
Array with the computed displacements.
KG : ndarray (float)
Global stiffness matrix.
Returns
-------
EFE : scalar (float)
Total energy in the system. :math:`-\frac{1}{2} U^T K U` | solidspydyn/postprocesor.py | energy | jgomezc1/SOLIDSPy_DYN | 1 | python | def energy(UG, KG):
'\n Computes the potential energy for the current sln.\n\n Parameters\n ----------\n UG : ndarray (float)\n Array with the computed displacements.\n KG : ndarray (float)\n Global stiffness matrix.\n\n Returns\n -------\n EFE : scalar (float)\n Total energy in the system. :math:`-\\frac{1}{2} U^T K U`\n\n '
f = KG.dot(UG)
EFE = ((- 0.5) * f.dot(UG))
return EFE | def energy(UG, KG):
'\n Computes the potential energy for the current sln.\n\n Parameters\n ----------\n UG : ndarray (float)\n Array with the computed displacements.\n KG : ndarray (float)\n Global stiffness matrix.\n\n Returns\n -------\n EFE : scalar (float)\n Total energy in the system. :math:`-\\frac{1}{2} U^T K U`\n\n '
f = KG.dot(UG)
EFE = ((- 0.5) * f.dot(UG))
return EFE<|docstring|>Computes the potential energy for the current sln.
Parameters
----------
UG : ndarray (float)
Array with the computed displacements.
KG : ndarray (float)
Global stiffness matrix.
Returns
-------
EFE : scalar (float)
Total energy in the system. :math:`-\frac{1}{2} U^T K U`<|endoftext|> |
19596e50f7dc32f693e63ea461434cd4d3686aeee178bbeac5e1ec8ee652f2c0 | def nodal_historyH(idnod, ninc, U, IBC, fname):
'Writes the response time history for the horizontal\n degree of freedom of node idnod. The response is written\n to the text file as sepcified by fname\n \n idnod : scalar (int)\n Nodal point idntifier.\n ninc : intger (int)\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n \n Returns\n -------\n Writes down the text file with the response.\n\n '
uh = np.zeros(ninc)
idof = IBC[(idnod, 0)]
uh[:] = U[(idof, :)]
np.savetxt(fname, uh)
return | Writes the response time history for the horizontal
degree of freedom of node idnod. The response is written
to the text file as sepcified by fname
idnod : scalar (int)
Nodal point idntifier.
ninc : intger (int)
Integer indicating the number of increments.
U : ndarray (float)
Array with the computed displacements.
IBC : ndarray (integer)
Array with the equation numbers
fname: string.
String with the file name.
Returns
-------
Writes down the text file with the response. | solidspydyn/postprocesor.py | nodal_historyH | jgomezc1/SOLIDSPy_DYN | 1 | python | def nodal_historyH(idnod, ninc, U, IBC, fname):
'Writes the response time history for the horizontal\n degree of freedom of node idnod. The response is written\n to the text file as sepcified by fname\n \n idnod : scalar (int)\n Nodal point idntifier.\n ninc : intger (int)\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n \n Returns\n -------\n Writes down the text file with the response.\n\n '
uh = np.zeros(ninc)
idof = IBC[(idnod, 0)]
uh[:] = U[(idof, :)]
np.savetxt(fname, uh)
return | def nodal_historyH(idnod, ninc, U, IBC, fname):
'Writes the response time history for the horizontal\n degree of freedom of node idnod. The response is written\n to the text file as sepcified by fname\n \n idnod : scalar (int)\n Nodal point idntifier.\n ninc : intger (int)\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n \n Returns\n -------\n Writes down the text file with the response.\n\n '
uh = np.zeros(ninc)
idof = IBC[(idnod, 0)]
uh[:] = U[(idof, :)]
np.savetxt(fname, uh)
return<|docstring|>Writes the response time history for the horizontal
degree of freedom of node idnod. The response is written
to the text file as sepcified by fname
idnod : scalar (int)
Nodal point idntifier.
ninc : intger (int)
Integer indicating the number of increments.
U : ndarray (float)
Array with the computed displacements.
IBC : ndarray (integer)
Array with the equation numbers
fname: string.
String with the file name.
Returns
-------
Writes down the text file with the response.<|endoftext|> |
00e13620a526172b7f6a7c7fa6fa3eb61087d3c6553a00cef15b27ba6e248f99 | def nodal_historyV(idnod, ninc, U, IBC, fname):
'Writes the response time history for the vertical\n degree of freedom of node idnod. The response is written\n to the text file as sepcified by fname\n \n idnod : scalar (int)\n Nodal point idntifier.\n ninc : intger (int)\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n \n Returns\n -------\n Writes down the text file with the response.\n\n '
uh = np.zeros(ninc)
idof = IBC[(idnod, 1)]
uh[:] = U[(idof, :)]
np.savetxt(fname, uh)
return | Writes the response time history for the vertical
degree of freedom of node idnod. The response is written
to the text file as sepcified by fname
idnod : scalar (int)
Nodal point idntifier.
ninc : intger (int)
Integer indicating the number of increments.
U : ndarray (float)
Array with the computed displacements.
IBC : ndarray (integer)
Array with the equation numbers
fname: string.
String with the file name.
Returns
-------
Writes down the text file with the response. | solidspydyn/postprocesor.py | nodal_historyV | jgomezc1/SOLIDSPy_DYN | 1 | python | def nodal_historyV(idnod, ninc, U, IBC, fname):
'Writes the response time history for the vertical\n degree of freedom of node idnod. The response is written\n to the text file as sepcified by fname\n \n idnod : scalar (int)\n Nodal point idntifier.\n ninc : intger (int)\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n \n Returns\n -------\n Writes down the text file with the response.\n\n '
uh = np.zeros(ninc)
idof = IBC[(idnod, 1)]
uh[:] = U[(idof, :)]
np.savetxt(fname, uh)
return | def nodal_historyV(idnod, ninc, U, IBC, fname):
'Writes the response time history for the vertical\n degree of freedom of node idnod. The response is written\n to the text file as sepcified by fname\n \n idnod : scalar (int)\n Nodal point idntifier.\n ninc : intger (int)\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n \n Returns\n -------\n Writes down the text file with the response.\n\n '
uh = np.zeros(ninc)
idof = IBC[(idnod, 1)]
uh[:] = U[(idof, :)]
np.savetxt(fname, uh)
return<|docstring|>Writes the response time history for the vertical
degree of freedom of node idnod. The response is written
to the text file as sepcified by fname
idnod : scalar (int)
Nodal point idntifier.
ninc : intger (int)
Integer indicating the number of increments.
U : ndarray (float)
Array with the computed displacements.
IBC : ndarray (integer)
Array with the equation numbers
fname: string.
String with the file name.
Returns
-------
Writes down the text file with the response.<|endoftext|> |
aefa53c553e2fb9137029938410569295ac7c6a86eaf355fc886ac5dad825d1f | def sheets(idnod, ninc, U, IBC, fname, folder):
'\n Writes a file with the nodal history for a list of nodes\n stored in idnod\n \n idnod : ndarray (int)\n List with the nodal point names.\n ninc : intger\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n folder: string.\n String with the folder name.\n \n \n Returns\n -------\n nn : integer.\n Integer with the size of the idnod list.\n\n '
nn = idnod.shape[0]
uv = np.zeros([nn, ninc])
for i in range(nn):
idof = IBC[(idnod[i], 0)]
uv[(i, :)] = U[(idof, :)]
np.savetxt(((folder + fname) + '.txt'), uv)
return nn | Writes a file with the nodal history for a list of nodes
stored in idnod
idnod : ndarray (int)
List with the nodal point names.
ninc : intger
Integer indicating the number of increments.
U : ndarray (float)
Array with the computed displacements.
IBC : ndarray (integer)
Array with the equation numbers
fname: string.
String with the file name.
folder: string.
String with the folder name.
Returns
-------
nn : integer.
Integer with the size of the idnod list. | solidspydyn/postprocesor.py | sheets | jgomezc1/SOLIDSPy_DYN | 1 | python | def sheets(idnod, ninc, U, IBC, fname, folder):
'\n Writes a file with the nodal history for a list of nodes\n stored in idnod\n \n idnod : ndarray (int)\n List with the nodal point names.\n ninc : intger\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n folder: string.\n String with the folder name.\n \n \n Returns\n -------\n nn : integer.\n Integer with the size of the idnod list.\n\n '
nn = idnod.shape[0]
uv = np.zeros([nn, ninc])
for i in range(nn):
idof = IBC[(idnod[i], 0)]
uv[(i, :)] = U[(idof, :)]
np.savetxt(((folder + fname) + '.txt'), uv)
return nn | def sheets(idnod, ninc, U, IBC, fname, folder):
'\n Writes a file with the nodal history for a list of nodes\n stored in idnod\n \n idnod : ndarray (int)\n List with the nodal point names.\n ninc : intger\n Integer indicating the number of increments.\n U : ndarray (float)\n Array with the computed displacements.\n IBC : ndarray (integer)\n Array with the equation numbers\n fname: string.\n String with the file name.\n folder: string.\n String with the folder name.\n \n \n Returns\n -------\n nn : integer.\n Integer with the size of the idnod list.\n\n '
nn = idnod.shape[0]
uv = np.zeros([nn, ninc])
for i in range(nn):
idof = IBC[(idnod[i], 0)]
uv[(i, :)] = U[(idof, :)]
np.savetxt(((folder + fname) + '.txt'), uv)
return nn<|docstring|>Writes a file with the nodal history for a list of nodes
stored in idnod
idnod : ndarray (int)
List with the nodal point names.
ninc : intger
Integer indicating the number of increments.
U : ndarray (float)
Array with the computed displacements.
IBC : ndarray (integer)
Array with the equation numbers
fname: string.
String with the file name.
folder: string.
String with the folder name.
Returns
-------
nn : integer.
Integer with the size of the idnod list.<|endoftext|> |
1709f5aa400e9baf55888b9880cbb3e0f28fdc4af90c82c52151b557fdc0b140 | def PLOTsheets(fname, folder, dt, ninc, npts, dk):
'\n Plots the time histories for a series of nodes with\n response stored in the file fname.\n \n fname: string.\n String with the file name.\n folder: string.\n String with the folder name.\n \n dt : Scalar (float)\n Time step.\n ninc : intger (int)\n Integer indicating the number of increments.\n npts : Integer (int)\n Integer with the number of poiints.\n dk : integer (integer)\n Scaling factor.\n \n '
plt.figure(0)
DATOS = np.loadtxt((folder + fname))
signal = np.zeros([ninc, npts], dtype=float)
k = 0
for j in range(npts):
for i in range(ninc):
signal[(i, k)] = (DATOS[(j, i)] + (k / dk))
sig.grafsignalG(signal[(:, k)], 'salida', 'Displacement', 'l', 0.0, 20.0, dt, 0)
k = (k + 1)
return | Plots the time histories for a series of nodes with
response stored in the file fname.
fname: string.
String with the file name.
folder: string.
String with the folder name.
dt : Scalar (float)
Time step.
ninc : intger (int)
Integer indicating the number of increments.
npts : Integer (int)
Integer with the number of poiints.
dk : integer (integer)
Scaling factor. | solidspydyn/postprocesor.py | PLOTsheets | jgomezc1/SOLIDSPy_DYN | 1 | python | def PLOTsheets(fname, folder, dt, ninc, npts, dk):
'\n Plots the time histories for a series of nodes with\n response stored in the file fname.\n \n fname: string.\n String with the file name.\n folder: string.\n String with the folder name.\n \n dt : Scalar (float)\n Time step.\n ninc : intger (int)\n Integer indicating the number of increments.\n npts : Integer (int)\n Integer with the number of poiints.\n dk : integer (integer)\n Scaling factor.\n \n '
plt.figure(0)
DATOS = np.loadtxt((folder + fname))
signal = np.zeros([ninc, npts], dtype=float)
k = 0
for j in range(npts):
for i in range(ninc):
signal[(i, k)] = (DATOS[(j, i)] + (k / dk))
sig.grafsignalG(signal[(:, k)], 'salida', 'Displacement', 'l', 0.0, 20.0, dt, 0)
k = (k + 1)
return | def PLOTsheets(fname, folder, dt, ninc, npts, dk):
'\n Plots the time histories for a series of nodes with\n response stored in the file fname.\n \n fname: string.\n String with the file name.\n folder: string.\n String with the folder name.\n \n dt : Scalar (float)\n Time step.\n ninc : intger (int)\n Integer indicating the number of increments.\n npts : Integer (int)\n Integer with the number of poiints.\n dk : integer (integer)\n Scaling factor.\n \n '
plt.figure(0)
DATOS = np.loadtxt((folder + fname))
signal = np.zeros([ninc, npts], dtype=float)
k = 0
for j in range(npts):
for i in range(ninc):
signal[(i, k)] = (DATOS[(j, i)] + (k / dk))
sig.grafsignalG(signal[(:, k)], 'salida', 'Displacement', 'l', 0.0, 20.0, dt, 0)
k = (k + 1)
return<|docstring|>Plots the time histories for a series of nodes with
response stored in the file fname.
fname: string.
String with the file name.
folder: string.
String with the folder name.
dt : Scalar (float)
Time step.
ninc : intger (int)
Integer indicating the number of increments.
npts : Integer (int)
Integer with the number of poiints.
dk : integer (integer)
Scaling factor.<|endoftext|> |
090ffcdb94d99e944371c2151e279ad26eb0e4f01754007642bd0d144507a9b5 | def respuesta(cells, cell_data, phy_lin):
'Extracts the nodes located at the physical line\n phy_line\n\n Parameters\n ----------\n cell : dictionary\n Dictionary created by meshio with cells information.\n cell_data: dictionary\n Dictionary created by meshio with cells data information.\n phy_lin : int\n Physical line to print nodal histories.\n\n Returns\n -------\n nodes_carga : int\n Array with the nodal data corresponding to the physical\n line phy_line.\n\n '
lines = cells['line']
phy_line = cell_data['line']['physical']
id_carga = [cont for cont in range(len(phy_line)) if (phy_line[cont] == phy_lin)]
nodes_carga = lines[id_carga]
nodes_carga = nodes_carga.flatten()
nodes_carga = list(set(nodes_carga))
nodes_carga.sort(reverse=False)
return nodes_carga | Extracts the nodes located at the physical line
phy_line
Parameters
----------
cell : dictionary
Dictionary created by meshio with cells information.
cell_data: dictionary
Dictionary created by meshio with cells data information.
phy_lin : int
Physical line to print nodal histories.
Returns
-------
nodes_carga : int
Array with the nodal data corresponding to the physical
line phy_line. | solidspydyn/postprocesor.py | respuesta | jgomezc1/SOLIDSPy_DYN | 1 | python | def respuesta(cells, cell_data, phy_lin):
'Extracts the nodes located at the physical line\n phy_line\n\n Parameters\n ----------\n cell : dictionary\n Dictionary created by meshio with cells information.\n cell_data: dictionary\n Dictionary created by meshio with cells data information.\n phy_lin : int\n Physical line to print nodal histories.\n\n Returns\n -------\n nodes_carga : int\n Array with the nodal data corresponding to the physical\n line phy_line.\n\n '
lines = cells['line']
phy_line = cell_data['line']['physical']
id_carga = [cont for cont in range(len(phy_line)) if (phy_line[cont] == phy_lin)]
nodes_carga = lines[id_carga]
nodes_carga = nodes_carga.flatten()
nodes_carga = list(set(nodes_carga))
nodes_carga.sort(reverse=False)
return nodes_carga | def respuesta(cells, cell_data, phy_lin):
'Extracts the nodes located at the physical line\n phy_line\n\n Parameters\n ----------\n cell : dictionary\n Dictionary created by meshio with cells information.\n cell_data: dictionary\n Dictionary created by meshio with cells data information.\n phy_lin : int\n Physical line to print nodal histories.\n\n Returns\n -------\n nodes_carga : int\n Array with the nodal data corresponding to the physical\n line phy_line.\n\n '
lines = cells['line']
phy_line = cell_data['line']['physical']
id_carga = [cont for cont in range(len(phy_line)) if (phy_line[cont] == phy_lin)]
nodes_carga = lines[id_carga]
nodes_carga = nodes_carga.flatten()
nodes_carga = list(set(nodes_carga))
nodes_carga.sort(reverse=False)
return nodes_carga<|docstring|>Extracts the nodes located at the physical line
phy_line
Parameters
----------
cell : dictionary
Dictionary created by meshio with cells information.
cell_data: dictionary
Dictionary created by meshio with cells data information.
phy_lin : int
Physical line to print nodal histories.
Returns
-------
nodes_carga : int
Array with the nodal data corresponding to the physical
line phy_line.<|endoftext|> |
0669aa7469bcd46aeb612606305f58b5375865a6bec4a4a36928eee308a6d0bb | def hillshade(agg: xr.DataArray, azimuth: int=225, angle_altitude: int=25, name: Optional[str]='hillshade', shadows: bool=False) -> xr.DataArray:
"\n Calculates, for all cells in the array, an illumination value of\n each cell based on illumination from a specific azimuth and\n altitude.\n\n Parameters\n ----------\n agg : xarray.DataArray\n 2D NumPy, CuPy, NumPy-backed Dask, or Cupy-backed Dask array\n of elevation values.\n angle_altitude : int, default=25\n Altitude angle of the sun specified in degrees.\n azimuth : int, default=225\n The angle between the north vector and the perpendicular\n projection of the light source down onto the horizon\n specified in degrees.\n name : str, default='hillshade'\n Name of output DataArray.\n shadows : bool, default=False\n Whether to calculate shadows or not. Shadows are available\n only for Cupy-backed Dask arrays and only if rtxpy is\n installed and appropriate graphics hardware is available.\n\n Returns\n -------\n hillshade_agg : xarray.DataArray, of same type as `agg`\n 2D aggregate array of illumination values.\n\n References\n ----------\n - GeoExamples: http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html # noqa\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> from xrspatial import hillshade\n >>> data = np.array([\n ... [0., 0., 0., 0., 0.],\n ... [0., 1., 0., 2., 0.],\n ... [0., 0., 3., 0., 0.],\n ... [0., 0., 0., 0., 0.],\n ... [0., 0., 0., 0., 0.]])\n >>> n, m = data.shape\n >>> raster = xr.DataArray(data, dims=['y', 'x'], name='raster')\n >>> raster['y'] = np.arange(n)[::-1]\n >>> raster['x'] = np.arange(m)\n >>> hillshade_agg = hillshade(raster)\n >>> print(hillshade_agg)\n <xarray.DataArray 'hillshade' (y: 5, x: 5)>\n array([[ nan, nan, nan, nan, nan],\n [ nan, 0.71130913, 0.44167341, 0.71130913, nan],\n [ nan, 0.95550163, 0.71130913, 0.52478473, nan],\n [ nan, 0.71130913, 0.88382559, 0.71130913, nan],\n [ nan, nan, nan, nan, nan]])\n Coordinates:\n * y (y) int32 4 3 2 1 0\n * x (x) int32 0 1 2 3 4\n "
if (shadows and (not has_rtx())):
raise RuntimeError('Can only calculate shadows if cupy and rtxpy are available')
if isinstance(agg.data, np.ndarray):
out = _run_numpy(agg.data, azimuth, angle_altitude)
elif (has_cuda() and has_cupy() and is_cupy_array(agg.data)):
if (shadows and has_rtx()):
from .gpu_rtx.hillshade import hillshade_rtx
out = hillshade_rtx(agg, azimuth, angle_altitude, shadows=shadows)
else:
out = _run_cupy(agg.data, azimuth, angle_altitude)
elif (has_cuda() and has_cupy() and isinstance(agg.data, da.Array) and is_cupy_backed(agg)):
raise NotImplementedError('Dask/CuPy hillshade not implemented')
elif isinstance(agg.data, da.Array):
out = _run_dask_numpy(agg.data, azimuth, angle_altitude)
else:
raise TypeError('Unsupported Array Type: {}'.format(type(agg.data)))
return xr.DataArray(out, name=name, coords=agg.coords, dims=agg.dims, attrs=agg.attrs) | Calculates, for all cells in the array, an illumination value of
each cell based on illumination from a specific azimuth and
altitude.
Parameters
----------
agg : xarray.DataArray
2D NumPy, CuPy, NumPy-backed Dask, or Cupy-backed Dask array
of elevation values.
angle_altitude : int, default=25
Altitude angle of the sun specified in degrees.
azimuth : int, default=225
The angle between the north vector and the perpendicular
projection of the light source down onto the horizon
specified in degrees.
name : str, default='hillshade'
Name of output DataArray.
shadows : bool, default=False
Whether to calculate shadows or not. Shadows are available
only for Cupy-backed Dask arrays and only if rtxpy is
installed and appropriate graphics hardware is available.
Returns
-------
hillshade_agg : xarray.DataArray, of same type as `agg`
2D aggregate array of illumination values.
References
----------
- GeoExamples: http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html # noqa
Examples
--------
.. sourcecode:: python
>>> import numpy as np
>>> import xarray as xr
>>> from xrspatial import hillshade
>>> data = np.array([
... [0., 0., 0., 0., 0.],
... [0., 1., 0., 2., 0.],
... [0., 0., 3., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.]])
>>> n, m = data.shape
>>> raster = xr.DataArray(data, dims=['y', 'x'], name='raster')
>>> raster['y'] = np.arange(n)[::-1]
>>> raster['x'] = np.arange(m)
>>> hillshade_agg = hillshade(raster)
>>> print(hillshade_agg)
<xarray.DataArray 'hillshade' (y: 5, x: 5)>
array([[ nan, nan, nan, nan, nan],
[ nan, 0.71130913, 0.44167341, 0.71130913, nan],
[ nan, 0.95550163, 0.71130913, 0.52478473, nan],
[ nan, 0.71130913, 0.88382559, 0.71130913, nan],
[ nan, nan, nan, nan, nan]])
Coordinates:
* y (y) int32 4 3 2 1 0
* x (x) int32 0 1 2 3 4 | xrspatial/hillshade.py | hillshade | DahnJ/xarray-spatial | 1 | python | def hillshade(agg: xr.DataArray, azimuth: int=225, angle_altitude: int=25, name: Optional[str]='hillshade', shadows: bool=False) -> xr.DataArray:
"\n Calculates, for all cells in the array, an illumination value of\n each cell based on illumination from a specific azimuth and\n altitude.\n\n Parameters\n ----------\n agg : xarray.DataArray\n 2D NumPy, CuPy, NumPy-backed Dask, or Cupy-backed Dask array\n of elevation values.\n angle_altitude : int, default=25\n Altitude angle of the sun specified in degrees.\n azimuth : int, default=225\n The angle between the north vector and the perpendicular\n projection of the light source down onto the horizon\n specified in degrees.\n name : str, default='hillshade'\n Name of output DataArray.\n shadows : bool, default=False\n Whether to calculate shadows or not. Shadows are available\n only for Cupy-backed Dask arrays and only if rtxpy is\n installed and appropriate graphics hardware is available.\n\n Returns\n -------\n hillshade_agg : xarray.DataArray, of same type as `agg`\n 2D aggregate array of illumination values.\n\n References\n ----------\n - GeoExamples: http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html # noqa\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> from xrspatial import hillshade\n >>> data = np.array([\n ... [0., 0., 0., 0., 0.],\n ... [0., 1., 0., 2., 0.],\n ... [0., 0., 3., 0., 0.],\n ... [0., 0., 0., 0., 0.],\n ... [0., 0., 0., 0., 0.]])\n >>> n, m = data.shape\n >>> raster = xr.DataArray(data, dims=['y', 'x'], name='raster')\n >>> raster['y'] = np.arange(n)[::-1]\n >>> raster['x'] = np.arange(m)\n >>> hillshade_agg = hillshade(raster)\n >>> print(hillshade_agg)\n <xarray.DataArray 'hillshade' (y: 5, x: 5)>\n array([[ nan, nan, nan, nan, nan],\n [ nan, 0.71130913, 0.44167341, 0.71130913, nan],\n [ nan, 0.95550163, 0.71130913, 0.52478473, nan],\n [ nan, 0.71130913, 0.88382559, 0.71130913, nan],\n [ nan, nan, nan, nan, nan]])\n Coordinates:\n * y (y) int32 4 3 2 1 0\n * x (x) int32 0 1 2 3 4\n "
if (shadows and (not has_rtx())):
raise RuntimeError('Can only calculate shadows if cupy and rtxpy are available')
if isinstance(agg.data, np.ndarray):
out = _run_numpy(agg.data, azimuth, angle_altitude)
elif (has_cuda() and has_cupy() and is_cupy_array(agg.data)):
if (shadows and has_rtx()):
from .gpu_rtx.hillshade import hillshade_rtx
out = hillshade_rtx(agg, azimuth, angle_altitude, shadows=shadows)
else:
out = _run_cupy(agg.data, azimuth, angle_altitude)
elif (has_cuda() and has_cupy() and isinstance(agg.data, da.Array) and is_cupy_backed(agg)):
raise NotImplementedError('Dask/CuPy hillshade not implemented')
elif isinstance(agg.data, da.Array):
out = _run_dask_numpy(agg.data, azimuth, angle_altitude)
else:
raise TypeError('Unsupported Array Type: {}'.format(type(agg.data)))
return xr.DataArray(out, name=name, coords=agg.coords, dims=agg.dims, attrs=agg.attrs) | def hillshade(agg: xr.DataArray, azimuth: int=225, angle_altitude: int=25, name: Optional[str]='hillshade', shadows: bool=False) -> xr.DataArray:
"\n Calculates, for all cells in the array, an illumination value of\n each cell based on illumination from a specific azimuth and\n altitude.\n\n Parameters\n ----------\n agg : xarray.DataArray\n 2D NumPy, CuPy, NumPy-backed Dask, or Cupy-backed Dask array\n of elevation values.\n angle_altitude : int, default=25\n Altitude angle of the sun specified in degrees.\n azimuth : int, default=225\n The angle between the north vector and the perpendicular\n projection of the light source down onto the horizon\n specified in degrees.\n name : str, default='hillshade'\n Name of output DataArray.\n shadows : bool, default=False\n Whether to calculate shadows or not. Shadows are available\n only for Cupy-backed Dask arrays and only if rtxpy is\n installed and appropriate graphics hardware is available.\n\n Returns\n -------\n hillshade_agg : xarray.DataArray, of same type as `agg`\n 2D aggregate array of illumination values.\n\n References\n ----------\n - GeoExamples: http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html # noqa\n\n Examples\n --------\n .. sourcecode:: python\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> from xrspatial import hillshade\n >>> data = np.array([\n ... [0., 0., 0., 0., 0.],\n ... [0., 1., 0., 2., 0.],\n ... [0., 0., 3., 0., 0.],\n ... [0., 0., 0., 0., 0.],\n ... [0., 0., 0., 0., 0.]])\n >>> n, m = data.shape\n >>> raster = xr.DataArray(data, dims=['y', 'x'], name='raster')\n >>> raster['y'] = np.arange(n)[::-1]\n >>> raster['x'] = np.arange(m)\n >>> hillshade_agg = hillshade(raster)\n >>> print(hillshade_agg)\n <xarray.DataArray 'hillshade' (y: 5, x: 5)>\n array([[ nan, nan, nan, nan, nan],\n [ nan, 0.71130913, 0.44167341, 0.71130913, nan],\n [ nan, 0.95550163, 0.71130913, 0.52478473, nan],\n [ nan, 0.71130913, 0.88382559, 0.71130913, nan],\n [ nan, nan, nan, nan, nan]])\n Coordinates:\n * y (y) int32 4 3 2 1 0\n * x (x) int32 0 1 2 3 4\n "
if (shadows and (not has_rtx())):
raise RuntimeError('Can only calculate shadows if cupy and rtxpy are available')
if isinstance(agg.data, np.ndarray):
out = _run_numpy(agg.data, azimuth, angle_altitude)
elif (has_cuda() and has_cupy() and is_cupy_array(agg.data)):
if (shadows and has_rtx()):
from .gpu_rtx.hillshade import hillshade_rtx
out = hillshade_rtx(agg, azimuth, angle_altitude, shadows=shadows)
else:
out = _run_cupy(agg.data, azimuth, angle_altitude)
elif (has_cuda() and has_cupy() and isinstance(agg.data, da.Array) and is_cupy_backed(agg)):
raise NotImplementedError('Dask/CuPy hillshade not implemented')
elif isinstance(agg.data, da.Array):
out = _run_dask_numpy(agg.data, azimuth, angle_altitude)
else:
raise TypeError('Unsupported Array Type: {}'.format(type(agg.data)))
return xr.DataArray(out, name=name, coords=agg.coords, dims=agg.dims, attrs=agg.attrs)<|docstring|>Calculates, for all cells in the array, an illumination value of
each cell based on illumination from a specific azimuth and
altitude.
Parameters
----------
agg : xarray.DataArray
2D NumPy, CuPy, NumPy-backed Dask, or Cupy-backed Dask array
of elevation values.
angle_altitude : int, default=25
Altitude angle of the sun specified in degrees.
azimuth : int, default=225
The angle between the north vector and the perpendicular
projection of the light source down onto the horizon
specified in degrees.
name : str, default='hillshade'
Name of output DataArray.
shadows : bool, default=False
Whether to calculate shadows or not. Shadows are available
only for Cupy-backed Dask arrays and only if rtxpy is
installed and appropriate graphics hardware is available.
Returns
-------
hillshade_agg : xarray.DataArray, of same type as `agg`
2D aggregate array of illumination values.
References
----------
- GeoExamples: http://geoexamples.blogspot.com/2014/03/shaded-relief-images-using-gdal-python.html # noqa
Examples
--------
.. sourcecode:: python
>>> import numpy as np
>>> import xarray as xr
>>> from xrspatial import hillshade
>>> data = np.array([
... [0., 0., 0., 0., 0.],
... [0., 1., 0., 2., 0.],
... [0., 0., 3., 0., 0.],
... [0., 0., 0., 0., 0.],
... [0., 0., 0., 0., 0.]])
>>> n, m = data.shape
>>> raster = xr.DataArray(data, dims=['y', 'x'], name='raster')
>>> raster['y'] = np.arange(n)[::-1]
>>> raster['x'] = np.arange(m)
>>> hillshade_agg = hillshade(raster)
>>> print(hillshade_agg)
<xarray.DataArray 'hillshade' (y: 5, x: 5)>
array([[ nan, nan, nan, nan, nan],
[ nan, 0.71130913, 0.44167341, 0.71130913, nan],
[ nan, 0.95550163, 0.71130913, 0.52478473, nan],
[ nan, 0.71130913, 0.88382559, 0.71130913, nan],
[ nan, nan, nan, nan, nan]])
Coordinates:
* y (y) int32 4 3 2 1 0
* x (x) int32 0 1 2 3 4<|endoftext|> |
e52db1a15b7ba944cbb46678ab95d52c7cb4cf8a94fdd0613ec6587ac454faf1 | def get_singleton(self):
'If the row only has one column, return that value; otherwise raise.\n\n Raises:\n ValueError, if count of columns is not 1.\n '
only_value = None
for value in six.itervalues(self.ordered_dict):
if (only_value is not None):
raise ValueError(('%r is not a singleton.' % self))
only_value = value
if ((only_value is self.__UnsetSentinel) or (only_value is None)):
raise ValueError(('%r is empty.' % self))
return only_value | If the row only has one column, return that value; otherwise raise.
Raises:
ValueError, if count of columns is not 1. | efilter/ext/row_tuple.py | get_singleton | Onager/dotty | 54 | python | def get_singleton(self):
'If the row only has one column, return that value; otherwise raise.\n\n Raises:\n ValueError, if count of columns is not 1.\n '
only_value = None
for value in six.itervalues(self.ordered_dict):
if (only_value is not None):
raise ValueError(('%r is not a singleton.' % self))
only_value = value
if ((only_value is self.__UnsetSentinel) or (only_value is None)):
raise ValueError(('%r is empty.' % self))
return only_value | def get_singleton(self):
'If the row only has one column, return that value; otherwise raise.\n\n Raises:\n ValueError, if count of columns is not 1.\n '
only_value = None
for value in six.itervalues(self.ordered_dict):
if (only_value is not None):
raise ValueError(('%r is not a singleton.' % self))
only_value = value
if ((only_value is self.__UnsetSentinel) or (only_value is None)):
raise ValueError(('%r is empty.' % self))
return only_value<|docstring|>If the row only has one column, return that value; otherwise raise.
Raises:
ValueError, if count of columns is not 1.<|endoftext|> |
0278d49adc8143160ab048ff5371fdfd224c05a2035a7095185aba63af281917 | @property
def ordered_values(self):
'Return a tuple of values in the order columns were specified.'
return tuple(iter(self)) | Return a tuple of values in the order columns were specified. | efilter/ext/row_tuple.py | ordered_values | Onager/dotty | 54 | python | @property
def ordered_values(self):
return tuple(iter(self)) | @property
def ordered_values(self):
return tuple(iter(self))<|docstring|>Return a tuple of values in the order columns were specified.<|endoftext|> |
f734413f720da01fb5f113a3818084deb3c09003eee056c28c8d41f923b562de | def refresh_cookie():
'\n 刷新搜狗cookie\n '
qs_dict = {'type': TYPE_ARTICLE, 's_from': 'input', 'query': chr(random.randint(19968, 40895)), 'page': 1, 'ie': 'utf8'}
url = f'{SOGOU_BASE_URL}/weixin?{urlencode(qs_dict)}'
resp = get(url)
suid = resp.cookies.get('SUID')
snuid = resp.cookies.get('SNUID')
HEADERS['Cookie'] = f'SNUID={snuid};SUID={suid};' | 刷新搜狗cookie | app/spider/spider.py | refresh_cookie | iszhouhua/weixin-sogou | 1 | python | def refresh_cookie():
'\n \n '
qs_dict = {'type': TYPE_ARTICLE, 's_from': 'input', 'query': chr(random.randint(19968, 40895)), 'page': 1, 'ie': 'utf8'}
url = f'{SOGOU_BASE_URL}/weixin?{urlencode(qs_dict)}'
resp = get(url)
suid = resp.cookies.get('SUID')
snuid = resp.cookies.get('SNUID')
HEADERS['Cookie'] = f'SNUID={snuid};SUID={suid};' | def refresh_cookie():
'\n \n '
qs_dict = {'type': TYPE_ARTICLE, 's_from': 'input', 'query': chr(random.randint(19968, 40895)), 'page': 1, 'ie': 'utf8'}
url = f'{SOGOU_BASE_URL}/weixin?{urlencode(qs_dict)}'
resp = get(url)
suid = resp.cookies.get('SUID')
snuid = resp.cookies.get('SNUID')
HEADERS['Cookie'] = f'SNUID={snuid};SUID={suid};'<|docstring|>刷新搜狗cookie<|endoftext|> |
143bbb3575127e9b8beb5b3d9eae8d989a6b3e996446f59c690f4c83abc1970e | def get(url, is_retry=True):
'发送请求\n\n Parameters\n ----------\n url : str\n 请求的链接\n is_retry : bool, optional\n 遭遇反爬虫时是否重试\n '
resp = requests.get(url, headers=HEADERS)
if (not resp.ok):
raise WeixinSogouException('搜狗接口请求失败.url:{}'.format(url), resp.status_code)
elif ('antispider' in resp.url):
if is_retry:
refresh_cookie()
return get(url, False)
raise AntiSpiderException('被搜狗识别为异常请求.', 403)
resp.encoding = 'utf-8'
return resp | 发送请求
Parameters
----------
url : str
请求的链接
is_retry : bool, optional
遭遇反爬虫时是否重试 | app/spider/spider.py | get | iszhouhua/weixin-sogou | 1 | python | def get(url, is_retry=True):
'发送请求\n\n Parameters\n ----------\n url : str\n 请求的链接\n is_retry : bool, optional\n 遭遇反爬虫时是否重试\n '
resp = requests.get(url, headers=HEADERS)
if (not resp.ok):
raise WeixinSogouException('搜狗接口请求失败.url:{}'.format(url), resp.status_code)
elif ('antispider' in resp.url):
if is_retry:
refresh_cookie()
return get(url, False)
raise AntiSpiderException('被搜狗识别为异常请求.', 403)
resp.encoding = 'utf-8'
return resp | def get(url, is_retry=True):
'发送请求\n\n Parameters\n ----------\n url : str\n 请求的链接\n is_retry : bool, optional\n 遭遇反爬虫时是否重试\n '
resp = requests.get(url, headers=HEADERS)
if (not resp.ok):
raise WeixinSogouException('搜狗接口请求失败.url:{}'.format(url), resp.status_code)
elif ('antispider' in resp.url):
if is_retry:
refresh_cookie()
return get(url, False)
raise AntiSpiderException('被搜狗识别为异常请求.', 403)
resp.encoding = 'utf-8'
return resp<|docstring|>发送请求
Parameters
----------
url : str
请求的链接
is_retry : bool, optional
遭遇反爬虫时是否重试<|endoftext|> |
eaccadf7a6aa2e5dff61e456b2c460b8b63ad7abbaf26843ca6d33cc0b96e5d7 | def search(keyword, page=1, search_type=TYPE_ARTICLE):
'搜索 文章\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n search_type : int, optional\n 搜索类型 the default is 2\n page : int, optional\n 页数 the default is 1\n\n Returns\n -------\n list[ArticleList]\n or\n list[OfficialAccountList]\n\n Raises\n ------\n WeixinSogouException\n requests error\n '
qs_dict = {'type': search_type, 's_from': 'input', 'query': keyword, 'page': page, 'ie': 'utf8'}
url = f'{SOGOU_BASE_URL}/weixin?{urlencode(qs_dict)}'
resp = get(url)
data_list = (parse.get_article_by_search(resp.text) if (search_type == TYPE_ARTICLE) else parse.get_profile_by_search(resp.text))
if (not data_list):
logging.info(f'关键字【{keyword}】,第{page}页搜索内容为空.search_type:{search_type}')
return data_list | 搜索 文章
Parameters
----------
keyword : str or unicode
搜索文字
search_type : int, optional
搜索类型 the default is 2
page : int, optional
页数 the default is 1
Returns
-------
list[ArticleList]
or
list[OfficialAccountList]
Raises
------
WeixinSogouException
requests error | app/spider/spider.py | search | iszhouhua/weixin-sogou | 1 | python | def search(keyword, page=1, search_type=TYPE_ARTICLE):
'搜索 文章\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n search_type : int, optional\n 搜索类型 the default is 2\n page : int, optional\n 页数 the default is 1\n\n Returns\n -------\n list[ArticleList]\n or\n list[OfficialAccountList]\n\n Raises\n ------\n WeixinSogouException\n requests error\n '
qs_dict = {'type': search_type, 's_from': 'input', 'query': keyword, 'page': page, 'ie': 'utf8'}
url = f'{SOGOU_BASE_URL}/weixin?{urlencode(qs_dict)}'
resp = get(url)
data_list = (parse.get_article_by_search(resp.text) if (search_type == TYPE_ARTICLE) else parse.get_profile_by_search(resp.text))
if (not data_list):
logging.info(f'关键字【{keyword}】,第{page}页搜索内容为空.search_type:{search_type}')
return data_list | def search(keyword, page=1, search_type=TYPE_ARTICLE):
'搜索 文章\n\n Parameters\n ----------\n keyword : str or unicode\n 搜索文字\n search_type : int, optional\n 搜索类型 the default is 2\n page : int, optional\n 页数 the default is 1\n\n Returns\n -------\n list[ArticleList]\n or\n list[OfficialAccountList]\n\n Raises\n ------\n WeixinSogouException\n requests error\n '
qs_dict = {'type': search_type, 's_from': 'input', 'query': keyword, 'page': page, 'ie': 'utf8'}
url = f'{SOGOU_BASE_URL}/weixin?{urlencode(qs_dict)}'
resp = get(url)
data_list = (parse.get_article_by_search(resp.text) if (search_type == TYPE_ARTICLE) else parse.get_profile_by_search(resp.text))
if (not data_list):
logging.info(f'关键字【{keyword}】,第{page}页搜索内容为空.search_type:{search_type}')
return data_list<|docstring|>搜索 文章
Parameters
----------
keyword : str or unicode
搜索文字
search_type : int, optional
搜索类型 the default is 2
page : int, optional
页数 the default is 1
Returns
-------
list[ArticleList]
or
list[OfficialAccountList]
Raises
------
WeixinSogouException
requests error<|endoftext|> |
f2e5ca6a327a726e46afe642478c5e53d35a7dac635d14aa2920906b7fed63f9 | def get_detail(url, request_type=TYPE_ARTICLE):
'根据临时链接获取文章内容\n\n Parameters\n ----------\n url : str or unicode\n 原文链接,临时链接\n request_type: int, optional\n 链接类型 the default is 2\n Returns\n -------\n ArticleDetail\n\n Raises\n ------\n WeixinSogouException\n '
if re.match('http(s?)://weixin\\.sogou\\.com/', url):
resp = get(url)
url = parse.get_wechat_url(resp.text)
resp = get(url)
parse.check_weixin_error(resp.text)
content_info = (parse.get_article_detail(resp.text) if (request_type == TYPE_ARTICLE) else parse.get_profile_detail(resp.text))
content_info.temp_url = resp.url
return content_info | 根据临时链接获取文章内容
Parameters
----------
url : str or unicode
原文链接,临时链接
request_type: int, optional
链接类型 the default is 2
Returns
-------
ArticleDetail
Raises
------
WeixinSogouException | app/spider/spider.py | get_detail | iszhouhua/weixin-sogou | 1 | python | def get_detail(url, request_type=TYPE_ARTICLE):
'根据临时链接获取文章内容\n\n Parameters\n ----------\n url : str or unicode\n 原文链接,临时链接\n request_type: int, optional\n 链接类型 the default is 2\n Returns\n -------\n ArticleDetail\n\n Raises\n ------\n WeixinSogouException\n '
if re.match('http(s?)://weixin\\.sogou\\.com/', url):
resp = get(url)
url = parse.get_wechat_url(resp.text)
resp = get(url)
parse.check_weixin_error(resp.text)
content_info = (parse.get_article_detail(resp.text) if (request_type == TYPE_ARTICLE) else parse.get_profile_detail(resp.text))
content_info.temp_url = resp.url
return content_info | def get_detail(url, request_type=TYPE_ARTICLE):
'根据临时链接获取文章内容\n\n Parameters\n ----------\n url : str or unicode\n 原文链接,临时链接\n request_type: int, optional\n 链接类型 the default is 2\n Returns\n -------\n ArticleDetail\n\n Raises\n ------\n WeixinSogouException\n '
if re.match('http(s?)://weixin\\.sogou\\.com/', url):
resp = get(url)
url = parse.get_wechat_url(resp.text)
resp = get(url)
parse.check_weixin_error(resp.text)
content_info = (parse.get_article_detail(resp.text) if (request_type == TYPE_ARTICLE) else parse.get_profile_detail(resp.text))
content_info.temp_url = resp.url
return content_info<|docstring|>根据临时链接获取文章内容
Parameters
----------
url : str or unicode
原文链接,临时链接
request_type: int, optional
链接类型 the default is 2
Returns
-------
ArticleDetail
Raises
------
WeixinSogouException<|endoftext|> |
7a90525da91ae4e01a7b7478c5f8bdafbf5fc79ffc9d9fc25e4f078f25065752 | def counting_sort(elements):
'\n Use the simple counting sort algorithm to sort the :param elements.\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n '
length = len(elements)
if ((not length) or (length == 1)):
return elements
mini = maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element < mini):
mini = element
if (element > maxi):
maxi = element
all_range = []
for i in range(((maxi - mini) + 1)):
all_range.append(0)
for element in elements:
all_range[(element - mini)] += 1
length = 0
for i in range(len(all_range)):
count = all_range[i]
while (count > 0):
elements[length] = (i + mini)
length += 1
count -= 1
return elements | Use the simple counting sort algorithm to sort the :param elements.
:param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()
:return: the sorted elements in increasing order | distribution.py | counting_sort | CxdInitial/sorting | 0 | python | def counting_sort(elements):
'\n Use the simple counting sort algorithm to sort the :param elements.\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n '
length = len(elements)
if ((not length) or (length == 1)):
return elements
mini = maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element < mini):
mini = element
if (element > maxi):
maxi = element
all_range = []
for i in range(((maxi - mini) + 1)):
all_range.append(0)
for element in elements:
all_range[(element - mini)] += 1
length = 0
for i in range(len(all_range)):
count = all_range[i]
while (count > 0):
elements[length] = (i + mini)
length += 1
count -= 1
return elements | def counting_sort(elements):
'\n Use the simple counting sort algorithm to sort the :param elements.\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n '
length = len(elements)
if ((not length) or (length == 1)):
return elements
mini = maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element < mini):
mini = element
if (element > maxi):
maxi = element
all_range = []
for i in range(((maxi - mini) + 1)):
all_range.append(0)
for element in elements:
all_range[(element - mini)] += 1
length = 0
for i in range(len(all_range)):
count = all_range[i]
while (count > 0):
elements[length] = (i + mini)
length += 1
count -= 1
return elements<|docstring|>Use the simple counting sort algorithm to sort the :param elements.
:param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()
:return: the sorted elements in increasing order<|endoftext|> |
549a1e9127e4d57d9cb4b588fd52784ffda5e3002f44a3f088a8aaf6459943f5 | def bucket_sort(elements, bucket_size=10):
"\n Use the simple bucket sort algorithm to sort the :param elements.\n :param bucket_size: the distribution buckets' size\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n "
length = len(elements)
if ((not length) or (length == 1)):
return elements
mini = maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element < mini):
mini = element
if (element > maxi):
maxi = element
buckets_size = (((maxi - mini) + 1) // bucket_size)
if (((maxi - mini) + 1) % bucket_size):
buckets_size += 1
buckets = []
for i in range(buckets_size):
buckets.append([None, None])
for element in elements:
index = (element // bucket_size)
ptr = buckets[index]
while (ptr[1] and (ptr[1][0] < element)):
ptr = ptr[1]
element = [element, ptr[1]]
ptr[1] = element
length = 0
for bucket in buckets:
ptr = bucket[1]
while ptr:
elements[length] = ptr[0]
length += 1
ptr = ptr[1]
return elements | Use the simple bucket sort algorithm to sort the :param elements.
:param bucket_size: the distribution buckets' size
:param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()
:return: the sorted elements in increasing order | distribution.py | bucket_sort | CxdInitial/sorting | 0 | python | def bucket_sort(elements, bucket_size=10):
"\n Use the simple bucket sort algorithm to sort the :param elements.\n :param bucket_size: the distribution buckets' size\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n "
length = len(elements)
if ((not length) or (length == 1)):
return elements
mini = maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element < mini):
mini = element
if (element > maxi):
maxi = element
buckets_size = (((maxi - mini) + 1) // bucket_size)
if (((maxi - mini) + 1) % bucket_size):
buckets_size += 1
buckets = []
for i in range(buckets_size):
buckets.append([None, None])
for element in elements:
index = (element // bucket_size)
ptr = buckets[index]
while (ptr[1] and (ptr[1][0] < element)):
ptr = ptr[1]
element = [element, ptr[1]]
ptr[1] = element
length = 0
for bucket in buckets:
ptr = bucket[1]
while ptr:
elements[length] = ptr[0]
length += 1
ptr = ptr[1]
return elements | def bucket_sort(elements, bucket_size=10):
"\n Use the simple bucket sort algorithm to sort the :param elements.\n :param bucket_size: the distribution buckets' size\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n "
length = len(elements)
if ((not length) or (length == 1)):
return elements
mini = maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element < mini):
mini = element
if (element > maxi):
maxi = element
buckets_size = (((maxi - mini) + 1) // bucket_size)
if (((maxi - mini) + 1) % bucket_size):
buckets_size += 1
buckets = []
for i in range(buckets_size):
buckets.append([None, None])
for element in elements:
index = (element // bucket_size)
ptr = buckets[index]
while (ptr[1] and (ptr[1][0] < element)):
ptr = ptr[1]
element = [element, ptr[1]]
ptr[1] = element
length = 0
for bucket in buckets:
ptr = bucket[1]
while ptr:
elements[length] = ptr[0]
length += 1
ptr = ptr[1]
return elements<|docstring|>Use the simple bucket sort algorithm to sort the :param elements.
:param bucket_size: the distribution buckets' size
:param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()
:return: the sorted elements in increasing order<|endoftext|> |
def radix_sort(elements):
    """
    Sort the non-negative integers in *elements* in increasing order using
    least-significant-digit (LSD) radix sort, base 10.

    :param elements: a mutable sequence of non-negative ints implementing
        ``__getitem__``/``__setitem__``/``__len__``
    :return: the same sequence, sorted in increasing order
    """
    length = len(elements)
    if (not length) or (length == 1):
        return elements
    maxi = elements[0]
    for element in elements:
        assert isinstance(element, int)
        if element > maxi:
            maxi = element
    # Number of decimal digits in the largest value = number of passes.
    digits = 0
    while maxi > 0:
        maxi //= 10
        digits += 1
    # BUG FIX: the original ran exactly two distribution passes (ignoring the
    # digit count it computed) into shared buckets and flushed only once,
    # which duplicated every element and indexed past the end of the
    # sequence.  Instead, run one distribute-and-collect pass per digit,
    # least-significant digit first; list append/iteration keeps each pass
    # stable, which is what makes LSD radix sort correct.
    for i in range(digits):
        buckets = [[] for _ in range(10)]
        divisor = 10 ** i
        for element in elements:
            buckets[(element // divisor) % 10].append(element)
        pos = 0
        for bucket in buckets:
            for element in bucket:
                elements[pos] = element
                pos += 1
    return elements
:param bucket_size: the distribution buckets' size
:param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()
:return: the sorted elements in increasing order | distribution.py | radix_sort | CxdInitial/sorting | 0 | python | def radix_sort(elements):
"\n Use the simple radix sort algorithm to sort the :param elements.\n :param bucket_size: the distribution buckets' size\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n "
length = len(elements)
if ((not length) or (length == 1)):
return elements
maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element > maxi):
maxi = element
bits = 0
while (maxi > 0):
maxi //= 10
bits += 1
all_range = [[], [], [], [], [], [], [], [], [], []]
for i in range(2):
for element in elements:
num = ((element // (10 ** i)) % 10)
all_range[num].append(element)
length = 0
for items in all_range:
while items:
elements[length] = items.pop(0)
length += 1
return elements | def radix_sort(elements):
"\n Use the simple radix sort algorithm to sort the :param elements.\n :param bucket_size: the distribution buckets' size\n :param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()\n :return: the sorted elements in increasing order\n "
length = len(elements)
if ((not length) or (length == 1)):
return elements
maxi = elements[0]
for element in elements:
assert isinstance(element, int)
if (element > maxi):
maxi = element
bits = 0
while (maxi > 0):
maxi //= 10
bits += 1
all_range = [[], [], [], [], [], [], [], [], [], []]
for i in range(2):
for element in elements:
num = ((element // (10 ** i)) % 10)
all_range[num].append(element)
length = 0
for items in all_range:
while items:
elements[length] = items.pop(0)
length += 1
return elements<|docstring|>Use the simple radix sort algorithm to sort the :param elements.
:param bucket_size: the distribution buckets' size
:param elements: a integer sequence in which the function __get_item__ and __len__ were implemented()
:return: the sorted elements in increasing order<|endoftext|> |
def export(args):
    """Export HySDS user rules from Mozart and GRQ to a JSON file.

    Queries both user-rules indices, collects the rule documents, and
    writes them to ``args.outfile`` as ``{"mozart": [...], "grq": [...]}``.
    """
    rules = {}
    # Same query/log order as before: mozart first, then grq.
    for component, index in (('mozart', USER_RULES_MOZART), ('grq', USER_RULES_GRQ)):
        hits = mozart_es.query(index=index)
        rules[component] = [hit['_source'] for hit in hits]
        logger.debug('%d %s user rules found' % (len(hits), component))
    logger.debug('rules: {}'.format(json.dumps(rules, indent=2)))
    outfile = normpath(args.outfile)
    export_dir = os.path.dirname(outfile)
    logger.debug('export_dir: {}'.format(export_dir))
    validate_dir(export_dir)
    with open(outfile, 'w') as fh:
        json.dump(rules, fh, indent=2, sort_keys=True)
rules = {}
mozart_rules = mozart_es.query(index=USER_RULES_MOZART)
rules['mozart'] = [rule['_source'] for rule in mozart_rules]
logger.debug(('%d mozart user rules found' % len(mozart_rules)))
grq_rules = mozart_es.query(index=USER_RULES_GRQ)
rules['grq'] = [rule['_source'] for rule in grq_rules]
logger.debug(('%d grq user rules found' % len(grq_rules)))
logger.debug('rules: {}'.format(json.dumps(rules, indent=2)))
outfile = normpath(args.outfile)
export_dir = os.path.dirname(outfile)
logger.debug('export_dir: {}'.format(export_dir))
validate_dir(export_dir)
with open(outfile, 'w') as f:
json.dump(rules, f, indent=2, sort_keys=True) | def export(args):
rules = {}
mozart_rules = mozart_es.query(index=USER_RULES_MOZART)
rules['mozart'] = [rule['_source'] for rule in mozart_rules]
logger.debug(('%d mozart user rules found' % len(mozart_rules)))
grq_rules = mozart_es.query(index=USER_RULES_GRQ)
rules['grq'] = [rule['_source'] for rule in grq_rules]
logger.debug(('%d grq user rules found' % len(grq_rules)))
logger.debug('rules: {}'.format(json.dumps(rules, indent=2)))
outfile = normpath(args.outfile)
export_dir = os.path.dirname(outfile)
logger.debug('export_dir: {}'.format(export_dir))
validate_dir(export_dir)
with open(outfile, 'w') as f:
json.dump(rules, f, indent=2, sort_keys=True)<|docstring|>Export HySDS user rules.<|endoftext|> |
def import_rules(args):
    """
    Import HySDS user rules into Mozart and GRQ.

    Expected JSON structure of the rules file::

        {
            "mozart": [...],
            "grq": [...]
        }

    Rules missing ``creation_time``/``modified_time`` (or with falsy values)
    are stamped with the current UTC time before being indexed.

    :param args: parsed CLI arguments; ``args.file`` is the rules file path
    :return: 1 if the rules file does not exist, otherwise None
    """
    rules_file = normpath(args.file)
    logger.debug('rules_file: {}'.format(rules_file))
    if not os.path.isfile(rules_file):
        logger.error("HySDS user rules file {} doesn't exist.".format(rules_file))
        return 1
    with open(rules_file) as f:
        user_rules = json.load(f)
    # BUG FIX: log the parsed rules; the original serialized the file *path*
    # (rules_file) instead of the loaded document.
    logger.debug('rules: {}'.format(json.dumps(user_rules, indent=2, sort_keys=True)))
    # The mozart and grq rule sets are indexed identically, just into
    # different indices, so one loop replaces the two duplicated ones.
    for component, index in (('mozart', USER_RULES_MOZART), ('grq', USER_RULES_GRQ)):
        for rule in user_rules[component]:
            # NOTE(review): datetime.utcnow() is deprecated since 3.12; kept
            # to preserve the exact 'isoformat + Z' timestamp format.
            now = datetime.utcnow().isoformat() + 'Z'
            if not rule.get('creation_time', None):
                rule['creation_time'] = now
            if not rule.get('modified_time', None):
                rule['modified_time'] = now
            result = mozart_es.index_document(index=index, body=rule)
            logger.debug(result)
rules json structure: {
"mozart": [...],
"grq": [...],
} | sdscli/adapters/hysds/rules.py | import_rules | sdskit/sdscli | 0 | python | def import_rules(args):
'\n Import HySDS user rules.\n rules json structure: {\n "mozart": [...],\n "grq": [...],\n }\n '
rules_file = normpath(args.file)
logger.debug('rules_file: {}'.format(rules_file))
if (not os.path.isfile(rules_file)):
logger.error("HySDS user rules file {} doesn't exist.".format(rules_file))
return 1
with open(rules_file) as f:
user_rules = json.load(f)
logger.debug('rules: {}'.format(json.dumps(rules_file, indent=2, sort_keys=True)))
for rule in user_rules['mozart']:
now = (datetime.utcnow().isoformat() + 'Z')
if (not rule.get('creation_time', None)):
rule['creation_time'] = now
if (not rule.get('modified_time', None)):
rule['modified_time'] = now
result = mozart_es.index_document(index=USER_RULES_MOZART, body=rule)
logger.debug(result)
for rule in user_rules['grq']:
now = (datetime.utcnow().isoformat() + 'Z')
if (not rule.get('creation_time', None)):
rule['creation_time'] = now
if (not rule.get('modified_time', None)):
rule['modified_time'] = now
result = mozart_es.index_document(index=USER_RULES_GRQ, body=rule)
logger.debug(result) | def import_rules(args):
'\n Import HySDS user rules.\n rules json structure: {\n "mozart": [...],\n "grq": [...],\n }\n '
rules_file = normpath(args.file)
logger.debug('rules_file: {}'.format(rules_file))
if (not os.path.isfile(rules_file)):
logger.error("HySDS user rules file {} doesn't exist.".format(rules_file))
return 1
with open(rules_file) as f:
user_rules = json.load(f)
logger.debug('rules: {}'.format(json.dumps(rules_file, indent=2, sort_keys=True)))
for rule in user_rules['mozart']:
now = (datetime.utcnow().isoformat() + 'Z')
if (not rule.get('creation_time', None)):
rule['creation_time'] = now
if (not rule.get('modified_time', None)):
rule['modified_time'] = now
result = mozart_es.index_document(index=USER_RULES_MOZART, body=rule)
logger.debug(result)
for rule in user_rules['grq']:
now = (datetime.utcnow().isoformat() + 'Z')
if (not rule.get('creation_time', None)):
rule['creation_time'] = now
if (not rule.get('modified_time', None)):
rule['modified_time'] = now
result = mozart_es.index_document(index=USER_RULES_GRQ, body=rule)
logger.debug(result)<|docstring|>Import HySDS user rules.
rules json structure: {
"mozart": [...],
"grq": [...],
}<|endoftext|> |
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up Abode switch devices."""
    import abodepy.helpers.constants as CONST
    import abodepy.helpers.timeline as TIMELINE

    data = hass.data[ABODE_DOMAIN]
    # Plain switches, minus excluded devices and those handled as lights.
    entities = [
        AbodeSwitch(data, device)
        for device in data.abode.get_devices(generic_type=CONST.TYPE_SWITCH)
        if not (data.is_excluded(device) or data.is_light(device))
    ]
    # Automations exposed as switches, minus excluded ones.
    entities.extend(
        AbodeAutomationSwitch(data, automation, TIMELINE.AUTOMATION_EDIT_GROUP)
        for automation in data.abode.get_automations(generic_type=CONST.TYPE_AUTOMATION)
        if not data.is_automation_excluded(automation)
    )
    data.devices.extend(entities)
    add_entities(entities)
import abodepy.helpers.constants as CONST
import abodepy.helpers.timeline as TIMELINE
data = hass.data[ABODE_DOMAIN]
devices = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_SWITCH):
if (data.is_excluded(device) or data.is_light(device)):
continue
devices.append(AbodeSwitch(data, device))
for automation in data.abode.get_automations(generic_type=CONST.TYPE_AUTOMATION):
if data.is_automation_excluded(automation):
continue
devices.append(AbodeAutomationSwitch(data, automation, TIMELINE.AUTOMATION_EDIT_GROUP))
data.devices.extend(devices)
add_entities(devices) | def setup_platform(hass, config, add_entities, discovery_info=None):
import abodepy.helpers.constants as CONST
import abodepy.helpers.timeline as TIMELINE
data = hass.data[ABODE_DOMAIN]
devices = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_SWITCH):
if (data.is_excluded(device) or data.is_light(device)):
continue
devices.append(AbodeSwitch(data, device))
for automation in data.abode.get_automations(generic_type=CONST.TYPE_AUTOMATION):
if data.is_automation_excluded(automation):
continue
devices.append(AbodeAutomationSwitch(data, automation, TIMELINE.AUTOMATION_EDIT_GROUP))
data.devices.extend(devices)
add_entities(devices)<|docstring|>Set up Abode switch devices.<|endoftext|> |
def turn_on(self, **kwargs):
    """Turn on the device."""
    # Delegate directly to the wrapped abodepy device object.
    self._device.switch_on()
self._device.switch_on() | def turn_on(self, **kwargs):
self._device.switch_on()<|docstring|>Turn on the device.<|endoftext|> |
cb91772b783480c174314816d75f65dc1f123197c3148a9d31ebd544362bff32 | def turn_off(self, **kwargs):
'Turn off the device.'
self._device.switch_off() | Turn off the device. | homeassistant/components/abode/switch.py | turn_off | arsenalliu123ms/home-assistant | 4 | python | def turn_off(self, **kwargs):
self._device.switch_off() | def turn_off(self, **kwargs):
self._device.switch_off()<|docstring|>Turn off the device.<|endoftext|> |
@property
def is_on(self):
    """Return true if device is on (as reported by the wrapped device)."""
    return self._device.is_on
def is_on(self):
return self._device.is_on | @property
def is_on(self):
return self._device.is_on<|docstring|>Return true if device is on.<|endoftext|> |
def turn_on(self, **kwargs):
    """Turn on the device (activate the Abode automation)."""
    self._automation.set_active(True)
self._automation.set_active(True) | def turn_on(self, **kwargs):
self._automation.set_active(True)<|docstring|>Turn on the device.<|endoftext|> |
def turn_off(self, **kwargs):
    """Turn off the device (deactivate the Abode automation)."""
    self._automation.set_active(False)
self._automation.set_active(False) | def turn_off(self, **kwargs):
self._automation.set_active(False)<|docstring|>Turn off the device.<|endoftext|> |
@property
def is_on(self):
    """Return True if the binary sensor is on (automation is active)."""
    return self._automation.is_active
def is_on(self):
return self._automation.is_active | @property
def is_on(self):
return self._automation.is_active<|docstring|>Return True if the binary sensor is on.<|endoftext|> |
def __init__(self, color, ks, kd, ka, shininess):
    """
    Phong-style material coefficients.

    color -- base color of the surface
    ks -- specular reflection constant
    kd -- diffuse reflection constant
    ka -- ambient reflection constant
    shininess -- shininess constant
    """
    # Wrap every coefficient as a symbolic tensor variable so the material
    # can participate in differentiable expressions (T is presumably
    # theano.tensor — confirm against the module's imports).
    self.color = T.as_tensor_variable(color)
    self.ks = T.as_tensor_variable(ks)
    self.kd = T.as_tensor_variable(kd)
    self.ka = T.as_tensor_variable(ka)
    self.shininess = T.as_tensor_variable(shininess)
kd -- diffuse reflection constant
ka -- ambient reflection constant
shininess -- shininess constant | scene.py | __init__ | lebek/reversible-raytracer | 15 | python | def __init__(self, color, ks, kd, ka, shininess):
'\n ks -- specular reflection constant\n kd -- diffuse reflection constant\n ka -- ambient reflection constant\n shininess -- shininess constant\n '
self.ks = T.as_tensor_variable(ks)
self.kd = T.as_tensor_variable(kd)
self.ka = T.as_tensor_variable(ka)
self.color = T.as_tensor_variable(color)
self.shininess = T.as_tensor_variable(shininess) | def __init__(self, color, ks, kd, ka, shininess):
'\n ks -- specular reflection constant\n kd -- diffuse reflection constant\n ka -- ambient reflection constant\n shininess -- shininess constant\n '
self.ks = T.as_tensor_variable(ks)
self.kd = T.as_tensor_variable(kd)
self.ka = T.as_tensor_variable(ka)
self.color = T.as_tensor_variable(color)
self.shininess = T.as_tensor_variable(shininess)<|docstring|>ks -- specular reflection constant
kd -- diffuse reflection constant
ka -- ambient reflection constant
shininess -- shininess constant<|endoftext|> |
@patch('charmhelpers.contrib.openstack.utils.lsb_release')
def test_os_codename_from_install_source(self, mocked_lsb):
    """Test mapping install source to OpenStack release name"""
    mocked_lsb.return_value = FAKE_RELEASE
    # assertEquals is a deprecated alias of assertEqual (removed in
    # Python 3.12); use the canonical name throughout.
    # 'distro' variants resolve via the mocked lsb_release.
    self.assertEqual(openstack.get_os_codename_install_source('distro'), 'essex')
    self.assertEqual(openstack.get_os_codename_install_source('distro-proposed'), 'essex')
    self.assertEqual(openstack.get_os_codename_install_source('proposed'), 'essex')
    # Cloud-archive pockets encode the release name directly.
    src = 'cloud:precise-grizzly'
    self.assertEqual(openstack.get_os_codename_install_source(src), 'grizzly')
    src = 'cloud:precise-grizzly/proposed'
    self.assertEqual(openstack.get_os_codename_install_source(src), 'grizzly')
    # PPA and raw deb lines are matched against known release names.
    src = 'ppa:openstack-ubuntu-testing/havana-trunk-testing'
    self.assertEqual(openstack.get_os_codename_install_source(src), 'havana')
    src = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-havana main'
    self.assertEqual(openstack.get_os_codename_install_source(src), 'havana')
    # No install source yields an empty code name.
    self.assertEqual(openstack.get_os_codename_install_source(None), '')
def test_os_codename_from_install_source(self, mocked_lsb):
mocked_lsb.return_value = FAKE_RELEASE
self.assertEquals(openstack.get_os_codename_install_source('distro'), 'essex')
self.assertEquals(openstack.get_os_codename_install_source('distro-proposed'), 'essex')
self.assertEquals(openstack.get_os_codename_install_source('proposed'), 'essex')
src = 'cloud:precise-grizzly'
self.assertEquals(openstack.get_os_codename_install_source(src), 'grizzly')
src = 'cloud:precise-grizzly/proposed'
self.assertEquals(openstack.get_os_codename_install_source(src), 'grizzly')
src = 'ppa:openstack-ubuntu-testing/havana-trunk-testing'
self.assertEquals(openstack.get_os_codename_install_source(src), 'havana')
src = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-havana main'
self.assertEquals(openstack.get_os_codename_install_source(src), 'havana')
self.assertEquals(openstack.get_os_codename_install_source(None), ) | @patch('charmhelpers.contrib.openstack.utils.lsb_release')
def test_os_codename_from_install_source(self, mocked_lsb):
mocked_lsb.return_value = FAKE_RELEASE
self.assertEquals(openstack.get_os_codename_install_source('distro'), 'essex')
self.assertEquals(openstack.get_os_codename_install_source('distro-proposed'), 'essex')
self.assertEquals(openstack.get_os_codename_install_source('proposed'), 'essex')
src = 'cloud:precise-grizzly'
self.assertEquals(openstack.get_os_codename_install_source(src), 'grizzly')
src = 'cloud:precise-grizzly/proposed'
self.assertEquals(openstack.get_os_codename_install_source(src), 'grizzly')
src = 'ppa:openstack-ubuntu-testing/havana-trunk-testing'
self.assertEquals(openstack.get_os_codename_install_source(src), 'havana')
src = 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-havana main'
self.assertEquals(openstack.get_os_codename_install_source(src), 'havana')
self.assertEquals(openstack.get_os_codename_install_source(None), )<|docstring|>Test mapping install source to OpenStack release name<|endoftext|> |
@patch('charmhelpers.contrib.openstack.utils.lsb_release')
def test_os_codename_from_bad_install_source(self, mocked_lsb):
    """Test that an unmappable Ubuntu release reports an error via error_out."""
    # 'natty' has no OpenStack release mapping, so resolution must fail.
    bad_release = copy(FAKE_RELEASE)
    bad_release['DISTRIB_CODENAME'] = 'natty'
    mocked_lsb.return_value = bad_release
    with patch('charmhelpers.contrib.openstack.utils.error_out') as mocked_err:
        openstack.get_os_codename_install_source('distro')
        mocked_err.assert_called_with(
            'Could not derive openstack release for this Ubuntu release: natty')
def test_os_codename_from_bad_install_source(self, mocked_lsb):
_fake_release = copy(FAKE_RELEASE)
_fake_release['DISTRIB_CODENAME'] = 'natty'
mocked_lsb.return_value = _fake_release
_e = 'charmhelpers.contrib.openstack.utils.error_out'
with patch(_e) as mocked_err:
openstack.get_os_codename_install_source('distro')
_er = 'Could not derive openstack release for this Ubuntu release: natty'
mocked_err.assert_called_with(_er) | @patch('charmhelpers.contrib.openstack.utils.lsb_release')
def test_os_codename_from_bad_install_source(self, mocked_lsb):
_fake_release = copy(FAKE_RELEASE)
_fake_release['DISTRIB_CODENAME'] = 'natty'
mocked_lsb.return_value = _fake_release
_e = 'charmhelpers.contrib.openstack.utils.error_out'
with patch(_e) as mocked_err:
openstack.get_os_codename_install_source('distro')
_er = 'Could not derive openstack release for this Ubuntu release: natty'
mocked_err.assert_called_with(_er)<|docstring|>Test mapping install source to OpenStack release name<|endoftext|> |
def test_os_codename_from_version(self):
    """Test mapping OpenStack numerical versions to code name"""
    # assertEquals is a deprecated alias of assertEqual (removed in
    # Python 3.12); use the canonical name.
    self.assertEqual(openstack.get_os_codename_version('2013.1'), 'grizzly')
self.assertEquals(openstack.get_os_codename_version('2013.1'), 'grizzly') | def test_os_codename_from_version(self):
self.assertEquals(openstack.get_os_codename_version('2013.1'), 'grizzly')<|docstring|>Test mapping OpenStack numerical versions to code name<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.