Features (name: dtype, value range):
hexsha: stringlengths, 40 to 40
size: int64, 2 to 1.02M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 4 to 245
max_stars_repo_name: stringlengths, 6 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 40
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 4 to 245
max_issues_repo_name: stringlengths, 6 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 40
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 4 to 245
max_forks_repo_name: stringlengths, 6 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 40
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 2 to 1.02M
avg_line_length: float64, 1 to 417k
max_line_length: int64, 1 to 987k
alphanum_fraction: float64, 0 to 1
content_no_comment: stringlengths, 0 to 1.01M
is_comment_constant_removed: bool, 1 class
is_sharp_comment_removed: bool, 1 class
hexsha: f7081f73c9aff0c80634d1d71479fed9e3fe62cf
size: 2,275
ext: py
lang: Python
max_stars_repo_path: AppPkg/Applications/Python/Python-2.7.2/Lib/email/iterators.py
max_stars_repo_name: CEOALT1/RefindPlusUDK
max_stars_repo_head_hexsha: 116b957ad735f96fbb6d80a0ba582046960ba164
max_stars_repo_licenses: [ "BSD-2-Clause" ]
max_stars_count: 2,757
max_stars_repo_stars_event_min_datetime: 2018-04-28T21:41:36.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-29T06:33:36.000Z
max_issues_repo_path: AppPkg/Applications/Python/Python-2.7.2/Lib/email/iterators.py
max_issues_repo_name: CEOALT1/RefindPlusUDK
max_issues_repo_head_hexsha: 116b957ad735f96fbb6d80a0ba582046960ba164
max_issues_repo_licenses: [ "BSD-2-Clause" ]
max_issues_count: 20
max_issues_repo_issues_event_min_datetime: 2019-07-23T15:29:32.000Z
max_issues_repo_issues_event_max_datetime: 2022-01-21T12:53:04.000Z
max_forks_repo_path: AppPkg/Applications/Python/Python-2.7.2/Lib/email/iterators.py
max_forks_repo_name: CEOALT1/RefindPlusUDK
max_forks_repo_head_hexsha: 116b957ad735f96fbb6d80a0ba582046960ba164
max_forks_repo_licenses: [ "BSD-2-Clause" ]
max_forks_count: 449
max_forks_repo_forks_event_min_datetime: 2018-05-09T05:54:05.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-30T14:54:18.000Z
content:
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org

"""Various types of useful iterators and generators."""

__all__ = [
    'body_line_iterator',
    'typed_subpart_iterator',
    'walk',
    # Do not include _structure() since it's part of the debugging API.
    ]

import sys
from cStringIO import StringIO


# This function will become a method of the Message class
def walk(self):
    """Walk over the message tree, yielding each subpart.

    The walk is performed in depth-first order.  This method is a
    generator.
    """
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            for subsubpart in subpart.walk():
                yield subsubpart


# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
    """Iterate over the parts, returning string payloads line-by-line.

    Optional decode (default False) is passed through to .get_payload().
    """
    for subpart in msg.walk():
        payload = subpart.get_payload(decode=decode)
        if isinstance(payload, basestring):
            for line in StringIO(payload):
                yield line


def typed_subpart_iterator(msg, maintype='text', subtype=None):
    """Iterate over the subparts with a given MIME type.

    Use `maintype' as the main MIME type to match against; this defaults to
    "text".  Optional `subtype' is the MIME subtype to match against; if
    omitted, only the main type is matched.
    """
    for subpart in msg.walk():
        if subpart.get_content_maintype() == maintype:
            if subtype is None or subpart.get_content_subtype() == subtype:
                yield subpart


def _structure(msg, fp=None, level=0, include_default=False):
    """A handy debugging aid"""
    if fp is None:
        fp = sys.stdout
    tab = ' ' * (level * 4)
    print >> fp, tab + msg.get_content_type(),
    if include_default:
        print >> fp, '[%s]' % msg.get_default_type()
    else:
        print >> fp
    if msg.is_multipart():
        for subpart in msg.get_payload():
            _structure(subpart, fp, level+1, include_default)
avg_line_length: 30.743243
max_line_length: 76
alphanum_fraction: 0.634286
content_no_comment:
__all__ = [
    'body_line_iterator',
    'typed_subpart_iterator',
    'walk',
    ]

import sys
from cStringIO import StringIO


# This function will become a method of the Message class
def walk(self):
    yield self
    if self.is_multipart():
        for subpart in self.get_payload():
            for subsubpart in subpart.walk():
                yield subsubpart


# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
    for subpart in msg.walk():
        payload = subpart.get_payload(decode=decode)
        if isinstance(payload, basestring):
            for line in StringIO(payload):
                yield line


def typed_subpart_iterator(msg, maintype='text', subtype=None):
    for subpart in msg.walk():
        if subpart.get_content_maintype() == maintype:
            if subtype is None or subpart.get_content_subtype() == subtype:
                yield subpart


def _structure(msg, fp=None, level=0, include_default=False):
    if fp is None:
        fp = sys.stdout
    tab = ' ' * (level * 4)
    print >> fp, tab + msg.get_content_type(),
    if include_default:
        print >> fp, '[%s]' % msg.get_default_type()
    else:
        print >> fp
    if msg.is_multipart():
        for subpart in msg.get_payload():
            _structure(subpart, fp, level+1, include_default)
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f7081fa18c16e5bb9c85414fdde284ae2f611859
size: 766
ext: py
lang: Python
max_stars_repo_path: docs/components_page/components/popover/popover.py
max_stars_repo_name: imrehg/dash-bootstrap-components
max_stars_repo_head_hexsha: 7cf43168808bb88b243e414168dc3bf196fefd84
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2022-01-12T12:36:20.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-12T12:36:20.000Z
max_issues_repo_path: docs/components_page/components/popover/popover.py
max_issues_repo_name: imrehg/dash-bootstrap-components
max_issues_repo_head_hexsha: 7cf43168808bb88b243e414168dc3bf196fefd84
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: docs/components_page/components/popover/popover.py
max_forks_repo_name: imrehg/dash-bootstrap-components
max_forks_repo_head_hexsha: 7cf43168808bb88b243e414168dc3bf196fefd84
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State

popover = html.Div(
    [
        dbc.Button(
            "Click to toggle popover", id="popover-target", color="danger"
        ),
        dbc.Popover(
            [
                dbc.PopoverHeader("Popover header"),
                dbc.PopoverBody("And here's some amazing content. Cool!"),
            ],
            id="popover",
            is_open=False,
            target="popover-target",
        ),
    ]
)


@app.callback(
    Output("popover", "is_open"),
    [Input("popover-target", "n_clicks")],
    [State("popover", "is_open")],
)
def toggle_popover(n, is_open):
    if n:
        return not is_open
    return is_open
avg_line_length: 23.9375
max_line_length: 74
alphanum_fraction: 0.567885
content_no_comment:
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output, State

popover = html.Div(
    [
        dbc.Button(
            "Click to toggle popover", id="popover-target", color="danger"
        ),
        dbc.Popover(
            [
                dbc.PopoverHeader("Popover header"),
                dbc.PopoverBody("And here's some amazing content. Cool!"),
            ],
            id="popover",
            is_open=False,
            target="popover-target",
        ),
    ]
)


@app.callback(
    Output("popover", "is_open"),
    [Input("popover-target", "n_clicks")],
    [State("popover", "is_open")],
)
def toggle_popover(n, is_open):
    if n:
        return not is_open
    return is_open
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f708206218289bf8000412c8894fb2157df7e624
size: 1,891
ext: py
lang: Python
max_stars_repo_path: count.py
max_stars_repo_name: romain-fontugne/roa-counter
max_stars_repo_head_hexsha: 35413f036a0a75088ae318dfa3df58b3cbce6095
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2022-01-24T03:09:00.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-24T03:09:00.000Z
max_issues_repo_path: count.py
max_issues_repo_name: romain-fontugne/roa-counter
max_issues_repo_head_hexsha: 35413f036a0a75088ae318dfa3df58b3cbce6095
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: count.py
max_forks_repo_name: romain-fontugne/roa-counter
max_forks_repo_head_hexsha: 35413f036a0a75088ae318dfa3df58b3cbce6095
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
from datetime import datetime from matplotlib import pylab as plt from requests_cache import CachedSession CACHE_EXPIRATION_SECS = 3600*24*356 YEAR_RANGE = range(2018, 2022) MARKERS = ["o", "s", "d", "+", "*"] RIRS = { 'AFRINIC': { 'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/', 'marker': 'o', }, 'APNIC': { 'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/', 'marker': 's', }, 'ARIN': { 'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/', 'marker': 'd' }, 'LACNIC': { 'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/', 'marker': '+', }, 'RIPE': { 'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/', 'marker': '*', } } session = CachedSession(ExpirationTime = CACHE_EXPIRATION_SECS) plt.figure(figsize=(7,4)) for rir, rir_info in RIRS.items(): x = [] y = [] for year in YEAR_RANGE: for month in range(1,13): roa_count = -1 # skip the header parsed_url = f'{rir_info["url"]}/{year}/{month:02d}/15/roas.csv' csv = session.get( parsed_url ) if csv.status_code != 200: print(parsed_url) print(csv.status_code) continue for line in csv.iter_lines(decode_unicode=True): roa_count += 1 if roa_count > 0: x.append( datetime(year, month, 15) ) y.append( roa_count ) plt.plot(x, y, label=rir, marker=rir_info['marker']) plt.grid( True ) plt.legend() plt.ylabel('Number of ROAs') plt.xticks(rotation=45) plt.tight_layout() plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png') plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')
avg_line_length: 28.223881
max_line_length: 76
alphanum_fraction: 0.523533
from datetime import datetime from matplotlib import pylab as plt from requests_cache import CachedSession CACHE_EXPIRATION_SECS = 3600*24*356 YEAR_RANGE = range(2018, 2022) MARKERS = ["o", "s", "d", "+", "*"] RIRS = { 'AFRINIC': { 'url': 'https://ftp.ripe.net/ripe/rpki/afrinic.tal/', 'marker': 'o', }, 'APNIC': { 'url': 'https://ftp.ripe.net/ripe/rpki/apnic.tal/', 'marker': 's', }, 'ARIN': { 'url': 'https://ftp.ripe.net/ripe/rpki/arin.tal/', 'marker': 'd' }, 'LACNIC': { 'url': 'https://ftp.ripe.net/ripe/rpki/lacnic.tal/', 'marker': '+', }, 'RIPE': { 'url': 'https://ftp.ripe.net/ripe/rpki/ripencc.tal/', 'marker': '*', } } session = CachedSession(ExpirationTime = CACHE_EXPIRATION_SECS) plt.figure(figsize=(7,4)) for rir, rir_info in RIRS.items(): x = [] y = [] for year in YEAR_RANGE: for month in range(1,13): roa_count = -1 parsed_url = f'{rir_info["url"]}/{year}/{month:02d}/15/roas.csv' csv = session.get( parsed_url ) if csv.status_code != 200: print(parsed_url) print(csv.status_code) continue for line in csv.iter_lines(decode_unicode=True): roa_count += 1 if roa_count > 0: x.append( datetime(year, month, 15) ) y.append( roa_count ) plt.plot(x, y, label=rir, marker=rir_info['marker']) plt.grid( True ) plt.legend() plt.ylabel('Number of ROAs') plt.xticks(rotation=45) plt.tight_layout() plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.png') plt.savefig(f'roa_count_{YEAR_RANGE[0]}_{YEAR_RANGE[-1]}.pdf')
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f7082128eda5b64f709d36b4c5d806504c82228c
size: 11,398
ext: py
lang: Python
max_stars_repo_path: python_poc/adapters/modbus_generic_adapter.py
max_stars_repo_name: pervcomp/Procem
max_stars_repo_head_hexsha: 6cefbf6c81b51af948feb9510d39820f8e6f113e
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-01-09T14:38:44.000Z
max_stars_repo_stars_event_max_datetime: 2019-01-09T14:38:44.000Z
max_issues_repo_path: python_poc/adapters/modbus_generic_adapter.py
max_issues_repo_name: pervcomp/Procem
max_issues_repo_head_hexsha: 6cefbf6c81b51af948feb9510d39820f8e6f113e
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 4
max_issues_repo_issues_event_min_datetime: 2021-03-09T00:03:21.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-12T05:33:21.000Z
max_forks_repo_path: python_poc/adapters/modbus_generic_adapter.py
max_forks_repo_name: pervcomp/Procem
max_forks_repo_head_hexsha: 6cefbf6c81b51af948feb9510d39820f8e6f113e
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*- """This module contains an adapter for reading Modbus data and sending it to Procem RTL worker.""" # Copyright (c) TUT Tampere University of Technology 2015-2018. # This software has been developed in Procem-project funded by Business Finland. # This code is licensed under the MIT license. # See the LICENSE.txt in the project root for the license terms. # # Main author(s): Ville Heikkila, Otto Hylli, Pekka Itavuo, # Teemu Laukkarinen ja Ulla-Talvikki Virta from pymodbus.client.sync import ModbusTcpClient as ModbusClient import datetime import queue import socket import threading import time import sys try: import adapters.common_utils as common_utils import adapters.modbus_generic_model as modbus_generic_model import adapters.modbus_utils as modbus_utils except: # used when running the module directly import common_utils import modbus_generic_model import modbus_utils PROCEM_SERVER_IP = common_utils.PROCEM_SERVER_IP PROCEM_SERVER_PORT = common_utils.PROCEM_SERVER_PORT # maximum size for UDP payload. Current value taken from mxelectrix_adapter UDP_MAX_SIZE = common_utils.UDP_MAX_SIZE # To reduce UDP traffic buffer the data sending to procem_rtl using this global queue data_queue = queue.Queue() # The default names of the configuration files from where the data model information is read CONFIG_SCHEME_FILE_NAME = "modbus_solarplant_config.json" MEASUREMENT_ID_FILE_NAME = "Solar_Plant_measurement_IDs_v3.csv" # The default name of the model file MODEL_NAME = "modbus_generic_model" # The supported register types and their reading functions SUPPORTED_REGISTER_TYPES = { "input": "read_input_registers", "holding": "read_holding_registers" } def readRegisterGroups(client, device, register_groups, measurement_queue): """Reads several groups of Modbus registers and sends the data to Procem RTL.""" delay = device.delay / 1000 # time interval between reading different registers in seconds unitid = device.unit_id for register_group in register_groups: start_register = register_group.start_register register_count = register_group.register_count register_type = register_group.type if register_type not in SUPPORTED_REGISTER_TYPES: print(common_utils.getTimeString(), "ERROR: Register type", register_type, "not supported") continue try: # time1 = time.time() current_start_register = start_register current_register_count_max = register_count current_register_count = current_register_count_max received_registers = [] timestamps = [] while len(received_registers) < register_count and current_register_count > 0: # time1 = time.time() resp = getattr(client, SUPPORTED_REGISTER_TYPES[register_type])( current_start_register, current_register_count, unit=unitid) tm = int(round(time.time() * 1000)) # time2 = time.time() if getattr(resp, "registers", None) is None: # print("failed: ", unitid, ": ", current_start_register, "-", # current_start_register + current_register_count - 1, # " (", current_register_count, "), read time: ", time2 - time1, sep="") current_register_count_max //= 2 else: # print("success: ", unitid, ": ", current_start_register, "-", # current_start_register + current_register_count - 1, # " (", current_register_count, "), read time: ", time2 - time1, sep="") received_registers += resp.registers timestamps += [tm] * len(resp.registers) current_start_register += len(resp.registers) if len(received_registers) < register_count: old_end_register = current_start_register - 1 (current_start_register, current_end_register) = register_group.getPart( current_start_register, 
current_register_count_max) if current_start_register is None or current_end_register is None: current_register_count = 0 else: skipped_registers = current_start_register - old_end_register - 1 if skipped_registers > 0: received_registers += [0] * skipped_registers timestamps += [tm] * skipped_registers current_register_count = current_end_register - current_start_register + 1 if len(received_registers) < register_count and current_register_count > 0: time.sleep(delay / 10) if len(received_registers) >= register_count: # time2 = time.time() # print("success: ", unitid, ": registers: ", start_register, "-", # start_register + register_count - 1, ", read time: ", time2 - time1, sep="") measurement_queue.put({ "register_group": register_group, "response_data": received_registers, "timestamps": timestamps}) else: print(common_utils.getTimeString(), " ERROR: (", device.ip, ", ", unitid, "): Failure to read registers: ", start_register, "-", start_register + register_count - 1, " (", resp, ")", sep="") except Exception as error: print(common_utils.getTimeString(), " ERROR: could not read registers ", start_register, "-", start_register + register_count - 1, " from (", device.ip, ", ", unitid, ")", sep="") print(error) # Sleep for a little bit before reading the next register group time.sleep(delay) def sendMeasurementsToProcem(device, measurement_queue): """Sends a collection of measurements to Procem RTL.""" while True: measurement = measurement_queue.get() if measurement is None: break register_group = measurement["register_group"] response_data = measurement["response_data"] timestamps = measurement["timestamps"] for register_id, count in register_group.registers: index = register_id - register_group.start_register register_type = device.registers[register_id] register_values = response_data[index:index + count] timestamp = timestamps[index + count - 1] # parse the data and create a Procem packet and put it in the data queue new_pkt = modbus_utils.getProcemRTLpkt(register_values, register_type, timestamp) data_queue.put(new_pkt) def ModBusWorker(device): """Reads registers periodically from a Modbus device and sends the data to Procem RTL.""" ip = device.ip port = device.port source_ip = device.source_ip source_port = device.source_port interval = device.interval / 1000 # time interval between reading the same register in seconds start_time = time.time() # start the measurement handling thread measurement_queue = queue.Queue() threading.Thread( target=sendMeasurementsToProcem, kwargs={"device": device, "measurement_queue": measurement_queue}, daemon=True).start() kwargs = {"host": ip} if port is not None: kwargs["port"] = port if source_ip is not None: kwargs["source_address"] = (source_ip, source_port) kwargs["timeout"] = 300 client = ModbusClient(**kwargs) client.connect() print(common_utils.getTimeString(), "INFO: Connected ModBus server at address " + ip) try: # First handle the read once registers readRegisterGroups(client, device, device.read_once_groups, measurement_queue) loop_count = 0 day = datetime.date.today().day start_time = time.time() while True: current_day = datetime.date.today().day # Handle the read once register again if it is a new day if current_day != day: print(common_utils.getTimeString(), loop_count, "packages sent from", ip, "on day", day) readRegisterGroups(client, device, device.read_once_groups, measurement_queue) day = current_day start_time += loop_count * interval loop_count = 0 current_time = time.time() sleep_time = loop_count * interval - (current_time - 
start_time) if sleep_time > 0: time.sleep(sleep_time) readRegisterGroups(client, device, device.groups, measurement_queue) loop_count += 1 # Print the send information once in an hour. if loop_count % 3600 == 0: print(common_utils.getTimeString(), loop_count, "packages sent from", ip, "on day", day) except OSError as err: print(common_utils.getTimeString(), "ERROR: unexpected behavior in thread of IP", ip, "error was", err) finally: # sleep for safety? client.close() print(common_utils.getTimeString(), "INFO: Closing the connection to ModBus server") def startModBusAdapter(data_model): """Starts separate worker thread for each Modbus device in the data model.""" devices = data_model.devices for device_id, device in devices.items(): print(common_utils.getTimeString(), "Starting thread for device", device_id) threading.Thread(target=ModBusWorker, kwargs={"device": device}, daemon=True).start() if __name__ == "__main__": if len(sys.argv) == 3: CONFIG_SCHEME_FILE_NAME = sys.argv[1] MEASUREMENT_ID_FILE_NAME = sys.argv[2] elif len(sys.argv) != 1: print("Start this adapter with 'python3", sys.argv[0], "config_scheme.json measurement_ids.csv' command") print("or use 'python3 ", sys.argv[0], "' to use the default configuration.", sep="") quit() print(common_utils.getTimeString(), "Reading modbus configurations from", CONFIG_SCHEME_FILE_NAME, "and", MEASUREMENT_ID_FILE_NAME) # Read the model name from the configuration file config = common_utils.readConfig(CONFIG_SCHEME_FILE_NAME) MODEL_NAME = config.get("model_name", MODEL_NAME) # import the correct model code and load the model information from the configuration files model = __import__(MODEL_NAME) field_info_class = getattr(model, "getFieldStorage") create_measurement_function = getattr(model, "getCreateFunction") data_model = modbus_generic_model.loadModel( config_filename=CONFIG_SCHEME_FILE_NAME, csv_filename=MEASUREMENT_ID_FILE_NAME, field_model=field_info_class(), create_measurement=create_measurement_function()) # start the Procem send worker that takes the values from data_queue and sends them to procem_rtl threading.Thread(target=common_utils.procemSendWorker, kwargs={"data_queue": data_queue}).start() startModBusAdapter(data_model) while True: txt = input("Press enter key to end:\n\r") if not txt: data_queue.put(None) break
avg_line_length: 43.503817
max_line_length: 113
alphanum_fraction: 0.650465
from pymodbus.client.sync import ModbusTcpClient as ModbusClient import datetime import queue import socket import threading import time import sys try: import adapters.common_utils as common_utils import adapters.modbus_generic_model as modbus_generic_model import adapters.modbus_utils as modbus_utils except: import common_utils import modbus_generic_model import modbus_utils PROCEM_SERVER_IP = common_utils.PROCEM_SERVER_IP PROCEM_SERVER_PORT = common_utils.PROCEM_SERVER_PORT UDP_MAX_SIZE = common_utils.UDP_MAX_SIZE data_queue = queue.Queue() CONFIG_SCHEME_FILE_NAME = "modbus_solarplant_config.json" MEASUREMENT_ID_FILE_NAME = "Solar_Plant_measurement_IDs_v3.csv" MODEL_NAME = "modbus_generic_model" SUPPORTED_REGISTER_TYPES = { "input": "read_input_registers", "holding": "read_holding_registers" } def readRegisterGroups(client, device, register_groups, measurement_queue): delay = device.delay / 1000 unitid = device.unit_id for register_group in register_groups: start_register = register_group.start_register register_count = register_group.register_count register_type = register_group.type if register_type not in SUPPORTED_REGISTER_TYPES: print(common_utils.getTimeString(), "ERROR: Register type", register_type, "not supported") continue try: current_start_register = start_register current_register_count_max = register_count current_register_count = current_register_count_max received_registers = [] timestamps = [] while len(received_registers) < register_count and current_register_count > 0: resp = getattr(client, SUPPORTED_REGISTER_TYPES[register_type])( current_start_register, current_register_count, unit=unitid) tm = int(round(time.time() * 1000)) if getattr(resp, "registers", None) is None: current_register_count_max //= 2 else: received_registers += resp.registers timestamps += [tm] * len(resp.registers) current_start_register += len(resp.registers) if len(received_registers) < register_count: old_end_register = current_start_register - 1 (current_start_register, current_end_register) = register_group.getPart( current_start_register, current_register_count_max) if current_start_register is None or current_end_register is None: current_register_count = 0 else: skipped_registers = current_start_register - old_end_register - 1 if skipped_registers > 0: received_registers += [0] * skipped_registers timestamps += [tm] * skipped_registers current_register_count = current_end_register - current_start_register + 1 if len(received_registers) < register_count and current_register_count > 0: time.sleep(delay / 10) if len(received_registers) >= register_count: measurement_queue.put({ "register_group": register_group, "response_data": received_registers, "timestamps": timestamps}) else: print(common_utils.getTimeString(), " ERROR: (", device.ip, ", ", unitid, "): Failure to read registers: ", start_register, "-", start_register + register_count - 1, " (", resp, ")", sep="") except Exception as error: print(common_utils.getTimeString(), " ERROR: could not read registers ", start_register, "-", start_register + register_count - 1, " from (", device.ip, ", ", unitid, ")", sep="") print(error) time.sleep(delay) def sendMeasurementsToProcem(device, measurement_queue): while True: measurement = measurement_queue.get() if measurement is None: break register_group = measurement["register_group"] response_data = measurement["response_data"] timestamps = measurement["timestamps"] for register_id, count in register_group.registers: index = register_id - register_group.start_register register_type = 
device.registers[register_id] register_values = response_data[index:index + count] timestamp = timestamps[index + count - 1] new_pkt = modbus_utils.getProcemRTLpkt(register_values, register_type, timestamp) data_queue.put(new_pkt) def ModBusWorker(device): ip = device.ip port = device.port source_ip = device.source_ip source_port = device.source_port interval = device.interval / 1000 start_time = time.time() measurement_queue = queue.Queue() threading.Thread( target=sendMeasurementsToProcem, kwargs={"device": device, "measurement_queue": measurement_queue}, daemon=True).start() kwargs = {"host": ip} if port is not None: kwargs["port"] = port if source_ip is not None: kwargs["source_address"] = (source_ip, source_port) kwargs["timeout"] = 300 client = ModbusClient(**kwargs) client.connect() print(common_utils.getTimeString(), "INFO: Connected ModBus server at address " + ip) try: readRegisterGroups(client, device, device.read_once_groups, measurement_queue) loop_count = 0 day = datetime.date.today().day start_time = time.time() while True: current_day = datetime.date.today().day if current_day != day: print(common_utils.getTimeString(), loop_count, "packages sent from", ip, "on day", day) readRegisterGroups(client, device, device.read_once_groups, measurement_queue) day = current_day start_time += loop_count * interval loop_count = 0 current_time = time.time() sleep_time = loop_count * interval - (current_time - start_time) if sleep_time > 0: time.sleep(sleep_time) readRegisterGroups(client, device, device.groups, measurement_queue) loop_count += 1 if loop_count % 3600 == 0: print(common_utils.getTimeString(), loop_count, "packages sent from", ip, "on day", day) except OSError as err: print(common_utils.getTimeString(), "ERROR: unexpected behavior in thread of IP", ip, "error was", err) finally: client.close() print(common_utils.getTimeString(), "INFO: Closing the connection to ModBus server") def startModBusAdapter(data_model): devices = data_model.devices for device_id, device in devices.items(): print(common_utils.getTimeString(), "Starting thread for device", device_id) threading.Thread(target=ModBusWorker, kwargs={"device": device}, daemon=True).start() if __name__ == "__main__": if len(sys.argv) == 3: CONFIG_SCHEME_FILE_NAME = sys.argv[1] MEASUREMENT_ID_FILE_NAME = sys.argv[2] elif len(sys.argv) != 1: print("Start this adapter with 'python3", sys.argv[0], "config_scheme.json measurement_ids.csv' command") print("or use 'python3 ", sys.argv[0], "' to use the default configuration.", sep="") quit() print(common_utils.getTimeString(), "Reading modbus configurations from", CONFIG_SCHEME_FILE_NAME, "and", MEASUREMENT_ID_FILE_NAME) config = common_utils.readConfig(CONFIG_SCHEME_FILE_NAME) MODEL_NAME = config.get("model_name", MODEL_NAME) model = __import__(MODEL_NAME) field_info_class = getattr(model, "getFieldStorage") create_measurement_function = getattr(model, "getCreateFunction") data_model = modbus_generic_model.loadModel( config_filename=CONFIG_SCHEME_FILE_NAME, csv_filename=MEASUREMENT_ID_FILE_NAME, field_model=field_info_class(), create_measurement=create_measurement_function()) threading.Thread(target=common_utils.procemSendWorker, kwargs={"data_queue": data_queue}).start() startModBusAdapter(data_model) while True: txt = input("Press enter key to end:\n\r") if not txt: data_queue.put(None) break
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f70821c9013bef20bbd4b8e6e4c8fe921bf44021
size: 1,306
ext: py
lang: Python
max_stars_repo_path: ooobuild/lo/embed/hatch_window_factory.py
max_stars_repo_name: Amourspirit/ooo_uno_tmpl
max_stars_repo_head_hexsha: 64e0c86fd68f24794acc22d63d8d32ae05dd12b8
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: ooobuild/lo/embed/hatch_window_factory.py
max_issues_repo_name: Amourspirit/ooo_uno_tmpl
max_issues_repo_head_hexsha: 64e0c86fd68f24794acc22d63d8d32ae05dd12b8
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ooobuild/lo/embed/hatch_window_factory.py
max_forks_repo_name: Amourspirit/ooo_uno_tmpl
max_forks_repo_head_hexsha: 64e0c86fd68f24794acc22d63d8d32ae05dd12b8
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.embed
from .x_hatch_window_factory import XHatchWindowFactory as XHatchWindowFactory_167d0e95


class HatchWindowFactory(XHatchWindowFactory_167d0e95):
    """
    Service Class

    **since** LibreOffice 4.1

    See Also:
        `API HatchWindowFactory <https://api.libreoffice.org/docs/idl/ref/servicecom_1_1sun_1_1star_1_1embed_1_1HatchWindowFactory.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.embed'
    __ooo_full_ns__: str = 'com.sun.star.embed.HatchWindowFactory'
    __ooo_type_name__: str = 'service'


__all__ = ['HatchWindowFactory']
avg_line_length: 30.372093
max_line_length: 138
alphanum_fraction: 0.742726
content_no_comment:
from .x_hatch_window_factory import XHatchWindowFactory as XHatchWindowFactory_167d0e95


class HatchWindowFactory(XHatchWindowFactory_167d0e95):
    __ooo_ns__: str = 'com.sun.star.embed'
    __ooo_full_ns__: str = 'com.sun.star.embed.HatchWindowFactory'
    __ooo_type_name__: str = 'service'


__all__ = ['HatchWindowFactory']
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f7082209033fa16bf0b05b5db5d4f2960b3c91f8
size: 4,412
ext: py
lang: Python
max_stars_repo_path: hotsos/core/host_helpers/config.py
max_stars_repo_name: KellenRenshaw/hotsos
max_stars_repo_head_hexsha: e3fc51ab7f8af606a5846a3486a7fda23d761583
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 6
max_stars_repo_stars_event_min_datetime: 2021-10-01T19:46:14.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-31T17:05:08.000Z
max_issues_repo_path: hotsos/core/host_helpers/config.py
max_issues_repo_name: KellenRenshaw/hotsos
max_issues_repo_head_hexsha: e3fc51ab7f8af606a5846a3486a7fda23d761583
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 111
max_issues_repo_issues_event_min_datetime: 2021-10-01T18:18:17.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-29T12:23:20.000Z
max_forks_repo_path: hotsos/core/host_helpers/config.py
max_forks_repo_name: KellenRenshaw/hotsos
max_forks_repo_head_hexsha: e3fc51ab7f8af606a5846a3486a7fda23d761583
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 10
max_forks_repo_forks_event_min_datetime: 2021-09-29T14:47:54.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-18T14:52:16.000Z
import os import re class ConfigBase(object): def __init__(self, path): self.path = path @classmethod def squash_int_range(cls, ilist): """Takes a list of integers and squashes consecutive values into a string range. Returned list contains mix of strings and ints. """ irange = [] rstart = None rprev = None sorted(ilist) for i, value in enumerate(ilist): if rstart is None: if i == (len(ilist) - 1): irange.append(value) break rstart = value if rprev is not None: if rprev != (value - 1): if rstart == rprev: irange.append(rstart) else: irange.append("{}-{}".format(rstart, rprev)) if i == (len(ilist) - 1): irange.append(value) rstart = value elif i == (len(ilist) - 1): irange.append("{}-{}".format(rstart, value)) break rprev = value return ','.join(irange) @classmethod def expand_value_ranges(cls, ranges): """ Takes a string containing ranges of values such as 1-3 and 4,5,6,7 and expands them into a single list. """ if not ranges: return ranges expanded = [] ranges = ranges.split(',') for subrange in ranges: # expand ranges subrange = subrange.partition('-') if subrange[1] == '-': expanded += range(int(subrange[0]), int(subrange[2]) + 1) else: for val in subrange[0].split(): expanded.append(int(val)) return sorted(expanded) @property def exists(self): if os.path.exists(self.path): return True return False def get(self, key, section=None, expand_to_list=False): raise NotImplementedError class SectionalConfigBase(ConfigBase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._sections = {} # this provides an easy sectionless lookup but is prone to collisions. # always returns the last value for key found in config file. self._flattened_config = {} self._load() @staticmethod def bool_str(val): if val.lower() == "true": return True elif val.lower() == "false": return False return val @property def all(self): return self._sections def get(self, key, section=None, expand_to_list=False): """ If section is None use flattened """ if section is None: value = self._flattened_config.get(key) else: value = self._sections.get(section, {}).get(key) if expand_to_list: return self.expand_value_ranges(value) return value @property def dump(self): with open(self.path) as fd: return fd.read() def _load(self): if not self.exists: return current_section = None with open(self.path) as fd: for line in fd: if re.compile(r"^\s*#").search(line): continue # section names are not expected to contain whitespace ret = re.compile(r"^\s*\[(\S+)].*").search(line) if ret: current_section = ret.group(1) self._sections[current_section] = {} continue if current_section is None: continue # key names may contain whitespace # values may contain whitespace expr = r"^\s*(\S+(?:\s+\S+)?)\s*=\s*(.+)\s*" ret = re.compile(expr).search(line) if ret: key = ret.group(1) val = self.bool_str(ret.group(2)) if type(val) == str: val = val.strip() for char in ["'", '"']: val = val.strip(char) self._sections[current_section][key] = val self._flattened_config[key] = val
avg_line_length: 28.836601
max_line_length: 78
alphanum_fraction: 0.487307
import os import re class ConfigBase(object): def __init__(self, path): self.path = path @classmethod def squash_int_range(cls, ilist): irange = [] rstart = None rprev = None sorted(ilist) for i, value in enumerate(ilist): if rstart is None: if i == (len(ilist) - 1): irange.append(value) break rstart = value if rprev is not None: if rprev != (value - 1): if rstart == rprev: irange.append(rstart) else: irange.append("{}-{}".format(rstart, rprev)) if i == (len(ilist) - 1): irange.append(value) rstart = value elif i == (len(ilist) - 1): irange.append("{}-{}".format(rstart, value)) break rprev = value return ','.join(irange) @classmethod def expand_value_ranges(cls, ranges): if not ranges: return ranges expanded = [] ranges = ranges.split(',') for subrange in ranges: subrange = subrange.partition('-') if subrange[1] == '-': expanded += range(int(subrange[0]), int(subrange[2]) + 1) else: for val in subrange[0].split(): expanded.append(int(val)) return sorted(expanded) @property def exists(self): if os.path.exists(self.path): return True return False def get(self, key, section=None, expand_to_list=False): raise NotImplementedError class SectionalConfigBase(ConfigBase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._sections = {} self._flattened_config = {} self._load() @staticmethod def bool_str(val): if val.lower() == "true": return True elif val.lower() == "false": return False return val @property def all(self): return self._sections def get(self, key, section=None, expand_to_list=False): if section is None: value = self._flattened_config.get(key) else: value = self._sections.get(section, {}).get(key) if expand_to_list: return self.expand_value_ranges(value) return value @property def dump(self): with open(self.path) as fd: return fd.read() def _load(self): if not self.exists: return current_section = None with open(self.path) as fd: for line in fd: if re.compile(r"^\s*#").search(line): continue ret = re.compile(r"^\s*\[(\S+)].*").search(line) if ret: current_section = ret.group(1) self._sections[current_section] = {} continue if current_section is None: continue expr = r"^\s*(\S+(?:\s+\S+)?)\s*=\s*(.+)\s*" ret = re.compile(expr).search(line) if ret: key = ret.group(1) val = self.bool_str(ret.group(2)) if type(val) == str: val = val.strip() for char in ["'", '"']: val = val.strip(char) self._sections[current_section][key] = val self._flattened_config[key] = val
is_comment_constant_removed: true
is_sharp_comment_removed: true
hexsha: f708227bdf1894da2e5054f9584b6866a12bb72f
size: 37,497
ext: py
lang: Python
max_stars_repo_path: google/cloud/monitoring_v3/services/alert_policy_service/client.py
max_stars_repo_name: nicain/python-monitoring
max_stars_repo_head_hexsha: bda4852bbe6eb28b1c93b4a785eac167b57cb7d8
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: google/cloud/monitoring_v3/services/alert_policy_service/client.py
max_issues_repo_name: nicain/python-monitoring
max_issues_repo_head_hexsha: bda4852bbe6eb28b1c93b4a785eac167b57cb7d8
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: google/cloud/monitoring_v3/services/alert_policy_service/client.py
max_forks_repo_name: nicain/python-monitoring
max_forks_repo_head_hexsha: bda4852bbe6eb28b1c93b4a785eac167b57cb7d8
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport import mtls # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.monitoring_v3.services.alert_policy_service import pagers from google.cloud.monitoring_v3.types import alert from google.cloud.monitoring_v3.types import alert_service from google.cloud.monitoring_v3.types import mutation_record from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore from .transports.base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import AlertPolicyServiceGrpcTransport from .transports.grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport class AlertPolicyServiceClientMeta(type): """Metaclass for the AlertPolicyService client. This provides class-level methods for building and retrieving support objects (e.g. transport) without polluting the client instance objects. """ _transport_registry = ( OrderedDict() ) # type: Dict[str, Type[AlertPolicyServiceTransport]] _transport_registry["grpc"] = AlertPolicyServiceGrpcTransport _transport_registry["grpc_asyncio"] = AlertPolicyServiceGrpcAsyncIOTransport def get_transport_class( cls, label: str = None, ) -> Type[AlertPolicyServiceTransport]: """Returns an appropriate transport class. Args: label: The name of the desired transport. If none is provided, then the first transport in the registry is used. Returns: The transport class to use. """ # If a specific transport is requested, return that one. if label: return cls._transport_registry[label] # No transport is requested; return the default (that is, the first one # in the dictionary). return next(iter(cls._transport_registry.values())) class AlertPolicyServiceClient(metaclass=AlertPolicyServiceClientMeta): """The AlertPolicyService API is used to manage (list, create, delete, edit) alert policies in Stackdriver Monitoring. An alerting policy is a description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. 
In addition to using this API, alert policies can also be managed through `Stackdriver Monitoring <https://cloud.google.com/monitoring/docs/>`__, which can be reached by clicking the "Monitoring" tab in `Cloud Console <https://console.cloud.google.com/>`__. """ @staticmethod def _get_default_mtls_endpoint(api_endpoint): """Converts api endpoint to mTLS endpoint. Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. Args: api_endpoint (Optional[str]): the api endpoint to convert. Returns: str: converted mTLS api endpoint. """ if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" ) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "monitoring.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AlertPolicyServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AlertPolicyServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> AlertPolicyServiceTransport: """Returns the transport used by the client instance. Returns: AlertPolicyServiceTransport: The transport used by the client instance. 
""" return self._transport @staticmethod def alert_policy_path(project: str, alert_policy: str,) -> str: """Returns a fully-qualified alert_policy string.""" return "projects/{project}/alertPolicies/{alert_policy}".format( project=project, alert_policy=alert_policy, ) @staticmethod def parse_alert_policy_path(path: str) -> Dict[str, str]: """Parses a alert_policy path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/alertPolicies/(?P<alert_policy>.+?)$", path ) return m.groupdict() if m else {} @staticmethod def alert_policy_condition_path( project: str, alert_policy: str, condition: str, ) -> str: """Returns a fully-qualified alert_policy_condition string.""" return "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}".format( project=project, alert_policy=alert_policy, condition=condition, ) @staticmethod def parse_alert_policy_condition_path(path: str) -> Dict[str, str]: """Parses a alert_policy_condition path into its component segments.""" m = re.match( r"^projects/(?P<project>.+?)/alertPolicies/(?P<alert_policy>.+?)/conditions/(?P<condition>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: """Parse a billing_account path into its component segments.""" m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: """Returns a fully-qualified folder string.""" return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: """Parse a folder path into its component segments.""" m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: """Returns a fully-qualified organization string.""" return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: """Parse a organization path into its component segments.""" m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: """Returns a fully-qualified project string.""" return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: """Parse a project path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: """Returns a fully-qualified location string.""" return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: """Parse a location path into its component segments.""" m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, AlertPolicyServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> 
None: """Instantiates the alert policy service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, AlertPolicyServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() # Create SSL credentials for mutual TLS if needed. use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None # Figure out which api endpoint to use. if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. if isinstance(transport, AlertPolicyServiceTransport): # transport is a AlertPolicyServiceTransport instance. if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." 
) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=( Transport == type(self).get_transport_class("grpc") or Transport == type(self).get_transport_class("grpc_asyncio") ), ) def list_alert_policies( self, request: alert_service.ListAlertPoliciesRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAlertPoliciesPager: r"""Lists the existing alerting policies for the workspace. Args: request (google.cloud.monitoring_v3.types.ListAlertPoliciesRequest): The request object. The protocol for the `ListAlertPolicies` request. name (str): Required. The `project <https://cloud.google.com/monitoring/api/v3#project_name>`__ whose alert policies are to be listed. The format is: :: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] operation, instead. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.monitoring_v3.services.alert_policy_service.pagers.ListAlertPoliciesPager: The protocol for the ListAlertPolicies response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a alert_service.ListAlertPoliciesRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, alert_service.ListAlertPoliciesRequest): request = alert_service.ListAlertPoliciesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_alert_policies] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__iter__` convenience method. 
response = pagers.ListAlertPoliciesPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response def get_alert_policy( self, request: alert_service.GetAlertPolicyRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: r"""Gets a single alerting policy. Args: request (google.cloud.monitoring_v3.types.GetAlertPolicyRequest): The request object. The protocol for the `GetAlertPolicy` request. name (str): Required. The alerting policy to retrieve. The format is: :: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.monitoring_v3.types.AlertPolicy: A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see [Introduction to Alerting](\ https://cloud.google.com/monitoring/alerts/). """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a alert_service.GetAlertPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, alert_service.GetAlertPolicyRequest): request = alert_service.GetAlertPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.get_alert_policy] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def create_alert_policy( self, request: alert_service.CreateAlertPolicyRequest = None, *, name: str = None, alert_policy: alert.AlertPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: r"""Creates a new alerting policy. Args: request (google.cloud.monitoring_v3.types.CreateAlertPolicyRequest): The request object. The protocol for the `CreateAlertPolicy` request. name (str): Required. The `project <https://cloud.google.com/monitoring/api/v3#project_name>`__ in which to create the alerting policy. The format is: :: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policy will be written, not the name of the created policy. 
\|name\| must be a host project of a workspace, otherwise INVALID_ARGUMENT error will return. The alerting policy that is returned will have a name that contains a normalized representation of this name as a prefix but adds a suffix of the form ``/alertPolicies/[ALERT_POLICY_ID]``, identifying the policy in the container. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. alert_policy (google.cloud.monitoring_v3.types.AlertPolicy): Required. The requested alerting policy. You should omit the ``name`` field in this policy. The name will be returned in the new policy, including a new ``[ALERT_POLICY_ID]`` value. This corresponds to the ``alert_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.monitoring_v3.types.AlertPolicy: A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see [Introduction to Alerting](\ https://cloud.google.com/monitoring/alerts/). """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name, alert_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a alert_service.CreateAlertPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, alert_service.CreateAlertPolicyRequest): request = alert_service.CreateAlertPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name if alert_policy is not None: request.alert_policy = alert_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_alert_policy] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response def delete_alert_policy( self, request: alert_service.DeleteAlertPolicyRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an alerting policy. Args: request (google.cloud.monitoring_v3.types.DeleteAlertPolicyRequest): The request object. The protocol for the `DeleteAlertPolicy` request. name (str): Required. The alerting policy to delete. The format is: :: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy]. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a alert_service.DeleteAlertPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, alert_service.DeleteAlertPolicyRequest): request = alert_service.DeleteAlertPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_alert_policy] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def update_alert_policy( self, request: alert_service.UpdateAlertPolicyRequest = None, *, update_mask: field_mask_pb2.FieldMask = None, alert_policy: alert.AlertPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: r"""Updates an alerting policy. You can either replace the entire policy with a new one or replace only certain fields in the current alerting policy by specifying the fields to be updated via ``updateMask``. Returns the updated alerting policy. Args: request (google.cloud.monitoring_v3.types.UpdateAlertPolicyRequest): The request object. The protocol for the `UpdateAlertPolicy` request. update_mask (google.protobuf.field_mask_pb2.FieldMask): Optional. A list of alerting policy field names. If this field is not empty, each listed field in the existing alerting policy is set to the value of the corresponding field in the supplied policy (``alert_policy``), or to the field's default value if the field is not in the supplied alerting policy. Fields not listed retain their previous value. Examples of valid field masks include ``display_name``, ``documentation``, ``documentation.content``, ``documentation.mime_type``, ``user_labels``, ``user_label.nameofkey``, ``enabled``, ``conditions``, ``combiner``, etc. If this field is empty, then the supplied alerting policy replaces the existing policy. It is the same as deleting the existing policy and adding the supplied policy, except for the following: - The new policy will have the same ``[ALERT_POLICY_ID]`` as the former policy. This gives you continuity with the former policy in your notifications and incidents. - Conditions in the new policy will keep their former ``[CONDITION_ID]`` if the supplied condition includes the ``name`` field with that ``[CONDITION_ID]``. If the supplied condition omits the ``name`` field, then a new ``[CONDITION_ID]`` is created. 
This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. alert_policy (google.cloud.monitoring_v3.types.AlertPolicy): Required. The updated alerting policy or the updated values for the fields listed in ``update_mask``. If ``update_mask`` is not empty, any fields in this policy that are not in ``update_mask`` are ignored. This corresponds to the ``alert_policy`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.monitoring_v3.types.AlertPolicy: A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see [Introduction to Alerting](\ https://cloud.google.com/monitoring/alerts/). """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([update_mask, alert_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) # Minor optimization to avoid making a copy if the user passes # in a alert_service.UpdateAlertPolicyRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, alert_service.UpdateAlertPolicyRequest): request = alert_service.UpdateAlertPolicyRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if update_mask is not None: request.update_mask = update_mask if alert_policy is not None: request.alert_policy = alert_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_alert_policy] # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("alert_policy.name", request.alert_policy.name),) ), ) # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # Done; return the response. return response try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-monitoring", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("AlertPolicyServiceClient",)
43.85614
109
0.628317
from collections import OrderedDict from distutils import util import os import re from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union import pkg_resources from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials from google.auth.transport import mtls from google.auth.transport.grpc import SslCredentials from google.auth.exceptions import MutualTLSChannelError from google.oauth2 import service_account from google.cloud.monitoring_v3.services.alert_policy_service import pagers from google.cloud.monitoring_v3.types import alert from google.cloud.monitoring_v3.types import alert_service from google.cloud.monitoring_v3.types import mutation_record from google.protobuf import field_mask_pb2 from google.protobuf import wrappers_pb2 from google.rpc import status_pb2 from .transports.base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc import AlertPolicyServiceGrpcTransport from .transports.grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport class AlertPolicyServiceClientMeta(type): _transport_registry = ( OrderedDict() ) _transport_registry["grpc"] = AlertPolicyServiceGrpcTransport _transport_registry["grpc_asyncio"] = AlertPolicyServiceGrpcAsyncIOTransport def get_transport_class( cls, label: str = None, ) -> Type[AlertPolicyServiceTransport]: if label: return cls._transport_registry[label] return next(iter(cls._transport_registry.values())) class AlertPolicyServiceClient(metaclass=AlertPolicyServiceClientMeta): @staticmethod def _get_default_mtls_endpoint(api_endpoint): if not api_endpoint: return api_endpoint mtls_endpoint_re = re.compile( r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" 
) m = mtls_endpoint_re.match(api_endpoint) name, mtls, sandbox, googledomain = m.groups() if mtls or not googledomain: return api_endpoint if sandbox: return api_endpoint.replace( "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" ) return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") DEFAULT_ENDPOINT = "monitoring.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( DEFAULT_ENDPOINT ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): credentials = service_account.Credentials.from_service_account_info(info) kwargs["credentials"] = credentials return cls(*args, **kwargs) @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file @property def transport(self) -> AlertPolicyServiceTransport: return self._transport @staticmethod def alert_policy_path(project: str, alert_policy: str,) -> str: return "projects/{project}/alertPolicies/{alert_policy}".format( project=project, alert_policy=alert_policy, ) @staticmethod def parse_alert_policy_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/alertPolicies/(?P<alert_policy>.+?)$", path ) return m.groupdict() if m else {} @staticmethod def alert_policy_condition_path( project: str, alert_policy: str, condition: str, ) -> str: return "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}".format( project=project, alert_policy=alert_policy, condition=condition, ) @staticmethod def parse_alert_policy_condition_path(path: str) -> Dict[str, str]: m = re.match( r"^projects/(?P<project>.+?)/alertPolicies/(?P<alert_policy>.+?)/conditions/(?P<condition>.+?)$", path, ) return m.groupdict() if m else {} @staticmethod def common_billing_account_path(billing_account: str,) -> str: return "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @staticmethod def parse_common_billing_account_path(path: str) -> Dict[str, str]: m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_folder_path(folder: str,) -> str: return "folders/{folder}".format(folder=folder,) @staticmethod def parse_common_folder_path(path: str) -> Dict[str, str]: m = re.match(r"^folders/(?P<folder>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_organization_path(organization: str,) -> str: return "organizations/{organization}".format(organization=organization,) @staticmethod def parse_common_organization_path(path: str) -> Dict[str, str]: m = re.match(r"^organizations/(?P<organization>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_project_path(project: str,) -> str: return "projects/{project}".format(project=project,) @staticmethod def parse_common_project_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)$", path) return m.groupdict() if m else {} @staticmethod def common_location_path(project: str, location: str,) -> str: return "projects/{project}/locations/{location}".format( project=project, location=location, ) @staticmethod def parse_common_location_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) return m.groupdict() if m else {} def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, transport: 
Union[str, AlertPolicyServiceTransport, None] = None, client_options: Optional[client_options_lib.ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: if isinstance(client_options, dict): client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = bool( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: is_mtls = True client_cert_source_func = client_options.client_cert_source else: is_mtls = mtls.has_default_client_cert_source() if is_mtls: client_cert_source_func = mtls.default_client_cert_source() else: client_cert_source_func = None if client_options.api_endpoint is not None: api_endpoint = client_options.api_endpoint else: use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": if is_mtls: api_endpoint = self.DEFAULT_MTLS_ENDPOINT else: api_endpoint = self.DEFAULT_ENDPOINT else: raise MutualTLSChannelError( "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " "values: never, auto, always" ) if isinstance(transport, AlertPolicyServiceTransport): if credentials or client_options.credentials_file: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) if client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) self._transport = transport else: Transport = type(self).get_transport_class(transport) self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, always_use_jwt_access=( Transport == type(self).get_transport_class("grpc") or Transport == type(self).get_transport_class("grpc_asyncio") ), ) def list_alert_policies( self, request: alert_service.ListAlertPoliciesRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAlertPoliciesPager: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) if not isinstance(request, alert_service.ListAlertPoliciesRequest): request = alert_service.ListAlertPoliciesRequest(request) if name is not None: request.name = name rpc = self._transport._wrapped_methods[self._transport.list_alert_policies] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) response = pagers.ListAlertPoliciesPager( method=rpc, request=request, response=response, metadata=metadata, ) return response def get_alert_policy( self, request: alert_service.GetAlertPolicyRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) if not isinstance(request, alert_service.GetAlertPolicyRequest): request = alert_service.GetAlertPolicyRequest(request) if name is not None: request.name = name rpc = self._transport._wrapped_methods[self._transport.get_alert_policy] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response def create_alert_policy( self, request: alert_service.CreateAlertPolicyRequest = None, *, name: str = None, alert_policy: alert.AlertPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: has_flattened_params = any([name, alert_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) if not isinstance(request, alert_service.CreateAlertPolicyRequest): request = alert_service.CreateAlertPolicyRequest(request) if name is not None: request.name = name if alert_policy is not None: request.alert_policy = alert_policy rpc = self._transport._wrapped_methods[self._transport.create_alert_policy] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response def delete_alert_policy( self, request: alert_service.DeleteAlertPolicyRequest = None, *, name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) if not isinstance(request, alert_service.DeleteAlertPolicyRequest): request = alert_service.DeleteAlertPolicyRequest(request) if name is not None: request.name = name rpc = self._transport._wrapped_methods[self._transport.delete_alert_policy] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) rpc( request, retry=retry, timeout=timeout, metadata=metadata, ) def update_alert_policy( self, request: alert_service.UpdateAlertPolicyRequest = None, *, update_mask: field_mask_pb2.FieldMask = None, alert_policy: alert.AlertPolicy = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: has_flattened_params = any([update_mask, alert_policy]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) if not isinstance(request, alert_service.UpdateAlertPolicyRequest): request = alert_service.UpdateAlertPolicyRequest(request) if update_mask is not None: request.update_mask = update_mask if alert_policy is not None: request.alert_policy = alert_policy rpc = self._transport._wrapped_methods[self._transport.update_alert_policy] metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata( (("alert_policy.name", request.alert_policy.name),) ), ) response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) return response try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution( "google-cloud-monitoring", ).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("AlertPolicyServiceClient",)
true
true
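As a usage note for the AlertPolicyServiceClient record above: the sketch below shows how the flattened name arguments of list_alert_policies and get_alert_policy are typically called from the public monitoring_v3 package. The project ID and alert-policy ID are placeholders, not values taken from the record.

from google.cloud import monitoring_v3

client = monitoring_v3.AlertPolicyServiceClient()
parent = "projects/my-project"  # placeholder project, not from the record above

# Iterate over the ListAlertPoliciesPager returned by the client.
for policy in client.list_alert_policies(name=parent):
    print(policy.name, policy.display_name)

# Fetch a single policy by its full resource name (the ID here is a placeholder).
policy = client.get_alert_policy(name=parent + "/alertPolicies/1234567890")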
f708235276521e1055a17f1e583e94359571464b
407
py
Python
malib/algorithm/ddpg/__init__.py
zbzhu99/malib
5be07ac00761a34fb095adb2b3018a798ceea256
[ "MIT" ]
null
null
null
malib/algorithm/ddpg/__init__.py
zbzhu99/malib
5be07ac00761a34fb095adb2b3018a798ceea256
[ "MIT" ]
null
null
null
malib/algorithm/ddpg/__init__.py
zbzhu99/malib
5be07ac00761a34fb095adb2b3018a798ceea256
[ "MIT" ]
null
null
null
from .policy import DDPG from .trainer import DDPGTrainer from .loss import DDPGLoss NAME = "DDPG" LOSS = DDPGLoss TRAINER = DDPGTrainer POLICY = DDPG CONFIG = { "training": { "update_interval": 1, "batch_size": 1024, "tau": 0.01, "optimizer": "Adam", "actor_lr": 1e-2, "critic_lr": 1e-2, "grad_norm_clipping": 0.5, }, "policy": {}, }
16.958333
34
0.565111
from .policy import DDPG from .trainer import DDPGTrainer from .loss import DDPGLoss NAME = "DDPG" LOSS = DDPGLoss TRAINER = DDPGTrainer POLICY = DDPG CONFIG = { "training": { "update_interval": 1, "batch_size": 1024, "tau": 0.01, "optimizer": "Adam", "actor_lr": 1e-2, "critic_lr": 1e-2, "grad_norm_clipping": 0.5, }, "policy": {}, }
true
true
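A minimal sketch of how a registry module like the DDPG __init__.py above is commonly consumed; the import path follows the record's file layout, and the override helper is hypothetical rather than part of the repository.

import copy
from malib.algorithm import ddpg  # path assumed from the record's file layout

def build_training_config(overrides=None):
    # Copy the module-level defaults, then apply caller-supplied overrides.
    config = copy.deepcopy(ddpg.CONFIG)
    config["training"].update(overrides or {})
    return config

cfg = build_training_config({"batch_size": 256, "actor_lr": 1e-3})
print(ddpg.NAME, cfg["training"]["batch_size"])  # DDPG 256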
f708236a902da8255569087afbd1a500653aa211
7,208
py
Python
modules/android.py
inzanez/pdfium-lib
f4e6fbb3b29c100ff3f291944944fd7e38fafbcd
[ "MIT" ]
69
2021-01-27T18:53:22.000Z
2022-02-25T00:41:41.000Z
modules/android.py
inzanez/pdfium-lib
f4e6fbb3b29c100ff3f291944944fd7e38fafbcd
[ "MIT" ]
31
2021-01-23T17:14:46.000Z
2022-03-04T18:06:23.000Z
modules/android.py
inzanez/pdfium-lib
f4e6fbb3b29c100ff3f291944944fd7e38fafbcd
[ "MIT" ]
19
2021-01-27T18:57:07.000Z
2022-01-04T02:56:03.000Z
import glob import os import tarfile from subprocess import check_call import modules.config as c import modules.functions as f def run_task_build_pdfium(): f.debug("Building PDFium...") target = "android" build_dir = os.path.join("build", target) f.create_dir(build_dir) target_dir = os.path.join(build_dir, "pdfium") f.remove_dir(target_dir) cwd = build_dir command = " ".join( [ "gclient", "config", "--unmanaged", "https://pdfium.googlesource.com/pdfium.git", ] ) check_call(command, cwd=cwd, shell=True) gclient_file = os.path.join(build_dir, ".gclient") f.append_to_file(gclient_file, "target_os = [ 'android' ]") cwd = build_dir command = " ".join(["gclient", "sync"]) check_call(command, cwd=cwd, shell=True) cwd = target_dir command = " ".join(["git", "checkout", c.pdfium_git_commit]) check_call(command, cwd=cwd, shell=True) def run_task_patch(): f.debug("Patching...") source_dir = os.path.join("build", "android", "pdfium") # build gn source_file = os.path.join( source_dir, "BUILD.gn", ) if f.file_line_has_content(source_file, 25, " ]\n"): f.replace_line_in_file(source_file, 25, ' "FPDFSDK_EXPORTS",\n ]\n') f.debug("Applied: Build GN") else: f.debug("Skipped: Build GN") # build gn flags source_file = os.path.join( source_dir, "BUILD.gn", ) if f.file_line_has_content(source_file, 19, " cflags = []\n"): f.replace_line_in_file( source_file, 19, ' cflags = [ "-fvisibility=default" ]\n' ) f.debug("Applied: Build GN Flags") else: f.debug("Skipped: Build GN Flags") pass def run_task_build(): f.debug("Building libraries...") current_dir = os.getcwd() # configs for config in c.configurations_android: # targets for target in c.targets_android: main_dir = os.path.join( "build", target["target_os"], "pdfium", "out", "{0}-{1}-{2}".format(target["target_os"], target["target_cpu"], config), ) f.remove_dir(main_dir) f.create_dir(main_dir) os.chdir( os.path.join( "build", target["target_os"], "pdfium", ) ) # generating files... f.debug( 'Generating files to arch "{0}" and configuration "{1}"...'.format( target["target_cpu"], config ) ) arg_is_debug = "true" if config == "debug" else "false" args = [] args.append('target_os="{0}"'.format(target["pdfium_os"])) args.append('target_cpu="{0}"'.format(target["target_cpu"])) args.append("use_goma=false") args.append("is_debug={0}".format(arg_is_debug)) args.append("pdf_use_skia=false") args.append("pdf_use_skia_paths=false") args.append("pdf_enable_xfa=false") args.append("pdf_enable_v8=false") args.append("is_component_build=true") args.append("pdf_is_standalone=true") args.append("pdf_bundle_freetype=true") if config == "release": args.append("symbol_level=0") args_str = " ".join(args) command = " ".join( [ "gn", "gen", "out/{0}-{1}-{2}".format( target["target_os"], target["target_cpu"], config ), "--args='{0}'".format(args_str), ] ) check_call(command, shell=True) # compiling... 
f.debug( 'Compiling to arch "{0}" and configuration "{1}"...'.format( target["target_cpu"], config ) ) command = " ".join( [ "ninja", "-C", "out/{0}-{1}-{2}".format( target["target_os"], target["target_cpu"], config ), "pdfium", "-v", ] ) check_call(command, shell=True) os.chdir(current_dir) def run_task_install(): f.debug("Installing libraries...") # configs for config in c.configurations_android: f.remove_dir(os.path.join("build", "android", config)) f.create_dir(os.path.join("build", "android", config)) # targets for target in c.targets_android: out_dir = "{0}-{1}-{2}".format( target["target_os"], target["target_cpu"], config ) source_lib_dir = os.path.join("build", "android", "pdfium", "out", out_dir) lib_dir = os.path.join("build", "android", config, "lib") target_dir = os.path.join(lib_dir, target["android_cpu"]) f.remove_dir(target_dir) f.create_dir(target_dir) for basename in os.listdir(source_lib_dir): if basename.endswith(".so"): pathname = os.path.join(source_lib_dir, basename) if os.path.isfile(pathname): f.copy_file2(pathname, target_dir) # include include_dir = os.path.join("build", "android", "pdfium", "public") target_include_dir = os.path.join("build", "android", config, "include") f.remove_dir(target_include_dir) f.create_dir(target_include_dir) for basename in os.listdir(include_dir): if basename.endswith(".h"): pathname = os.path.join(include_dir, basename) if os.path.isfile(pathname): f.copy_file2(pathname, target_include_dir) def run_task_test(): f.debug("Testing...") for config in c.configurations_android: for target in c.targets_android: lib_dir = os.path.join( "build", "android", config, "lib", target["android_cpu"] ) command = " ".join(["file", os.path.join(lib_dir, "libpdfium.so")]) check_call(command, shell=True) def run_task_archive(): f.debug("Archiving...") current_dir = os.getcwd() lib_dir = os.path.join(current_dir, "build", "android") output_filename = os.path.join(current_dir, "android.tgz") tar = tarfile.open(output_filename, "w:gz") for configuration in c.configurations_android: tar.add( name=os.path.join(lib_dir, configuration), arcname=os.path.basename(os.path.join(lib_dir, configuration)), filter=lambda x: ( None if "_" in x.name and not x.name.endswith(".h") and not x.name.endswith(".so") and os.path.isfile(x.name) else x ), ) tar.close()
29.182186
88
0.517064
import glob import os import tarfile from subprocess import check_call import modules.config as c import modules.functions as f def run_task_build_pdfium(): f.debug("Building PDFium...") target = "android" build_dir = os.path.join("build", target) f.create_dir(build_dir) target_dir = os.path.join(build_dir, "pdfium") f.remove_dir(target_dir) cwd = build_dir command = " ".join( [ "gclient", "config", "--unmanaged", "https://pdfium.googlesource.com/pdfium.git", ] ) check_call(command, cwd=cwd, shell=True) gclient_file = os.path.join(build_dir, ".gclient") f.append_to_file(gclient_file, "target_os = [ 'android' ]") cwd = build_dir command = " ".join(["gclient", "sync"]) check_call(command, cwd=cwd, shell=True) cwd = target_dir command = " ".join(["git", "checkout", c.pdfium_git_commit]) check_call(command, cwd=cwd, shell=True) def run_task_patch(): f.debug("Patching...") source_dir = os.path.join("build", "android", "pdfium") source_file = os.path.join( source_dir, "BUILD.gn", ) if f.file_line_has_content(source_file, 25, " ]\n"): f.replace_line_in_file(source_file, 25, ' "FPDFSDK_EXPORTS",\n ]\n') f.debug("Applied: Build GN") else: f.debug("Skipped: Build GN") source_file = os.path.join( source_dir, "BUILD.gn", ) if f.file_line_has_content(source_file, 19, " cflags = []\n"): f.replace_line_in_file( source_file, 19, ' cflags = [ "-fvisibility=default" ]\n' ) f.debug("Applied: Build GN Flags") else: f.debug("Skipped: Build GN Flags") pass def run_task_build(): f.debug("Building libraries...") current_dir = os.getcwd() for config in c.configurations_android: for target in c.targets_android: main_dir = os.path.join( "build", target["target_os"], "pdfium", "out", "{0}-{1}-{2}".format(target["target_os"], target["target_cpu"], config), ) f.remove_dir(main_dir) f.create_dir(main_dir) os.chdir( os.path.join( "build", target["target_os"], "pdfium", ) ) f.debug( 'Generating files to arch "{0}" and configuration "{1}"...'.format( target["target_cpu"], config ) ) arg_is_debug = "true" if config == "debug" else "false" args = [] args.append('target_os="{0}"'.format(target["pdfium_os"])) args.append('target_cpu="{0}"'.format(target["target_cpu"])) args.append("use_goma=false") args.append("is_debug={0}".format(arg_is_debug)) args.append("pdf_use_skia=false") args.append("pdf_use_skia_paths=false") args.append("pdf_enable_xfa=false") args.append("pdf_enable_v8=false") args.append("is_component_build=true") args.append("pdf_is_standalone=true") args.append("pdf_bundle_freetype=true") if config == "release": args.append("symbol_level=0") args_str = " ".join(args) command = " ".join( [ "gn", "gen", "out/{0}-{1}-{2}".format( target["target_os"], target["target_cpu"], config ), "--args='{0}'".format(args_str), ] ) check_call(command, shell=True) f.debug( 'Compiling to arch "{0}" and configuration "{1}"...'.format( target["target_cpu"], config ) ) command = " ".join( [ "ninja", "-C", "out/{0}-{1}-{2}".format( target["target_os"], target["target_cpu"], config ), "pdfium", "-v", ] ) check_call(command, shell=True) os.chdir(current_dir) def run_task_install(): f.debug("Installing libraries...") for config in c.configurations_android: f.remove_dir(os.path.join("build", "android", config)) f.create_dir(os.path.join("build", "android", config)) for target in c.targets_android: out_dir = "{0}-{1}-{2}".format( target["target_os"], target["target_cpu"], config ) source_lib_dir = os.path.join("build", "android", "pdfium", "out", out_dir) lib_dir = os.path.join("build", "android", config, "lib") target_dir = 
os.path.join(lib_dir, target["android_cpu"]) f.remove_dir(target_dir) f.create_dir(target_dir) for basename in os.listdir(source_lib_dir): if basename.endswith(".so"): pathname = os.path.join(source_lib_dir, basename) if os.path.isfile(pathname): f.copy_file2(pathname, target_dir) include_dir = os.path.join("build", "android", "pdfium", "public") target_include_dir = os.path.join("build", "android", config, "include") f.remove_dir(target_include_dir) f.create_dir(target_include_dir) for basename in os.listdir(include_dir): if basename.endswith(".h"): pathname = os.path.join(include_dir, basename) if os.path.isfile(pathname): f.copy_file2(pathname, target_include_dir) def run_task_test(): f.debug("Testing...") for config in c.configurations_android: for target in c.targets_android: lib_dir = os.path.join( "build", "android", config, "lib", target["android_cpu"] ) command = " ".join(["file", os.path.join(lib_dir, "libpdfium.so")]) check_call(command, shell=True) def run_task_archive(): f.debug("Archiving...") current_dir = os.getcwd() lib_dir = os.path.join(current_dir, "build", "android") output_filename = os.path.join(current_dir, "android.tgz") tar = tarfile.open(output_filename, "w:gz") for configuration in c.configurations_android: tar.add( name=os.path.join(lib_dir, configuration), arcname=os.path.basename(os.path.join(lib_dir, configuration)), filter=lambda x: ( None if "_" in x.name and not x.name.endswith(".h") and not x.name.endswith(".so") and os.path.isfile(x.name) else x ), ) tar.close()
true
true
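The android.py record above defines one function per build stage. A hypothetical driver that calls those stages in dependency order is sketched below; the real repository wires its tasks through its own entry point, so treat this purely as an illustration of call order.

import modules.android as android  # module path assumed from the record above

# Run the stages in dependency order: fetch, patch, compile, install, test, archive.
for task in (
    android.run_task_build_pdfium,
    android.run_task_patch,
    android.run_task_build,
    android.run_task_install,
    android.run_task_test,
    android.run_task_archive,
):
    task()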
f708236d0f78899c556252304ef35f5ad77e4c31
16,269
py
Python
rlkit/torch/networks.py
NagisaZj/ProMP
539739ae2b7d5fdcad00855da695f643b23df4b3
[ "MIT" ]
null
null
null
rlkit/torch/networks.py
NagisaZj/ProMP
539739ae2b7d5fdcad00855da695f643b23df4b3
[ "MIT" ]
null
null
null
rlkit/torch/networks.py
NagisaZj/ProMP
539739ae2b7d5fdcad00855da695f643b23df4b3
[ "MIT" ]
null
null
null
""" General networks for pytorch. Algorithm-specific networks should go else-where. """ import torch from torch import nn as nn from torch.nn import functional as F from rlkit.policies.base import Policy from rlkit.torch import pytorch_util as ptu from rlkit.torch.core import PyTorchModule from rlkit.torch.data_management.normalizer import TorchFixedNormalizer from rlkit.torch.modules import LayerNorm import math def identity(x): return x class Mlp(PyTorchModule): def __init__( self, hidden_sizes, output_size, input_size, init_w=3e-3, hidden_activation=F.relu, output_activation=identity, hidden_init=ptu.fanin_init, b_init_value=0.1, layer_norm=False, layer_norm_kwargs=None, ): self.save_init_params(locals()) super().__init__() if layer_norm_kwargs is None: layer_norm_kwargs = dict() self.input_size = input_size self.output_size = output_size self.hidden_sizes = hidden_sizes self.hidden_activation = hidden_activation self.output_activation = output_activation self.layer_norm = layer_norm self.fcs = [] self.layer_norms = [] in_size = input_size for i, next_size in enumerate(hidden_sizes): fc = nn.Linear(in_size, next_size) in_size = next_size hidden_init(fc.weight) fc.bias.data.fill_(b_init_value) self.__setattr__("fc{}".format(i), fc) self.fcs.append(fc) if self.layer_norm: ln = LayerNorm(next_size) self.__setattr__("layer_norm{}".format(i), ln) self.layer_norms.append(ln) self.last_fc = nn.Linear(in_size, output_size) self.last_fc.weight.data.uniform_(-init_w, init_w) self.last_fc.bias.data.uniform_(-init_w, init_w) def forward(self, input, return_preactivations=False): h = input for i, fc in enumerate(self.fcs): h = fc(h) if self.layer_norm and i < len(self.fcs) - 1: h = self.layer_norms[i](h) h = self.hidden_activation(h) preactivation = self.last_fc(h) output = self.output_activation(preactivation) if return_preactivations: return output, preactivation else: return output class FlattenMlp(Mlp): """ if there are multiple inputs, concatenate along dim 1 """ def forward(self, *inputs, **kwargs): flat_inputs = torch.cat(inputs, dim=1) return super().forward(flat_inputs, **kwargs) class MlpPolicy(Mlp, Policy): """ A simpler interface for creating policies. """ def __init__( self, *args, obs_normalizer: TorchFixedNormalizer = None, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.obs_normalizer = obs_normalizer def forward(self, obs, **kwargs): if self.obs_normalizer: obs = self.obs_normalizer.normalize(obs) return super().forward(obs, **kwargs) def get_action(self, obs_np): actions = self.get_actions(obs_np[None]) return actions[0, :], {} def get_actions(self, obs): return self.eval_np(obs) class TanhMlpPolicy(MlpPolicy): """ A helper class since most policies have a tanh output activation. 
""" def __init__(self, *args, **kwargs): self.save_init_params(locals()) super().__init__(*args, output_activation=torch.tanh, **kwargs) class MlpEncoder(FlattenMlp): ''' encode context via MLP ''' def reset(self, num_tasks=1): pass def forward_seq(self,context): t,b,_ = context.size() input = context.view(t*b,-1) out = self.forward(input) return out.view(t,b,-1) class RecurrentEncoder(FlattenMlp): ''' encode context via recurrent network ''' def __init__(self, *args, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.hidden_dim = self.hidden_sizes[-1] self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim)) # input should be (task, seq, feat) and hidden should be (task, 1, feat) self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True) def forward(self, in_, return_preactivations=False): # expects inputs of dimension (task, seq, feat) task, seq, feat = in_.size() out = in_.view(task * seq, feat) # embed with MLP for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device))) self.hidden = hn # take the last hidden state to predict z out = out[:, -1, :] # output layer preactivation = self.last_fc(out) output = self.output_activation(preactivation) if return_preactivations: return output, preactivation else: return output def reset(self, num_tasks=1): self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0) class RNN(FlattenMlp): ''' encode context via recurrent network ''' def __init__(self, *args, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.hidden_dim = self.hidden_sizes[-1] self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim)) # input should be (task, seq, feat) and hidden should be (task, 1, feat) self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True) def inner_forward(self, in_, return_preactivations=False): # expects inputs of dimension (task, seq, feat) task, seq, feat = in_.size() out = in_.view(task * seq, feat) # embed with MLP for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device))) self.hidden = hn # take the last hidden state to predict z out = out.contiguous() out = out.view(task * seq, -1) # output layer #preactivation = self.last_fc(out) #output = self.output_activation(preactivation) if return_preactivations: return out, out else: return out def forward(self, in_, return_preactivations=False): # expects inputs of dimension (task, seq, feat) task, seq, feat = in_.size() out = in_.view(task * seq, feat) # embed with MLP for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device))) self.hidden = hn # take the last hidden state to predict z out = out.contiguous() out = out.view(task * seq, -1) # output layer preactivation = self.last_fc(out) output = self.output_activation(preactivation) if return_preactivations: return output, output else: return output def inner_reset(self, num_tasks=1): self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0) class SnailEncoder(FlattenMlp): def __init__(self, input_length, *args, **kwargs ): self.save_init_params(locals()) 
super().__init__(*args, **kwargs) self.hidden_dim = self.hidden_sizes[-1] self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim)) self.input_length = input_length # input should be (task, seq, feat) and hidden should be (1, task, feat) #self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True) layer_count = math.ceil(math.log(input_length)/math.log(2)) self.TC1 = TCBlock(self.hidden_dim,input_length,16) self.atten1 = AttentionBlock(self.hidden_dim+16*layer_count,32,32) self.TC2 = TCBlock(self.hidden_dim+16*layer_count+32,input_length,16) self.atten2 = AttentionBlock(self.hidden_dim+16*layer_count*2+32,32,32) self.out_layer = nn.Linear(self.hidden_dim+16*layer_count*2+32+32,self.output_size) self.var_start = int(self.output_size / 2) def forward(self, in_, return_preactivations=False): # expects inputs of dimension (task, seq, feat) task, seq, feat = in_.size() out = in_.view(task * seq, feat) # embed with MLP for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out = out.permute(0,2,1) #print(out.shape) out = self.TC1(out) out = self.atten1(out) out = self.TC2(out) out = self.atten2(out) out = out[:, :, -1] #print('o',out.shape) # output layer preactivation = self.out_layer(out) output = self.output_activation(preactivation) #temp = F.softplus(output[..., self.var_start:]) #output[..., self.var_start:] = temp if return_preactivations: return output, preactivation else: return output def forward_seq(self, in_, return_preactivations=False): # expects inputs of dimension (task, seq, feat) task, seq, feat = in_.size() in_ = in_.contiguous() out = in_.view(task * seq, feat) # embed with MLP for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out = out.permute(0,2,1) #print(out.shape) out = self.TC1(out) out = self.atten1(out) out = self.TC2(out) out = self.atten2(out) out = out.permute(0,2,1) out = out.view(task * seq,-1) preactivation = self.out_layer(out) output = self.output_activation(preactivation) #temp = F.softplus(output[..., self.var_start:]) #output[..., self.var_start:] = temp #output = output.view(task,seq,-1) if return_preactivations: return output, preactivation else: return output def reset(self,num_tasks=1): return class MyMlpEncoder(FlattenMlp): ''' encode context via MLP ''' def reset(self, num_tasks=1): pass def forward_seq(self,context): t,b,_ = context.size() input = context.view(t*b,-1) out = self.forward(input) return out def forward(self,context): t,b,_ = context.size() input = context.view(t*b,-1) out = self.forward(input) return out class CausalConv1d(nn.Module): """A 1D causal convolution layer. Input: (B, D_in, T), where B is the minibatch size, D_in is the number of dimensions per step, and T is the number of steps. Output: (B, D_out, T), where B is the minibatch size, D_out is the number of dimensions in the output, and T is the number of steps. 
Arguments: in_channels (int): number of input channels out_channels (int): number of output channels """ def __init__(self, in_channels, out_channels, dilation=1): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d( in_channels, out_channels, 2, padding = self.padding, dilation = dilation ) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlock(nn.Module): """Two parallel 1D causal convolution layers w/tanh and sigmoid activations Input: (B, D_in, T), where B is the minibatch size, D_in is the number of dimensions of the input, and T is the number of steps. Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the number of dimensions of the input, `F` is the number of filters, and `T` is the length of the input sequence. Arguments: in_channels (int): number of input channels filters (int): number of filters per channel """ def __init__(self, in_channels, filters, dilation=1): super(DenseBlock, self).__init__() self.causal_conv1 = CausalConv1d( in_channels, filters, dilation=dilation ) self.causal_conv2 = CausalConv1d( in_channels, filters, dilation=dilation ) def forward(self, minibatch): tanh = F.tanh(self.causal_conv1(minibatch)) sig = F.sigmoid(self.causal_conv2(minibatch)) out = torch.cat([minibatch, tanh*sig], dim=1) return out class TCBlock(nn.Module): """A stack of DenseBlocks which dilates to desired sequence length The TCBlock adds `ceil(log_2(seq_len))*filters` channels to the output. Input: (B, D_in, T), where B is the minibatch size, D_in is the number of dimensions of the input, and T is the number of steps. Output: (B, D_in+F, T), where where `B` is the minibatch size, `D_in` is the number of dimensions of the input, `F` is the number of filters, and `T` is the length of the input sequence. Arguments: in_channels (int): channels for the input seq_len (int): length of the sequence. The number of denseblock layers is log base 2 of `seq_len`. filters (int): number of filters per channel """ def __init__(self, in_channels, seq_len, filters): super(TCBlock, self).__init__() layer_count = math.ceil(math.log(seq_len)/math.log(2)) blocks = [] channel_count = in_channels for layer in range(layer_count): block = DenseBlock(channel_count, filters, dilation=2**layer) blocks.append(block) channel_count += filters self.blocks = nn.Sequential(*blocks) def forward(self, minibatch): return self.blocks(minibatch) class AttentionBlock(nn.Module): """An attention mechanism similar to Vaswani et al (2017) The input of the AttentionBlock is `BxDxT` where `B` is the input minibatch size, `D` is the dimensions of each feature, `T` is the length of the sequence. The output of the AttentionBlock is `Bx(D+V)xT` where `V` is the size of the attention values. 
Arguments: input_dims (int): the number of dimensions (or channels) of each element in the input sequence k_size (int): the size of the attention keys v_size (int): the size of the attention values """ def __init__(self, input_dims, k_size, v_size): super(AttentionBlock, self).__init__() self.key_layer = nn.Linear(input_dims, k_size) self.query_layer = nn.Linear(input_dims, k_size) self.value_layer = nn.Linear(input_dims, v_size) self.sqrt_k = math.sqrt(k_size) def forward(self, minibatch): minibatch = minibatch.permute(0,2,1) keys = self.key_layer(minibatch) queries = self.query_layer(minibatch) values = self.value_layer(minibatch) logits = torch.bmm(queries, keys.transpose(2,1)) mask = logits.data.new(logits.size(1), logits.size(2)).fill_(1).byte() mask = torch.triu(mask, 1) mask = mask.unsqueeze(0).expand_as(logits) logits.data.masked_fill_(mask, float('-inf')) probs = F.softmax(logits / self.sqrt_k, dim=2) read = torch.bmm(probs, values) return torch.cat([minibatch, read], dim=2).permute(0,2,1)
32.933198
101
0.605262
import torch from torch import nn as nn from torch.nn import functional as F from rlkit.policies.base import Policy from rlkit.torch import pytorch_util as ptu from rlkit.torch.core import PyTorchModule from rlkit.torch.data_management.normalizer import TorchFixedNormalizer from rlkit.torch.modules import LayerNorm import math def identity(x): return x class Mlp(PyTorchModule): def __init__( self, hidden_sizes, output_size, input_size, init_w=3e-3, hidden_activation=F.relu, output_activation=identity, hidden_init=ptu.fanin_init, b_init_value=0.1, layer_norm=False, layer_norm_kwargs=None, ): self.save_init_params(locals()) super().__init__() if layer_norm_kwargs is None: layer_norm_kwargs = dict() self.input_size = input_size self.output_size = output_size self.hidden_sizes = hidden_sizes self.hidden_activation = hidden_activation self.output_activation = output_activation self.layer_norm = layer_norm self.fcs = [] self.layer_norms = [] in_size = input_size for i, next_size in enumerate(hidden_sizes): fc = nn.Linear(in_size, next_size) in_size = next_size hidden_init(fc.weight) fc.bias.data.fill_(b_init_value) self.__setattr__("fc{}".format(i), fc) self.fcs.append(fc) if self.layer_norm: ln = LayerNorm(next_size) self.__setattr__("layer_norm{}".format(i), ln) self.layer_norms.append(ln) self.last_fc = nn.Linear(in_size, output_size) self.last_fc.weight.data.uniform_(-init_w, init_w) self.last_fc.bias.data.uniform_(-init_w, init_w) def forward(self, input, return_preactivations=False): h = input for i, fc in enumerate(self.fcs): h = fc(h) if self.layer_norm and i < len(self.fcs) - 1: h = self.layer_norms[i](h) h = self.hidden_activation(h) preactivation = self.last_fc(h) output = self.output_activation(preactivation) if return_preactivations: return output, preactivation else: return output class FlattenMlp(Mlp): def forward(self, *inputs, **kwargs): flat_inputs = torch.cat(inputs, dim=1) return super().forward(flat_inputs, **kwargs) class MlpPolicy(Mlp, Policy): def __init__( self, *args, obs_normalizer: TorchFixedNormalizer = None, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.obs_normalizer = obs_normalizer def forward(self, obs, **kwargs): if self.obs_normalizer: obs = self.obs_normalizer.normalize(obs) return super().forward(obs, **kwargs) def get_action(self, obs_np): actions = self.get_actions(obs_np[None]) return actions[0, :], {} def get_actions(self, obs): return self.eval_np(obs) class TanhMlpPolicy(MlpPolicy): def __init__(self, *args, **kwargs): self.save_init_params(locals()) super().__init__(*args, output_activation=torch.tanh, **kwargs) class MlpEncoder(FlattenMlp): def reset(self, num_tasks=1): pass def forward_seq(self,context): t,b,_ = context.size() input = context.view(t*b,-1) out = self.forward(input) return out.view(t,b,-1) class RecurrentEncoder(FlattenMlp): def __init__(self, *args, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.hidden_dim = self.hidden_sizes[-1] self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim)) self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True) def forward(self, in_, return_preactivations=False): task, seq, feat = in_.size() out = in_.view(task * seq, feat) for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device))) self.hidden = hn out = out[:, -1, :] preactivation = 
self.last_fc(out) output = self.output_activation(preactivation) if return_preactivations: return output, preactivation else: return output def reset(self, num_tasks=1): self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0) class RNN(FlattenMlp): def __init__(self, *args, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.hidden_dim = self.hidden_sizes[-1] self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim)) self.lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, num_layers=1, batch_first=True) def inner_forward(self, in_, return_preactivations=False): task, seq, feat = in_.size() out = in_.view(task * seq, feat) for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device))) self.hidden = hn out = out.contiguous() out = out.view(task * seq, -1) if return_preactivations: return out, out else: return out def forward(self, in_, return_preactivations=False): task, seq, feat = in_.size() out = in_.view(task * seq, feat) for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out, (hn, cn) = self.lstm(out, (self.hidden, torch.zeros(self.hidden.size()).to(ptu.device))) self.hidden = hn out = out.contiguous() out = out.view(task * seq, -1) preactivation = self.last_fc(out) output = self.output_activation(preactivation) if return_preactivations: return output, output else: return output def inner_reset(self, num_tasks=1): self.hidden = self.hidden.new_full((1, num_tasks, self.hidden_dim), 0) class SnailEncoder(FlattenMlp): def __init__(self, input_length, *args, **kwargs ): self.save_init_params(locals()) super().__init__(*args, **kwargs) self.hidden_dim = self.hidden_sizes[-1] self.register_buffer('hidden', torch.zeros(1, 1, self.hidden_dim)) self.input_length = input_length layer_count = math.ceil(math.log(input_length)/math.log(2)) self.TC1 = TCBlock(self.hidden_dim,input_length,16) self.atten1 = AttentionBlock(self.hidden_dim+16*layer_count,32,32) self.TC2 = TCBlock(self.hidden_dim+16*layer_count+32,input_length,16) self.atten2 = AttentionBlock(self.hidden_dim+16*layer_count*2+32,32,32) self.out_layer = nn.Linear(self.hidden_dim+16*layer_count*2+32+32,self.output_size) self.var_start = int(self.output_size / 2) def forward(self, in_, return_preactivations=False): task, seq, feat = in_.size() out = in_.view(task * seq, feat) for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out = out.permute(0,2,1) out = self.TC1(out) out = self.atten1(out) out = self.TC2(out) out = self.atten2(out) out = out[:, :, -1] preactivation = self.out_layer(out) output = self.output_activation(preactivation) if return_preactivations: return output, preactivation else: return output def forward_seq(self, in_, return_preactivations=False): task, seq, feat = in_.size() in_ = in_.contiguous() out = in_.view(task * seq, feat) for i, fc in enumerate(self.fcs): out = fc(out) out = self.hidden_activation(out) out = out.view(task, seq, -1) out = out.permute(0,2,1) out = self.TC1(out) out = self.atten1(out) out = self.TC2(out) out = self.atten2(out) out = out.permute(0,2,1) out = out.view(task * seq,-1) preactivation = self.out_layer(out) output = self.output_activation(preactivation) if return_preactivations: return output, preactivation else: return output def reset(self,num_tasks=1): return class 
MyMlpEncoder(FlattenMlp): def reset(self, num_tasks=1): pass def forward_seq(self,context): t,b,_ = context.size() input = context.view(t*b,-1) out = self.forward(input) return out def forward(self,context): t,b,_ = context.size() input = context.view(t*b,-1) out = self.forward(input) return out class CausalConv1d(nn.Module): def __init__(self, in_channels, out_channels, dilation=1): super(CausalConv1d, self).__init__() self.padding = dilation self.causal_conv = nn.Conv1d( in_channels, out_channels, 2, padding = self.padding, dilation = dilation ) def forward(self, minibatch): return self.causal_conv(minibatch)[:, :, :-self.padding] class DenseBlock(nn.Module): def __init__(self, in_channels, filters, dilation=1): super(DenseBlock, self).__init__() self.causal_conv1 = CausalConv1d( in_channels, filters, dilation=dilation ) self.causal_conv2 = CausalConv1d( in_channels, filters, dilation=dilation ) def forward(self, minibatch): tanh = F.tanh(self.causal_conv1(minibatch)) sig = F.sigmoid(self.causal_conv2(minibatch)) out = torch.cat([minibatch, tanh*sig], dim=1) return out class TCBlock(nn.Module): def __init__(self, in_channels, seq_len, filters): super(TCBlock, self).__init__() layer_count = math.ceil(math.log(seq_len)/math.log(2)) blocks = [] channel_count = in_channels for layer in range(layer_count): block = DenseBlock(channel_count, filters, dilation=2**layer) blocks.append(block) channel_count += filters self.blocks = nn.Sequential(*blocks) def forward(self, minibatch): return self.blocks(minibatch) class AttentionBlock(nn.Module): def __init__(self, input_dims, k_size, v_size): super(AttentionBlock, self).__init__() self.key_layer = nn.Linear(input_dims, k_size) self.query_layer = nn.Linear(input_dims, k_size) self.value_layer = nn.Linear(input_dims, v_size) self.sqrt_k = math.sqrt(k_size) def forward(self, minibatch): minibatch = minibatch.permute(0,2,1) keys = self.key_layer(minibatch) queries = self.query_layer(minibatch) values = self.value_layer(minibatch) logits = torch.bmm(queries, keys.transpose(2,1)) mask = logits.data.new(logits.size(1), logits.size(2)).fill_(1).byte() mask = torch.triu(mask, 1) mask = mask.unsqueeze(0).expand_as(logits) logits.data.masked_fill_(mask, float('-inf')) probs = F.softmax(logits / self.sqrt_k, dim=2) read = torch.bmm(probs, values) return torch.cat([minibatch, read], dim=2).permute(0,2,1)
true
true
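To make the masking step of the AttentionBlock in the networks.py record above easier to follow, here is a standalone re-derivation in plain PyTorch with made-up shapes; it mirrors the logic of that class but is not the class itself.

import torch
import torch.nn.functional as F

B, T, K, V = 2, 5, 8, 4                     # batch, sequence length, key size, value size
queries = torch.randn(B, T, K)
keys = torch.randn(B, T, K)
values = torch.randn(B, T, V)

logits = torch.bmm(queries, keys.transpose(2, 1))            # (B, T, T) attention scores
mask = torch.triu(torch.ones(T, T, dtype=torch.bool), 1)     # strictly upper triangle
logits = logits.masked_fill(mask, float("-inf"))             # block attention to future steps
probs = F.softmax(logits / K ** 0.5, dim=2)
read = torch.bmm(probs, values)                              # (B, T, V) attended values
print(read.shape)                                            # torch.Size([2, 5, 4])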
f70824aa75c16cfcf7a1046989c882d7009972df
1,551
py
Python
src/gists-gihub/9ba0c82b846dba2566fbf72d06be0b65/test01.py
MrPivato/arquivos-de-procedencia-duvidosa
62ee2cdef0d1b3a25566671411575db1916706f9
[ "MIT" ]
null
null
null
src/gists-gihub/9ba0c82b846dba2566fbf72d06be0b65/test01.py
MrPivato/arquivos-de-procedencia-duvidosa
62ee2cdef0d1b3a25566671411575db1916706f9
[ "MIT" ]
null
null
null
src/gists-gihub/9ba0c82b846dba2566fbf72d06be0b65/test01.py
MrPivato/arquivos-de-procedencia-duvidosa
62ee2cdef0d1b3a25566671411575db1916706f9
[ "MIT" ]
null
null
null
''' multiline comment here ''' a = 10 b = 2 val1 = 123456 val2 = "sopa de ....." val3 = 123.123 print("\n") # logical operators print(a == b) # you already know what this is print(a != b) # you already know what this is print(a < b) # you already know what this is print(a >= b) # you already know what this is print(a <= b) # you already know what this is print("\n") # maths print(a**b) # power print(a**(a + b)) # power and parentheses print(a**(a + b) % 7) # power and mod print("\n") # some types print(type(val1)) # prints the type of the var print(type(val2)) # prints the type of the var print(type(val3)) # prints the type of the var print(type(a)) # prints the type of the var print(type(b)) # prints the type of the var print("\n") # strings palavra = 'tecnologicamente_avancada' # strings are objects in python print(palavra[0]) # the letters can be accessed this way print(palavra[1]) # indeed print(palavra[2]) # very print(palavra[3]) # technologically advanced print(palavra[4]) print(2 * palavra[5]) # 2x what is at position 5 print(palavra[5:10]) # what lies between positions 5 and 10 print(palavra[:10]) # what comes before position 10 print(palavra[10:]) # what comes after position 10 print(palavra[1:10:2]) # between positions 1 and 10 with a step of 2 print(palavra[15::-1]) # from position 15 downwards, reversed print("\n") # lists lista = [1, 2, 3, 4, 5, 6, 7, 8, 9] print(type(lista)) print(lista[0] + lista[1]) # addition, for elements inside the lists lista = lista + [0, 1, 2, 3] # concatenating lists print(lista) print(lista[-1]) # last index print(lista[-2]) # second-to-last index, etc... ''' CONTINUE LISTS '''
24.619048
68
0.643456
a = 10 b = 2 val1 = 123456 val2 = "sopa de ....." val3 = 123.123 print("\n") print(a == b) print(a != b) print(a < b) print(a >= b) print(a <= b) print("\n") print(a**b) print(a**(a + b)) print(a**(a + b) % 7) print("\n") print(type(val1)) print(type(val2)) print(type(val3)) print(type(a)) print(type(b)) print("\n") palavra = 'tecnologicamente_avancada' print(palavra[0]) print(palavra[1]) print(palavra[2]) print(palavra[3]) print(palavra[4]) print(2 * palavra[5]) print(palavra[5:10]) print(palavra[:10]) print(palavra[10:]) print(palavra[1:10:2]) print(palavra[15::-1]) print("\n") lista = [1, 2, 3, 4, 5, 6, 7, 8, 9] print(type(lista)) print(lista[0] + lista[1]) lista = lista + [0, 1, 2, 3] print(lista) print(lista[-1]) print(lista[-2])
true
true
f70824ffe664a17d8bdf048e954d141fb89d84df
832
py
Python
setup.py
gruentee/fever-api
58811ef7527482b77b28d291993afa2c63aa60c0
[ "Apache-2.0" ]
null
null
null
setup.py
gruentee/fever-api
58811ef7527482b77b28d291993afa2c63aa60c0
[ "Apache-2.0" ]
null
null
null
setup.py
gruentee/fever-api
58811ef7527482b77b28d291993afa2c63aa60c0
[ "Apache-2.0" ]
null
null
null
from setuptools import setup, find_packages
import sys

with open('requirements.txt') as f:
    reqs = f.read()

reqs = reqs.strip().split('\n')
install = [req for req in reqs if not req.startswith("git+git://")]
depends = [req.replace("git+git://", "git+http://") for req in reqs if req.startswith("git+git://")]

setup(
    name='fever-api',
    version='0.0.0',
    author='James Thorne',
    author_email='james@jamesthorne.co.uk',
    url='https://jamesthorne.co.uk',
    description='Fact Extraction and VERification API',
    long_description="readme",
    license="Apache 2.0",
    python_requires='>=3.5',
    package_dir={'fever': 'src/fever', 'fever.api': 'src/fever/api'},
    packages=['fever', 'fever.api'],
    install_requires=install,
    dependency_links=depends,
)
26.83871
100
0.620192
from setuptools import setup, find_packages
import sys

with open('requirements.txt') as f:
    reqs = f.read()

reqs = reqs.strip().split('\n')
install = [req for req in reqs if not req.startswith("git+git://")]
depends = [req.replace("git+git://", "git+http://") for req in reqs if req.startswith("git+git://")]

setup(
    name='fever-api',
    version='0.0.0',
    author='James Thorne',
    author_email='james@jamesthorne.co.uk',
    url='https://jamesthorne.co.uk',
    description='Fact Extraction and VERification API',
    long_description="readme",
    license="Apache 2.0",
    python_requires='>=3.5',
    package_dir={'fever': 'src/fever', 'fever.api': 'src/fever/api'},
    packages=['fever', 'fever.api'],
    install_requires=install,
    dependency_links=depends,
)
true
true
f708250955cabd36d394352f565efdbc6e091c63
2,213
py
Python
tests/text/test_sentsplit.py
evendrow/ifcc
ae6ea6f19028ba5d367c086fe94cfe4237829059
[ "Apache-2.0" ]
43
2020-10-21T03:25:21.000Z
2022-03-26T08:13:06.000Z
tests/text/test_sentsplit.py
evendrow/ifcc
ae6ea6f19028ba5d367c086fe94cfe4237829059
[ "Apache-2.0" ]
8
2020-12-04T15:06:45.000Z
2022-03-28T12:18:14.000Z
tests/text/test_sentsplit.py
evendrow/ifcc
ae6ea6f19028ba5d367c086fe94cfe4237829059
[ "Apache-2.0" ]
10
2020-11-13T03:46:09.000Z
2022-02-05T21:39:52.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import unittest

from clinicgen.text.sentsplit import LineBreakSplitter, NLTKSentenceSplitter, SpaCySentenceSplitter, StanzaSentenceSplitter


class TestLineBreakSplitter(unittest.TestCase):
    def test_split(self):
        splitter = LineBreakSplitter()
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 2)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('of'))


class TestNLTKSentenceSplitter(unittest.TestCase):
    def test_split(self):
        splitter = NLTKSentenceSplitter()
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 4)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('Running'))
        self.assertTrue(sents[2].startswith('of'))
        self.assertTrue(sents[3].startswith('Line'))


class TestSpaCySentenceSplitter(unittest.TestCase):
    def test_split(self):
        splitter = SpaCySentenceSplitter('en_core_web_sm')
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 4)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('Running'))
        self.assertTrue(sents[2].startswith('of'))
        self.assertTrue(sents[3].startswith('Line'))


class TestStanzaSentenceSplitter(unittest.TestCase):
    def test_split(self):
        splitter = StanzaSentenceSplitter()
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 4)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('Running'))
        self.assertTrue(sents[2].startswith('of'))
        self.assertTrue(sents[3].startswith('Line'))
43.392157
123
0.694984
import unittest

from clinicgen.text.sentsplit import LineBreakSplitter, NLTKSentenceSplitter, SpaCySentenceSplitter, StanzaSentenceSplitter


class TestLineBreakSplitter(unittest.TestCase):
    def test_split(self):
        splitter = LineBreakSplitter()
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 2)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('of'))


class TestNLTKSentenceSplitter(unittest.TestCase):
    def test_split(self):
        splitter = NLTKSentenceSplitter()
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 4)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('Running'))
        self.assertTrue(sents[2].startswith('of'))
        self.assertTrue(sents[3].startswith('Line'))


class TestSpaCySentenceSplitter(unittest.TestCase):
    def test_split(self):
        splitter = SpaCySentenceSplitter('en_core_web_sm')
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 4)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('Running'))
        self.assertTrue(sents[2].startswith('of'))
        self.assertTrue(sents[3].startswith('Line'))


class TestStanzaSentenceSplitter(unittest.TestCase):
    def test_split(self):
        splitter = StanzaSentenceSplitter()
        text = 'Hello NLP! Running a test\nof sentence splitting. Line breaks are considered as sentence boundaries.'
        sents = splitter.split(text)
        self.assertEqual(len(sents), 4)
        self.assertTrue(sents[0].startswith('Hello'))
        self.assertTrue(sents[1].startswith('Running'))
        self.assertTrue(sents[2].startswith('of'))
        self.assertTrue(sents[3].startswith('Line'))
true
true
f70825c17a7334392f3af2030a67a375fb7d0b26
5,698
py
Python
cruft/_commands/utils/generate.py
lkubb/cruft
0c6de85d974197969c0e65913857eaa36b788e5e
[ "MIT" ]
293
2020-08-18T05:52:45.000Z
2022-03-31T20:39:43.000Z
cruft/_commands/utils/generate.py
lkubb/cruft
0c6de85d974197969c0e65913857eaa36b788e5e
[ "MIT" ]
102
2020-08-28T16:38:34.000Z
2022-03-31T11:01:41.000Z
cruft/_commands/utils/generate.py
lkubb/cruft
0c6de85d974197969c0e65913857eaa36b788e5e
[ "MIT" ]
36
2020-08-28T16:34:10.000Z
2022-03-31T21:55:53.000Z
import os import stat from pathlib import Path from shutil import move, rmtree from typing import Optional, Set, Union from warnings import warn from cookiecutter.generate import generate_files from git import Repo from .cookiecutter import CookiecutterContext, generate_cookiecutter_context from .cruft import CruftState from .iohelper import AltTemporaryDirectory try: import toml except ImportError: # pragma: no cover toml = None # type: ignore def cookiecutter_template( output_dir: Path, repo: Repo, cruft_state: CruftState, project_dir: Path = Path("."), cookiecutter_input: bool = False, checkout: Optional[str] = None, deleted_paths: Set[Path] = None, update_deleted_paths: bool = False, ) -> CookiecutterContext: """Generate a clean cookiecutter template in output_dir.""" if deleted_paths is None: deleted_paths = set() pyproject_file = project_dir / "pyproject.toml" commit = checkout or repo.remotes.origin.refs["HEAD"] repo.head.reset(commit=commit, working_tree=True) context = _generate_output(cruft_state, Path(repo.working_dir), cookiecutter_input, output_dir) # Get all paths that we are supposed to skip before generating the diff and applying updates skip_paths = _get_skip_paths(cruft_state, pyproject_file) # We also get the list of paths that were deleted from the project # directory but were present in the template that the project is linked against # This is to avoid introducing changes that won't apply cleanly to the current project. if update_deleted_paths: deleted_paths.update(_get_deleted_files(output_dir, project_dir)) # We now remove skipped and deleted paths from the project _remove_paths(output_dir, skip_paths | deleted_paths) # type: ignore return context ##################################### # Generating clean outputs for diff # ##################################### def _generate_output( cruft_state: CruftState, project_dir: Path, cookiecutter_input: bool, output_dir: Path ) -> CookiecutterContext: inner_dir = project_dir / (cruft_state.get("directory") or "") new_context = generate_cookiecutter_context( cruft_state["template"], inner_dir, extra_context=cruft_state["context"]["cookiecutter"], no_input=not cookiecutter_input, ) # This generates the cookiecutter template. # Unfortunately, cookiecutter doesn't let us output the template in an # arbitrary directory. It insists on creating the initial project directory. # Therefore we have to move the directory content to the expected output_dir. # See https://github.com/cookiecutter/cookiecutter/pull/907 output_dir.mkdir(parents=True, exist_ok=True) with AltTemporaryDirectory() as tmpdir_: tmpdir = Path(tmpdir_) # Kindly ask cookiecutter to generate the template template_dir = generate_files( repo_dir=inner_dir, context=new_context, overwrite_if_exists=True, output_dir=tmpdir ) template_dir = Path(template_dir) # Move the template content to the output directory for name in os.listdir(template_dir): move(str(template_dir / name), str(output_dir)) return new_context ############################## # Removing unnecessary files # ############################## def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]: skip_cruft = cruft_state.get("skip", []) if toml and pyproject_file.is_file(): pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {}) skip_cruft.extend(pyproject_cruft.get("skip", [])) elif pyproject_file.is_file(): warn( "pyproject.toml is present in repo, but `toml` package is not installed. " "Cruft configuration may be ignored." 
) return set(map(Path, skip_cruft)) def _get_deleted_files(template_dir: Path, project_dir: Path): cwd = Path.cwd() os.chdir(template_dir) template_paths = set(Path(".").glob("**/*")) os.chdir(cwd) os.chdir(project_dir) deleted_paths = set(filter(lambda path: not path.exists(), template_paths)) os.chdir(cwd) return deleted_paths def _remove_readonly(func, path, _): # pragma: no cov_4_nix """Clear the readonly bit and reattempt the removal.""" os.chmod(path, stat.S_IWRITE) # WINDOWS func(path) def _remove_single_path(path: Path): if path.is_dir(): try: rmtree(path, ignore_errors=False, onerror=_remove_readonly) except Exception: # pragma: no cover raise Exception("Failed to remove directory.") # rmtree(path) elif path.is_file(): # path.unlink() try: path.unlink() except PermissionError: # pragma: no cov_4_nix path.chmod(stat.S_IWRITE) path.unlink() except Exception as exc: # pragma: no cover raise Exception("Failed to remove file.") from exc def _remove_paths(root: Path, paths_to_remove: Set[Union[Path, str]]): # There is some redundancy here in chmoding dirs and/or files differently. abs_paths_to_remove = [] for path_to_remove in paths_to_remove: if isinstance(path_to_remove, Path): abs_paths_to_remove.append(root / path_to_remove) elif isinstance(path_to_remove, str): # assumes the string is a glob-pattern abs_paths_to_remove += list(root.glob(path_to_remove)) else: warn(f"{path_to_remove} is not a Path object or a string glob-pattern") for path in abs_paths_to_remove: _remove_single_path(path)
35.836478
99
0.68252
import os import stat from pathlib import Path from shutil import move, rmtree from typing import Optional, Set, Union from warnings import warn from cookiecutter.generate import generate_files from git import Repo from .cookiecutter import CookiecutterContext, generate_cookiecutter_context from .cruft import CruftState from .iohelper import AltTemporaryDirectory try: import toml except ImportError: toml = None def cookiecutter_template( output_dir: Path, repo: Repo, cruft_state: CruftState, project_dir: Path = Path("."), cookiecutter_input: bool = False, checkout: Optional[str] = None, deleted_paths: Set[Path] = None, update_deleted_paths: bool = False, ) -> CookiecutterContext: if deleted_paths is None: deleted_paths = set() pyproject_file = project_dir / "pyproject.toml" commit = checkout or repo.remotes.origin.refs["HEAD"] repo.head.reset(commit=commit, working_tree=True) context = _generate_output(cruft_state, Path(repo.working_dir), cookiecutter_input, output_dir) skip_paths = _get_skip_paths(cruft_state, pyproject_file) if update_deleted_paths: deleted_paths.update(_get_deleted_files(output_dir, project_dir)) # We now remove skipped and deleted paths from the project _remove_paths(output_dir, skip_paths | deleted_paths) # type: ignore return context ##################################### # Generating clean outputs for diff # ##################################### def _generate_output( cruft_state: CruftState, project_dir: Path, cookiecutter_input: bool, output_dir: Path ) -> CookiecutterContext: inner_dir = project_dir / (cruft_state.get("directory") or "") new_context = generate_cookiecutter_context( cruft_state["template"], inner_dir, extra_context=cruft_state["context"]["cookiecutter"], no_input=not cookiecutter_input, ) # This generates the cookiecutter template. # Unfortunately, cookiecutter doesn't let us output the template in an output_dir.mkdir(parents=True, exist_ok=True) with AltTemporaryDirectory() as tmpdir_: tmpdir = Path(tmpdir_) template_dir = generate_files( repo_dir=inner_dir, context=new_context, overwrite_if_exists=True, output_dir=tmpdir ) template_dir = Path(template_dir) for name in os.listdir(template_dir): move(str(template_dir / name), str(output_dir)) return new_context def _get_skip_paths(cruft_state: CruftState, pyproject_file: Path) -> Set[Path]: skip_cruft = cruft_state.get("skip", []) if toml and pyproject_file.is_file(): pyproject_cruft = toml.loads(pyproject_file.read_text()).get("tool", {}).get("cruft", {}) skip_cruft.extend(pyproject_cruft.get("skip", [])) elif pyproject_file.is_file(): warn( "pyproject.toml is present in repo, but `toml` package is not installed. " "Cruft configuration may be ignored." 
) return set(map(Path, skip_cruft)) def _get_deleted_files(template_dir: Path, project_dir: Path): cwd = Path.cwd() os.chdir(template_dir) template_paths = set(Path(".").glob("**/*")) os.chdir(cwd) os.chdir(project_dir) deleted_paths = set(filter(lambda path: not path.exists(), template_paths)) os.chdir(cwd) return deleted_paths def _remove_readonly(func, path, _): os.chmod(path, stat.S_IWRITE) func(path) def _remove_single_path(path: Path): if path.is_dir(): try: rmtree(path, ignore_errors=False, onerror=_remove_readonly) except Exception: raise Exception("Failed to remove directory.") elif path.is_file(): try: path.unlink() except PermissionError: path.chmod(stat.S_IWRITE) path.unlink() except Exception as exc: raise Exception("Failed to remove file.") from exc def _remove_paths(root: Path, paths_to_remove: Set[Union[Path, str]]): abs_paths_to_remove = [] for path_to_remove in paths_to_remove: if isinstance(path_to_remove, Path): abs_paths_to_remove.append(root / path_to_remove) elif isinstance(path_to_remove, str): abs_paths_to_remove += list(root.glob(path_to_remove)) else: warn(f"{path_to_remove} is not a Path object or a string glob-pattern") for path in abs_paths_to_remove: _remove_single_path(path)
true
true
f70827e38991a94ce5912d7f8da1ac9aef505f01
316
py
Python
ML_CW1/assgn_1_part_1/2_multiple_variables/plot_cost.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
ML_CW1/assgn_1_part_1/2_multiple_variables/plot_cost.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
ML_CW1/assgn_1_part_1/2_multiple_variables/plot_cost.py
ShellySrivastava/Machine-Learning
bfdea30c06abe4228c103ae525adcf990015983f
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt
import os


def plot_cost(cost):
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Iterations')
    ax1.set_ylabel('Cost')
    plt.plot(cost)
    fig.tight_layout()
    plot_filename = os.path.join(os.getcwd(), 'figures', 'cost.png')
    plt.savefig(plot_filename)
    plt.show()
22.571429
68
0.661392
import matplotlib.pyplot as plt
import os


def plot_cost(cost):
    fig, ax1 = plt.subplots()
    ax1.set_xlabel('Iterations')
    ax1.set_ylabel('Cost')
    plt.plot(cost)
    fig.tight_layout()
    plot_filename = os.path.join(os.getcwd(), 'figures', 'cost.png')
    plt.savefig(plot_filename)
    plt.show()
true
true
f7082804270c08c354dbffdcaf9e8d5dc7390978
34
py
Python
settable_generator/__init__.py
markusschmaus/map_structure
f1417230cff2657b28dcb44f70ef38ed02d7ba7d
[ "MIT" ]
null
null
null
settable_generator/__init__.py
markusschmaus/map_structure
f1417230cff2657b28dcb44f70ef38ed02d7ba7d
[ "MIT" ]
null
null
null
settable_generator/__init__.py
markusschmaus/map_structure
f1417230cff2657b28dcb44f70ef38ed02d7ba7d
[ "MIT" ]
null
null
null
from .settable_generator import *
17
33
0.823529
from .settable_generator import *
true
true
f70828b88a42c74567ef99b3f01c0cad6c366906
2,053
py
Python
tensorflow_datasets/image/__init__.py
ubershmekel/datasets
555220a3bf048a1bed6aed5db97696fb83088b83
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/image/__init__.py
ubershmekel/datasets
555220a3bf048a1bed6aed5db97696fb83088b83
[ "Apache-2.0" ]
null
null
null
tensorflow_datasets/image/__init__.py
ubershmekel/datasets
555220a3bf048a1bed6aed5db97696fb83088b83
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Image datasets."""

from tensorflow_datasets.image.cats_vs_dogs import CatsVsDogs
from tensorflow_datasets.image.celeba import CelebA
from tensorflow_datasets.image.celebahq import CelebAHq
from tensorflow_datasets.image.chexpert import Chexpert
from tensorflow_datasets.image.cifar import Cifar10
from tensorflow_datasets.image.cifar import Cifar100
from tensorflow_datasets.image.coco import Coco2014
from tensorflow_datasets.image.colorectal_histology import ColorectalHistology
from tensorflow_datasets.image.colorectal_histology import ColorectalHistologyLarge
from tensorflow_datasets.image.diabetic_retinopathy_detection import DiabeticRetinopathyDetection
from tensorflow_datasets.image.flowers import TFFlowers
from tensorflow_datasets.image.horses_or_humans import HorsesOrHumans
from tensorflow_datasets.image.image_folder import ImageLabelFolder
from tensorflow_datasets.image.imagenet import Imagenet2012
from tensorflow_datasets.image.lsun import Lsun
from tensorflow_datasets.image.mnist import FashionMNIST
from tensorflow_datasets.image.mnist import KMNIST
from tensorflow_datasets.image.mnist import MNIST
from tensorflow_datasets.image.omniglot import Omniglot
from tensorflow_datasets.image.open_images import OpenImagesV4
from tensorflow_datasets.image.quickdraw import QuickdrawBitmap
from tensorflow_datasets.image.rock_paper_scissors import RockPaperScissors
from tensorflow_datasets.image.svhn import SvhnCropped
50.073171
97
0.857282
from tensorflow_datasets.image.cats_vs_dogs import CatsVsDogs
from tensorflow_datasets.image.celeba import CelebA
from tensorflow_datasets.image.celebahq import CelebAHq
from tensorflow_datasets.image.chexpert import Chexpert
from tensorflow_datasets.image.cifar import Cifar10
from tensorflow_datasets.image.cifar import Cifar100
from tensorflow_datasets.image.coco import Coco2014
from tensorflow_datasets.image.colorectal_histology import ColorectalHistology
from tensorflow_datasets.image.colorectal_histology import ColorectalHistologyLarge
from tensorflow_datasets.image.diabetic_retinopathy_detection import DiabeticRetinopathyDetection
from tensorflow_datasets.image.flowers import TFFlowers
from tensorflow_datasets.image.horses_or_humans import HorsesOrHumans
from tensorflow_datasets.image.image_folder import ImageLabelFolder
from tensorflow_datasets.image.imagenet import Imagenet2012
from tensorflow_datasets.image.lsun import Lsun
from tensorflow_datasets.image.mnist import FashionMNIST
from tensorflow_datasets.image.mnist import KMNIST
from tensorflow_datasets.image.mnist import MNIST
from tensorflow_datasets.image.omniglot import Omniglot
from tensorflow_datasets.image.open_images import OpenImagesV4
from tensorflow_datasets.image.quickdraw import QuickdrawBitmap
from tensorflow_datasets.image.rock_paper_scissors import RockPaperScissors
from tensorflow_datasets.image.svhn import SvhnCropped
true
true
f708296b42266a152e2db50c2035ba466629cfe8
4,729
py
Python
src/TOU.py
e2thenegpii/EnergyCalc
6036b08d01eafae33e80e8754c0e0215c78db6fe
[ "MIT" ]
null
null
null
src/TOU.py
e2thenegpii/EnergyCalc
6036b08d01eafae33e80e8754c0e0215c78db6fe
[ "MIT" ]
null
null
null
src/TOU.py
e2thenegpii/EnergyCalc
6036b08d01eafae33e80e8754c0e0215c78db6fe
[ "MIT" ]
null
null
null
from enum import Enum from datetime import datetime, date from dateutil.relativedelta import relativedelta, MO import argparse import holidays import pandas as pd class BGEHolidays(holidays.HolidayBase): def _populate(self, year): holidays.UnitedStates._populate(self, year) # Remove Martin Luther King Day self.pop(date(year, 1, 1) + relativedelta(weekday=MO(+3)), None) # Remove Columbus Day self.pop(date(year, 10, 1) + relativedelta(weekday=MO(+2)), None) # Remove Veterans Day self.pop(date(year, 11, 11), None) # Add good friday self[holidays.easter(year) + relativedelta(days=-2)] = 'Good Friday' class TimeOfUse(Enum): peak = 0 shoulder = 1 offpeak = 2 class Season(Enum): Winter = 0 Summer = 1 @classmethod def get(cls, dt): d = dt.date() if date(dt.year, 6, 1) <= d and date(dt.year, 9, 30) >= d: return cls.Summer return cls.Winter class Schedule(Enum): R = 'R' RL = 'RL' EV = 'EV' EVP = 'EVP' def getTOU(self, dt): d = dt.date() t = dt.time() bge_holidays = BGEHolidays(dt.year) if self == self.R: return TimeOfUse.offpeak elif self == self.RL: if Season.get(dt) == Season.Summer: if (t.hour >=10 and t.hour < 20) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak elif ((t.hour >= 7 and t.hour < 10) or (t.hour >= 20 and t.hour < 23)) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.shoulder else: return TimeOfUse.offpeak else: if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak elif (t.hour >= 11 and t.hour < 17) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.shoulder else: return TimeOfUse.offpeak elif self in (self.EV, self.EVP): if Season.get(dt) == Season.Summer: if (t.hour >= 10 and t.hour < 20) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak else: return TimeOfUse.offpeak else: if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak else: return TimeOfUse.offpeak rates = { (Schedule.R, Season.Summer, TimeOfUse.offpeak): .06722, (Schedule.R, Season.Winter, TimeOfUse.offpeak): .07805, (Schedule.RL, Season.Summer, TimeOfUse.peak): .08465, (Schedule.RL, Season.Summer, TimeOfUse.shoulder): .06069, (Schedule.RL, Season.Summer, TimeOfUse.offpeak): .05744, (Schedule.RL, Season.Winter, TimeOfUse.peak): .09053, (Schedule.RL, Season.Winter, TimeOfUse.shoulder): .07944, (Schedule.RL, Season.Winter, TimeOfUse.offpeak): .07166, (Schedule.EV, Season.Summer, TimeOfUse.peak): .1227, (Schedule.EV, Season.Summer, TimeOfUse.offpeak): .03886, (Schedule.EV, Season.Winter, TimeOfUse.peak): .18474, (Schedule.EV, Season.Winter, TimeOfUse.offpeak): .0426, (Schedule.EVP, Season.Summer, TimeOfUse.peak): .03886, (Schedule.EVP, Season.Summer, TimeOfUse.offpeak): .03886, (Schedule.EVP, Season.Winter, TimeOfUse.peak): .0426, (Schedule.EVP, Season.Winter, TimeOfUse.offpeak): .0426 } def get_rate(dt, schedule = Schedule.R): bge_holidays = BGEHolidays(dt.year) season = Season.get(dt) tou = schedule.getTOU(dt) return rates[(schedule, season, tou)] def process_row(x): dt = x['DATE_START TIME'] val = x['USAGE'] return pd.Series([dt] + [get_rate(dt, x) * (val + .0700) for x in Schedule], index=['DATE_START TIME'] + [x.value for x in Schedule]) def main(): parser = argparse.ArgumentParser() parser.add_argument('input_file', type=argparse.FileType('r')) args = parser.parse_args() df = pd.read_csv(args.input_file, parse_dates=[['DATE', 'START 
TIME']])[['DATE_START TIME', 'USAGE']] schedules = df.apply(process_row, axis=1) print(schedules[['R', 'RL', 'EV', 'EVP']].sum()) if __name__ == '__main__': main()
34.021583
137
0.554663
from enum import Enum from datetime import datetime, date from dateutil.relativedelta import relativedelta, MO import argparse import holidays import pandas as pd class BGEHolidays(holidays.HolidayBase): def _populate(self, year): holidays.UnitedStates._populate(self, year) self.pop(date(year, 1, 1) + relativedelta(weekday=MO(+3)), None) self.pop(date(year, 10, 1) + relativedelta(weekday=MO(+2)), None) self.pop(date(year, 11, 11), None) self[holidays.easter(year) + relativedelta(days=-2)] = 'Good Friday' class TimeOfUse(Enum): peak = 0 shoulder = 1 offpeak = 2 class Season(Enum): Winter = 0 Summer = 1 @classmethod def get(cls, dt): d = dt.date() if date(dt.year, 6, 1) <= d and date(dt.year, 9, 30) >= d: return cls.Summer return cls.Winter class Schedule(Enum): R = 'R' RL = 'RL' EV = 'EV' EVP = 'EVP' def getTOU(self, dt): d = dt.date() t = dt.time() bge_holidays = BGEHolidays(dt.year) if self == self.R: return TimeOfUse.offpeak elif self == self.RL: if Season.get(dt) == Season.Summer: if (t.hour >=10 and t.hour < 20) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak elif ((t.hour >= 7 and t.hour < 10) or (t.hour >= 20 and t.hour < 23)) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.shoulder else: return TimeOfUse.offpeak else: if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak elif (t.hour >= 11 and t.hour < 17) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.shoulder else: return TimeOfUse.offpeak elif self in (self.EV, self.EVP): if Season.get(dt) == Season.Summer: if (t.hour >= 10 and t.hour < 20) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak else: return TimeOfUse.offpeak else: if ((t.hour >= 7 and t.hour < 11) or (t.hour >= 17 and t.hour < 21)) and \ (dt.weekday() < 5) and \ (d not in bge_holidays): return TimeOfUse.peak else: return TimeOfUse.offpeak rates = { (Schedule.R, Season.Summer, TimeOfUse.offpeak): .06722, (Schedule.R, Season.Winter, TimeOfUse.offpeak): .07805, (Schedule.RL, Season.Summer, TimeOfUse.peak): .08465, (Schedule.RL, Season.Summer, TimeOfUse.shoulder): .06069, (Schedule.RL, Season.Summer, TimeOfUse.offpeak): .05744, (Schedule.RL, Season.Winter, TimeOfUse.peak): .09053, (Schedule.RL, Season.Winter, TimeOfUse.shoulder): .07944, (Schedule.RL, Season.Winter, TimeOfUse.offpeak): .07166, (Schedule.EV, Season.Summer, TimeOfUse.peak): .1227, (Schedule.EV, Season.Summer, TimeOfUse.offpeak): .03886, (Schedule.EV, Season.Winter, TimeOfUse.peak): .18474, (Schedule.EV, Season.Winter, TimeOfUse.offpeak): .0426, (Schedule.EVP, Season.Summer, TimeOfUse.peak): .03886, (Schedule.EVP, Season.Summer, TimeOfUse.offpeak): .03886, (Schedule.EVP, Season.Winter, TimeOfUse.peak): .0426, (Schedule.EVP, Season.Winter, TimeOfUse.offpeak): .0426 } def get_rate(dt, schedule = Schedule.R): bge_holidays = BGEHolidays(dt.year) season = Season.get(dt) tou = schedule.getTOU(dt) return rates[(schedule, season, tou)] def process_row(x): dt = x['DATE_START TIME'] val = x['USAGE'] return pd.Series([dt] + [get_rate(dt, x) * (val + .0700) for x in Schedule], index=['DATE_START TIME'] + [x.value for x in Schedule]) def main(): parser = argparse.ArgumentParser() parser.add_argument('input_file', type=argparse.FileType('r')) args = parser.parse_args() df = pd.read_csv(args.input_file, parse_dates=[['DATE', 'START TIME']])[['DATE_START TIME', 'USAGE']] schedules = df.apply(process_row, axis=1) 
print(schedules[['R', 'RL', 'EV', 'EVP']].sum()) if __name__ == '__main__': main()
true
true
f7082a6f9e21d7c86d50e21ff2f273fcab648c48
110
py
Python
oppurtunity/admin.py
ParasBotadra/djangocrm
aeb5f6af35e74c4509ef5731549ce4ed445263d7
[ "MIT" ]
2
2018-02-15T15:33:00.000Z
2018-02-15T16:29:12.000Z
oppurtunity/admin.py
ParasBotadra/djangocrm
aeb5f6af35e74c4509ef5731549ce4ed445263d7
[ "MIT" ]
1
2018-08-31T08:54:22.000Z
2018-08-31T08:54:22.000Z
oppurtunity/admin.py
ParasBotadra/djangocrm
aeb5f6af35e74c4509ef5731549ce4ed445263d7
[ "MIT" ]
12
2017-11-02T22:32:32.000Z
2018-04-12T05:13:25.000Z
from django.contrib import admin

from oppurtunity.models import Opportunity

admin.site.register(Opportunity)
22
42
0.854545
from django.contrib import admin

from oppurtunity.models import Opportunity

admin.site.register(Opportunity)
true
true
f7082ad992bcca14397fb0a69109b4b7dd0df0b1
6,541
py
Python
FRAMS_STUDENT.py
AshwinRameshP/AttendanceSystem_FaceRecognition
23c590c10ac296816d7cff23445d28c3863d0138
[ "MIT" ]
1
2021-02-17T19:36:20.000Z
2021-02-17T19:36:20.000Z
FRAMS_STUDENT.py
AshwinRameshP/AttendanceSystem_FaceRecognition
23c590c10ac296816d7cff23445d28c3863d0138
[ "MIT" ]
null
null
null
FRAMS_STUDENT.py
AshwinRameshP/AttendanceSystem_FaceRecognition
23c590c10ac296816d7cff23445d28c3863d0138
[ "MIT" ]
null
null
null
import tkinter as tk from tkinter import * import cv2 import csv import os import numpy as np from PIL import Image,ImageTk import pandas as pd import datetime import time ##Error screen2 def del_sc2(): sc2.destroy() def err_screen1(): global sc2 sc2 = tk.Tk() sc2.geometry('300x100') sc2.iconbitmap('FRAMS.ico') sc2.title('Warning!!') sc2.configure(background='snow') Label(sc2,text='Please enter your subject name!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack() Button(sc2,text='OK',command=del_sc2,fg="black" ,bg="lawn green" ,width=9 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold ')).place(x=90,y= 50) def Fillattendances(): sub = tx.get() now = time.time() ###For calculate seconds of video future = now + 20 if time.time() < future: if sub == '': err_screen1() else: recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer() try: recognizer.read("TrainingImageLabel\Trainner.yml") except: e = 'Model not found,Please train model' Notifica.configure(text=e, bg="red", fg="black", width=33, font=('times', 15, 'bold')) Notifica.place(x=20, y=250) harcascadePath = "haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(harcascadePath) df = pd.read_csv("StudentDetails\StudentDetails.csv") cam = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_SIMPLEX col_names = ['Enrollment', 'Name', 'Date', 'Time'] attendance = pd.DataFrame(columns=col_names) while True: ret, im = cam.read() gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.2, 5) for (x, y, w, h) in faces: global Id Id, conf = recognizer.predict(gray[y:y + h, x:x + w]) if (conf < 70): print(conf) global Subject global aa global date global timeStamp Subject = tx.get() ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') aa = df.loc[df['Enrollment'] == Id]['Name'].values global tt tt = str(Id) + "-" + aa En = '15624031' + str(Id) attendance.loc[len(attendance)] = [Id, aa, date, timeStamp] cv2.rectangle(im, (x, y), (x + w, y + h), (0, 260, 0), 7) cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0,), 4) else: Id = 'Unknown' tt = str(Id) cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7) cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4) if time.time() > future: break attendance = attendance.drop_duplicates(['Enrollment'], keep='first') cv2.imshow('Filling attedance..', im) key = cv2.waitKey(30) & 0xff if key == 27: break ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') Hour, Minute, Second = timeStamp.split(":") fileName = "Attendance/" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv" attendance = attendance.drop_duplicates(['Enrollment'], keep='first') print(attendance) attendance.to_csv(fileName, index=False) M = 'Attendance filled Successfully' Notifica.configure(text=M, bg="Green", fg="white", width=33, font=('times', 15, 'bold')) Notifica.place(x=20, y=250) cam.release() cv2.destroyAllWindows() import csv import tkinter root = tkinter.Tk() root.title("Attendance of " + Subject) root.configure(background='snow') cs = './' + fileName with open(cs, newline="") as file: reader = csv.reader(file) r = 0 for col in reader: c = 0 for row in col: # i've added some styling label = tkinter.Label(root, width=8, height=1, fg="black", font=('times', 15, ' bold '), bg="lawn green", text=row, 
relief=tkinter.RIDGE) label.grid(row=r, column=c) c += 1 r += 1 root.mainloop() print(attendance) if __name__ == '__main__': ###windo is frame for subject choosing windo = tk.Tk() windo.iconbitmap('FRAMS.ico') windo.title("Enter subject name...") windo.geometry('580x320') windo.configure(background='snow') Notifica = tk.Label(windo, text="Attendance filled Successfully", bg="Green", fg="white", width=33, height=2, font=('times', 15, 'bold')) def Attf(): import subprocess subprocess.Popen( r'explorer /select,".\Attendance\Manually Attendance\"') # open attendance sheet window attf = tk.Button(windo, text="Check Sheets", command=Attf, fg="black", bg="lawn green", width=12, height=1, activebackground="Red", font=('times', 14, ' bold ')) attf.place(x=430, y=255) sub = tk.Label(windo, text="Enter Subject", width=15, height=2, fg="white", bg="blue2", font=('times', 15, ' bold ')) sub.place(x=30, y=100) tx = tk.Entry(windo, width=20, bg="yellow", fg="red", font=('times', 23, ' bold ')) tx.place(x=250, y=105) fill_a = tk.Button(windo, text="Fill Attendance", fg="white", command=Fillattendances, bg="deep pink", width=20, height=2, activebackground="Red", font=('times', 15, ' bold ')) fill_a.place(x=250, y=160) windo.mainloop()
41.138365
165
0.504663
import tkinter as tk from tkinter import * import cv2 import csv import os import numpy as np from PIL import Image,ImageTk import pandas as pd import datetime import time def del_sc2(): sc2.destroy() def err_screen1(): global sc2 sc2 = tk.Tk() sc2.geometry('300x100') sc2.iconbitmap('FRAMS.ico') sc2.title('Warning!!') sc2.configure(background='snow') Label(sc2,text='Please enter your subject name!!!',fg='red',bg='white',font=('times', 16, ' bold ')).pack() Button(sc2,text='OK',command=del_sc2,fg="black" ,bg="lawn green" ,width=9 ,height=1, activebackground = "Red" ,font=('times', 15, ' bold ')).place(x=90,y= 50) def Fillattendances(): sub = tx.get() now = time.time() future = now + 20 if time.time() < future: if sub == '': err_screen1() else: recognizer = cv2.face.LBPHFaceRecognizer_create() try: recognizer.read("TrainingImageLabel\Trainner.yml") except: e = 'Model not found,Please train model' Notifica.configure(text=e, bg="red", fg="black", width=33, font=('times', 15, 'bold')) Notifica.place(x=20, y=250) harcascadePath = "haarcascade_frontalface_default.xml" faceCascade = cv2.CascadeClassifier(harcascadePath) df = pd.read_csv("StudentDetails\StudentDetails.csv") cam = cv2.VideoCapture(0) font = cv2.FONT_HERSHEY_SIMPLEX col_names = ['Enrollment', 'Name', 'Date', 'Time'] attendance = pd.DataFrame(columns=col_names) while True: ret, im = cam.read() gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale(gray, 1.2, 5) for (x, y, w, h) in faces: global Id Id, conf = recognizer.predict(gray[y:y + h, x:x + w]) if (conf < 70): print(conf) global Subject global aa global date global timeStamp Subject = tx.get() ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') aa = df.loc[df['Enrollment'] == Id]['Name'].values global tt tt = str(Id) + "-" + aa En = '15624031' + str(Id) attendance.loc[len(attendance)] = [Id, aa, date, timeStamp] cv2.rectangle(im, (x, y), (x + w, y + h), (0, 260, 0), 7) cv2.putText(im, str(tt), (x + h, y), font, 1, (255, 255, 0,), 4) else: Id = 'Unknown' tt = str(Id) cv2.rectangle(im, (x, y), (x + w, y + h), (0, 25, 255), 7) cv2.putText(im, str(tt), (x + h, y), font, 1, (0, 25, 255), 4) if time.time() > future: break attendance = attendance.drop_duplicates(['Enrollment'], keep='first') cv2.imshow('Filling attedance..', im) key = cv2.waitKey(30) & 0xff if key == 27: break ts = time.time() date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d') timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S') Hour, Minute, Second = timeStamp.split(":") fileName = "Attendance/" + Subject + "_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv" attendance = attendance.drop_duplicates(['Enrollment'], keep='first') print(attendance) attendance.to_csv(fileName, index=False) M = 'Attendance filled Successfully' Notifica.configure(text=M, bg="Green", fg="white", width=33, font=('times', 15, 'bold')) Notifica.place(x=20, y=250) cam.release() cv2.destroyAllWindows() import csv import tkinter root = tkinter.Tk() root.title("Attendance of " + Subject) root.configure(background='snow') cs = './' + fileName with open(cs, newline="") as file: reader = csv.reader(file) r = 0 for col in reader: c = 0 for row in col: label = tkinter.Label(root, width=8, height=1, fg="black", font=('times', 15, ' bold '), bg="lawn green", text=row, relief=tkinter.RIDGE) label.grid(row=r, column=c) c += 1 r += 1 root.mainloop() print(attendance) if __name__ == '__main__': 
###windo is frame for subject choosing windo = tk.Tk() windo.iconbitmap('FRAMS.ico') windo.title("Enter subject name...") windo.geometry('580x320') windo.configure(background='snow') Notifica = tk.Label(windo, text="Attendance filled Successfully", bg="Green", fg="white", width=33, height=2, font=('times', 15, 'bold')) def Attf(): import subprocess subprocess.Popen( r'explorer /select,".\Attendance\Manually Attendance\"') # open attendance sheet window attf = tk.Button(windo, text="Check Sheets", command=Attf, fg="black", bg="lawn green", width=12, height=1, activebackground="Red", font=('times', 14, ' bold ')) attf.place(x=430, y=255) sub = tk.Label(windo, text="Enter Subject", width=15, height=2, fg="white", bg="blue2", font=('times', 15, ' bold ')) sub.place(x=30, y=100) tx = tk.Entry(windo, width=20, bg="yellow", fg="red", font=('times', 23, ' bold ')) tx.place(x=250, y=105) fill_a = tk.Button(windo, text="Fill Attendance", fg="white", command=Fillattendances, bg="deep pink", width=20, height=2, activebackground="Red", font=('times', 15, ' bold ')) fill_a.place(x=250, y=160) windo.mainloop()
true
true
f7082d5845fd1efee69baec36ac43ffaecac25f9
9,352
py
Python
tensorflow_text/python/ops/bert_tokenizer.py
hanneshapke/text
8bebbbe28749de5509be474bc475cef83490f013
[ "Apache-2.0" ]
null
null
null
tensorflow_text/python/ops/bert_tokenizer.py
hanneshapke/text
8bebbbe28749de5509be474bc475cef83490f013
[ "Apache-2.0" ]
null
null
null
tensorflow_text/python/ops/bert_tokenizer.py
hanneshapke/text
8bebbbe28749de5509be474bc475cef83490f013
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2020 TF.Text Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Basic tokenization ops for BERT preprocessing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import string_ops from tensorflow_text.python.ops import regex_split_ops from tensorflow_text.python.ops.normalize_ops import case_fold_utf8 from tensorflow_text.python.ops.normalize_ops import normalize_utf8 from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer _DELIM_REGEX = [ r"\s+", r"|".join([ r"[!-/]", r"[:-@]", r"[\[-`]", r"[{-~]", r"[\p{P}]", ]), r"|".join([ r"[\x{4E00}-\x{9FFF}]", r"[\x{3400}-\x{4DBF}]", r"[\x{20000}-\x{2A6DF}]", r"[\x{2A700}-\x{2B73F}]", r"[\x{2B740}-\x{2B81F}]", r"[\x{2B820}-\x{2CEAF}]", r"[\x{F900}-\x{FAFF}]", r"[\x{2F800}-\x{2FA1F}]", ]), ] _DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX) _KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX) _KEEP_DELIM_NO_WHITESPACE.remove(r"\s+") _UNUSED_TOKEN_REGEX = "\\[unused\\d+\\]" _KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE) class BasicTokenizer(TokenizerWithOffsets): r"""Basic tokenizer for for tokenizing text. A basic tokenizer that tokenizes using some deterministic rules: - For most languages, this tokenizer will split on whitespace. - For Chinese, Japanese, and Korean characters, this tokenizer will split on Unicode characters. Attributes: lower_case: bool - If true, a preprocessing step is added to lowercase the text, apply NFD normalization, and strip accents characters. keep_whitespace: bool - If true, preserves whitespace characters instead of stripping them away. normalization_form: If true and lower_case=False, the input text will be normalized to `normalization_form`. See normalize_utf8() op for a list of valid values. preserve_unused_token: If true, text in the regex format "\\[unused\\d+\\]" will be treated as a token and thus remain preserved as is to be looked up in the vocabulary. 
""" def __init__(self, lower_case=False, keep_whitespace=False, normalization_form=None, preserve_unused_token=False): self._lower_case = lower_case if not keep_whitespace: self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN else: self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN self._normalization_form = normalization_form if preserve_unused_token: self._delim_regex_pattern = "|".join( [_UNUSED_TOKEN_REGEX, _DELIM_REGEX_PATTERN]) self._keep_delim_regex_pattern = "|".join( [_UNUSED_TOKEN_REGEX, self._keep_delim_regex_pattern]) else: self._delim_regex_pattern = _DELIM_REGEX_PATTERN def tokenize(self, text_input): tokens, _, _ = self.tokenize_with_offsets(text_input) return tokens def tokenize_with_offsets(self, text_input): """Performs basic word tokenization for BERT. Args: text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings. Returns: A `RaggedTensor` of tokenized strings from text_input. """ # lowercase and strip accents (if option is set) if self._lower_case: text_input = case_fold_utf8(text_input) text_input = normalize_utf8(text_input, "NFD") text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "") else: # utf8 normalization if self._normalization_form is not None: text_input = normalize_utf8(text_input, self._normalization_form) # strip out control characters text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ") return regex_split_ops.regex_split_with_offsets( text_input, self._delim_regex_pattern, self._keep_delim_regex_pattern, "BertBasicTokenizer") class BertTokenizer(TokenizerWithOffsets): r"""Tokenizer used for BERT. This tokenizer applies an end-to-end, text string to wordpiece tokenization. It first applies basic tokenization, and then follwed by wordpiece tokenization. See BasicTokenizer and WordpieceTokenizer for their respective details. Attributes: vocab_lookup_table: A lookup table implementing the LookupInterface containing the vocabulary of subwords or a string which is the file path to the vocab.txt file. suffix_indicator: (optional) The characters prepended to a wordpiece to indicate that it is a suffix to another subword. Default is '##'. max_bytes_per_word: (optional) Max size of input token. Default is 100. max_chars_per_token: (optional) Max size of subwords, excluding suffix indicator. If known, providing this improves the efficiency of decoding long words. token_out_type: (optional) The type of the token to return. This can be `tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`. unknown_token: (optional) The value to use when an unknown token is found. Default is "[UNK]". If this is set to a string, and `token_out_type` is `tf.int64`, the `vocab_lookup_table` is used to convert the `unknown_token` to an integer. If this is set to `None`, out-of-vocabulary tokens are left as is. split_unknown_characters: (optional) Whether to split out single unknown characters as subtokens. If False (default), words containing unknown characters will be treated as single unknown tokens. lower_case: bool - If true, a preprocessing step is added to lowercase the text, apply NFD normalization, and strip accents characters. keep_whitespace: bool - If true, preserves whitespace characters instead of stripping them away. normalization_form: If true and lower_case=False, the input text will be normalized to `normalization_form`. See normalize_utf8() op for a list of valid values. 
preserve_unused_token: If true, text in the regex format `\\[unused\\d+\\]` will be treated as a token and thus remain preserved as is to be looked up in the vocabulary. """ def __init__(self, vocab_lookup_table, suffix_indicator="##", max_bytes_per_word=100, max_chars_per_token=None, token_out_type=dtypes.int64, unknown_token="[UNK]", split_unknown_characters=False, lower_case=False, keep_whitespace=False, normalization_form=None, preserve_unused_token=False): if isinstance(vocab_lookup_table, str) or isinstance( vocab_lookup_table, ops.Tensor): init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table) vocab_lookup_table = lookup_ops.StaticVocabularyTableV1( init, num_oov_buckets=1, lookup_key_dtype=dtypes.string) print("Before ", type(lower_case)) if isinstance(lower_case, ops.Tensor): lower_case = tf.compat.v1.get_default_session().run(lower_case) print("After ", type(lower_case)) self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace, normalization_form, preserve_unused_token) self._wordpiece_tokenizer = WordpieceTokenizer( vocab_lookup_table, suffix_indicator, max_bytes_per_word, max_chars_per_token, token_out_type, unknown_token, split_unknown_characters) def tokenize_with_offsets(self, text_input): tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input) wordpieces, wp_begin, wp_end = ( self._wordpiece_tokenizer.tokenize_with_offsets(tokens)) begin_expanded = array_ops.expand_dims(begin, axis=2) final_begin = begin_expanded + wp_begin final_end = begin_expanded + wp_end return wordpieces, final_begin, final_end def tokenize(self, text_input): """Performs untokenized text to wordpiece tokenization for BERT. Args: text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings. Returns: A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string contents (or ID in the vocab_lookup_table representing that string) of the `jth` token in `input[i1...iN]` """ tokens = self._basic_tokenizer.tokenize(text_input) return self._wordpiece_tokenizer.tokenize(tokens)
41.017544
80
0.70648
from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import string_ops from tensorflow_text.python.ops import regex_split_ops from tensorflow_text.python.ops.normalize_ops import case_fold_utf8 from tensorflow_text.python.ops.normalize_ops import normalize_utf8 from tensorflow_text.python.ops.tokenization import TokenizerWithOffsets from tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer _DELIM_REGEX = [ r"\s+", r"|".join([ r"[!-/]", r"[:-@]", r"[\[-`]", r"[{-~]", r"[\p{P}]", ]), r"|".join([ r"[\x{4E00}-\x{9FFF}]", r"[\x{3400}-\x{4DBF}]", r"[\x{20000}-\x{2A6DF}]", r"[\x{2A700}-\x{2B73F}]", r"[\x{2B740}-\x{2B81F}]", r"[\x{2B820}-\x{2CEAF}]", r"[\x{F900}-\x{FAFF}]", r"[\x{2F800}-\x{2FA1F}]", ]), ] _DELIM_REGEX_PATTERN = "|".join(_DELIM_REGEX) _KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX) _KEEP_DELIM_NO_WHITESPACE.remove(r"\s+") _UNUSED_TOKEN_REGEX = "\\[unused\\d+\\]" _KEEP_DELIM_NO_WHITESPACE_PATTERN = "|".join(_KEEP_DELIM_NO_WHITESPACE) class BasicTokenizer(TokenizerWithOffsets): def __init__(self, lower_case=False, keep_whitespace=False, normalization_form=None, preserve_unused_token=False): self._lower_case = lower_case if not keep_whitespace: self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN else: self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN self._normalization_form = normalization_form if preserve_unused_token: self._delim_regex_pattern = "|".join( [_UNUSED_TOKEN_REGEX, _DELIM_REGEX_PATTERN]) self._keep_delim_regex_pattern = "|".join( [_UNUSED_TOKEN_REGEX, self._keep_delim_regex_pattern]) else: self._delim_regex_pattern = _DELIM_REGEX_PATTERN def tokenize(self, text_input): tokens, _, _ = self.tokenize_with_offsets(text_input) return tokens def tokenize_with_offsets(self, text_input): if self._lower_case: text_input = case_fold_utf8(text_input) text_input = normalize_utf8(text_input, "NFD") text_input = string_ops.regex_replace(text_input, r"\p{Mn}", "") else: if self._normalization_form is not None: text_input = normalize_utf8(text_input, self._normalization_form) text_input = string_ops.regex_replace(text_input, r"\p{Cc}|\p{Cf}", " ") return regex_split_ops.regex_split_with_offsets( text_input, self._delim_regex_pattern, self._keep_delim_regex_pattern, "BertBasicTokenizer") class BertTokenizer(TokenizerWithOffsets): def __init__(self, vocab_lookup_table, suffix_indicator="##", max_bytes_per_word=100, max_chars_per_token=None, token_out_type=dtypes.int64, unknown_token="[UNK]", split_unknown_characters=False, lower_case=False, keep_whitespace=False, normalization_form=None, preserve_unused_token=False): if isinstance(vocab_lookup_table, str) or isinstance( vocab_lookup_table, ops.Tensor): init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table) vocab_lookup_table = lookup_ops.StaticVocabularyTableV1( init, num_oov_buckets=1, lookup_key_dtype=dtypes.string) print("Before ", type(lower_case)) if isinstance(lower_case, ops.Tensor): lower_case = tf.compat.v1.get_default_session().run(lower_case) print("After ", type(lower_case)) self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace, normalization_form, preserve_unused_token) self._wordpiece_tokenizer = WordpieceTokenizer( vocab_lookup_table, suffix_indicator, max_bytes_per_word, 
max_chars_per_token, token_out_type, unknown_token, split_unknown_characters) def tokenize_with_offsets(self, text_input): tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input) wordpieces, wp_begin, wp_end = ( self._wordpiece_tokenizer.tokenize_with_offsets(tokens)) begin_expanded = array_ops.expand_dims(begin, axis=2) final_begin = begin_expanded + wp_begin final_end = begin_expanded + wp_end return wordpieces, final_begin, final_end def tokenize(self, text_input): tokens = self._basic_tokenizer.tokenize(text_input) return self._wordpiece_tokenizer.tokenize(tokens)
true
true
f7082efb53a2eda2c514e37d2265b58567d794a1
29,415
py
Python
python/ccxt/bithumb.py
DanPmkr/ccxt
0a6cc2b805323b5ec226feff2347f07194104364
[ "MIT" ]
null
null
null
python/ccxt/bithumb.py
DanPmkr/ccxt
0a6cc2b805323b5ec226feff2347f07194104364
[ "MIT" ]
null
null
null
python/ccxt/bithumb.py
DanPmkr/ccxt
0a6cc2b805323b5ec226feff2347f07194104364
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.base.exchange import Exchange import base64 import hashlib from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import PermissionDenied from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import BadRequest from ccxt.base.errors import InvalidAddress from ccxt.base.errors import InvalidOrder from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.decimal_to_precision import TRUNCATE from ccxt.base.decimal_to_precision import DECIMAL_PLACES from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS class bithumb(Exchange): def describe(self): return self.deep_extend(super(bithumb, self).describe(), { 'id': 'bithumb', 'name': 'Bithumb', 'countries': ['KR'], # South Korea 'rateLimit': 500, 'has': { 'cancelOrder': True, 'CORS': True, 'createMarketOrder': True, 'createOrder': True, 'fetchBalance': True, 'fetchMarkets': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchTicker': True, 'fetchTickers': True, 'fetchTrades': True, 'withdraw': True, }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/30597177-ea800172-9d5e-11e7-804c-b9d4fa9b56b0.jpg', 'api': { 'public': 'https://api.bithumb.com/public', 'private': 'https://api.bithumb.com', }, 'www': 'https://www.bithumb.com', 'doc': 'https://apidocs.bithumb.com', 'fees': 'https://en.bithumb.com/customer_support/info_fee', }, 'api': { 'public': { 'get': [ 'ticker/{currency}', 'ticker/all', 'orderbook/{currency}', 'orderbook/all', 'transaction_history/{currency}', 'transaction_history/all', ], }, 'private': { 'post': [ 'info/account', 'info/balance', 'info/wallet_address', 'info/ticker', 'info/orders', 'info/user_transactions', 'info/order_detail', 'trade/place', 'trade/cancel', 'trade/btc_withdrawal', 'trade/krw_deposit', 'trade/krw_withdrawal', 'trade/market_buy', 'trade/market_sell', ], }, }, 'fees': { 'trading': { 'maker': 0.25 / 100, 'taker': 0.25 / 100, }, }, 'precisionMode': SIGNIFICANT_DIGITS, 'exceptions': { 'Bad Request(SSL)': BadRequest, 'Bad Request(Bad Method)': BadRequest, 'Bad Request.(Auth Data)': AuthenticationError, # {"status": "5100", "message": "Bad Request.(Auth Data)"} 'Not Member': AuthenticationError, 'Invalid Apikey': AuthenticationError, # {"status":"5300","message":"Invalid Apikey"} 'Method Not Allowed.(Access IP)': PermissionDenied, 'Method Not Allowed.(BTC Adress)': InvalidAddress, 'Method Not Allowed.(Access)': PermissionDenied, 'Database Fail': ExchangeNotAvailable, 'Invalid Parameter': BadRequest, '5600': ExchangeError, 'Unknown Error': ExchangeError, 'After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions': ExchangeError, # {"status":"5100","message":"After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions"} }, }) def amount_to_precision(self, symbol, amount): return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES) def fetch_markets(self, params={}): response = self.publicGetTickerAll(params) data = self.safe_value(response, 'data') currencyIds = list(data.keys()) result = [] quote = self.safe_currency_code('KRW') for i in range(0, len(currencyIds)): currencyId = currencyIds[i] if currencyId == 'date': continue market = 
data[currencyId] base = self.safe_currency_code(currencyId) symbol = currencyId + '/' + quote active = True if isinstance(market, list): numElements = len(market) if numElements == 0: active = False result.append({ 'id': currencyId, 'symbol': symbol, 'base': base, 'quote': quote, 'info': market, 'active': active, 'precision': { 'amount': 4, 'price': 4, }, 'limits': { 'amount': { 'min': None, 'max': None, }, 'price': { 'min': None, 'max': None, }, 'cost': { 'min': 500, 'max': 5000000000, }, }, 'baseId': None, 'quoteId': None, }) return result def fetch_balance(self, params={}): self.load_markets() request = { 'currency': 'ALL', } response = self.privatePostInfoBalance(self.extend(request, params)) result = {'info': response} balances = self.safe_value(response, 'data') codes = list(self.currencies.keys()) for i in range(0, len(codes)): code = codes[i] account = self.account() currency = self.currency(code) lowerCurrencyId = self.safe_string_lower(currency, 'id') account['total'] = self.safe_float(balances, 'total_' + lowerCurrencyId) account['used'] = self.safe_float(balances, 'in_use_' + lowerCurrencyId) account['free'] = self.safe_float(balances, 'available_' + lowerCurrencyId) result[code] = account return self.parse_balance(result) def fetch_order_book(self, symbol, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'currency': market['base'], } if limit is not None: request['count'] = limit # default 30, max 30 response = self.publicGetOrderbookCurrency(self.extend(request, params)) # # { # "status":"0000", # "data":{ # "timestamp":"1587621553942", # "payment_currency":"KRW", # "order_currency":"BTC", # "bids":[ # {"price":"8652000","quantity":"0.0043"}, # {"price":"8651000","quantity":"0.0049"}, # {"price":"8650000","quantity":"8.4791"}, # ], # "asks":[ # {"price":"8654000","quantity":"0.119"}, # {"price":"8655000","quantity":"0.254"}, # {"price":"8658000","quantity":"0.119"}, # ] # } # } # data = self.safe_value(response, 'data', {}) timestamp = self.safe_integer(data, 'timestamp') return self.parse_order_book(data, timestamp, 'bids', 'asks', 'price', 'quantity') def parse_ticker(self, ticker, market=None): # # fetchTicker, fetchTickers # # { # "opening_price":"227100", # "closing_price":"228400", # "min_price":"222300", # "max_price":"230000", # "units_traded":"82618.56075337", # "acc_trade_value":"18767376138.6031", # "prev_closing_price":"227100", # "units_traded_24H":"151871.13484676", # "acc_trade_value_24H":"34247610416.8974", # "fluctate_24H":"8700", # "fluctate_rate_24H":"3.96", # "date":"1587710327264", # fetchTickers inject self # } # timestamp = self.safe_integer(ticker, 'date') symbol = None if market is not None: symbol = market['symbol'] open = self.safe_float(ticker, 'opening_price') close = self.safe_float(ticker, 'closing_price') change = None percentage = None average = None if (close is not None) and (open is not None): change = close - open if open > 0: percentage = change / open * 100 average = self.sum(open, close) / 2 baseVolume = self.safe_float(ticker, 'units_traded_24H') quoteVolume = self.safe_float(ticker, 'acc_trade_value_24H') vwap = self.vwap(baseVolume, quoteVolume) return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_float(ticker, 'max_price'), 'low': self.safe_float(ticker, 'min_price'), 'bid': self.safe_float(ticker, 'buy_price'), 'bidVolume': None, 'ask': self.safe_float(ticker, 'sell_price'), 'askVolume': None, 'vwap': vwap, 'open': open, 'close': close, 'last': 
close, 'previousClose': None, 'change': change, 'percentage': percentage, 'average': average, 'baseVolume': baseVolume, 'quoteVolume': quoteVolume, 'info': ticker, } def fetch_tickers(self, symbols=None, params={}): self.load_markets() response = self.publicGetTickerAll(params) # # { # "status":"0000", # "data":{ # "BTC":{ # "opening_price":"9045000", # "closing_price":"9132000", # "min_price":"8938000", # "max_price":"9168000", # "units_traded":"4619.79967497", # "acc_trade_value":"42021363832.5187", # "prev_closing_price":"9041000", # "units_traded_24H":"8793.5045804", # "acc_trade_value_24H":"78933458515.4962", # "fluctate_24H":"530000", # "fluctate_rate_24H":"6.16" # }, # "date":"1587710878669" # } # } # result = {} data = self.safe_value(response, 'data', {}) timestamp = self.safe_integer(data, 'date') tickers = self.omit(data, 'date') ids = list(tickers.keys()) for i in range(0, len(ids)): id = ids[i] symbol = id market = None if id in self.markets_by_id: market = self.markets_by_id[id] symbol = market['symbol'] ticker = tickers[id] isArray = isinstance(ticker, list) if not isArray: ticker['date'] = timestamp result[symbol] = self.parse_ticker(ticker, market) return result def fetch_ticker(self, symbol, params={}): self.load_markets() market = self.market(symbol) request = { 'currency': market['base'], } response = self.publicGetTickerCurrency(self.extend(request, params)) # # { # "status":"0000", # "data":{ # "opening_price":"227100", # "closing_price":"228400", # "min_price":"222300", # "max_price":"230000", # "units_traded":"82618.56075337", # "acc_trade_value":"18767376138.6031", # "prev_closing_price":"227100", # "units_traded_24H":"151871.13484676", # "acc_trade_value_24H":"34247610416.8974", # "fluctate_24H":"8700", # "fluctate_rate_24H":"3.96", # "date":"1587710327264" # } # } # data = self.safe_value(response, 'data', {}) return self.parse_ticker(data, market) def parse_trade(self, trade, market=None): # # fetchTrades(public) # # { # "transaction_date":"2020-04-23 22:21:46", # "type":"ask", # "units_traded":"0.0125", # "price":"8667000", # "total":"108337" # } # # fetchOrder(private) # # { # "transaction_date": "1572497603902030", # "price": "8601000", # "units": "0.005", # "fee_currency": "KRW", # "fee": "107.51", # "total": "43005" # } # # a workaround for their bug in date format, hours are not 0-padded timestamp = None transactionDatetime = self.safe_string(trade, 'transaction_date') if transactionDatetime is not None: parts = transactionDatetime.split(' ') numParts = len(parts) if numParts > 1: transactionDate = parts[0] transactionTime = parts[1] if len(transactionTime) < 8: transactionTime = '0' + transactionTime timestamp = self.parse8601(transactionDate + ' ' + transactionTime) else: timestamp = self.safe_integer_product(trade, 'transaction_date', 0.001) if timestamp is not None: timestamp -= 9 * 3600000 # they report UTC + 9 hours, server in Korean timezone type = None side = self.safe_string(trade, 'type') side = 'sell' if (side == 'ask') else 'buy' id = self.safe_string(trade, 'cont_no') symbol = None if market is not None: symbol = market['symbol'] price = self.safe_float(trade, 'price') amount = self.safe_float(trade, 'units_traded') cost = self.safe_float(trade, 'total') if cost is None: if amount is not None: if price is not None: cost = price * amount fee = None feeCost = self.safe_float(trade, 'fee') if feeCost is not None: feeCurrencyId = self.safe_string(trade, 'fee_currency') feeCurrencyCode = self.common_currency_code(feeCurrencyId) fee = { 'cost': 
feeCost, 'currency': feeCurrencyCode, } return { 'id': id, 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'order': None, 'type': type, 'side': side, 'takerOrMaker': None, 'price': price, 'amount': amount, 'cost': cost, 'fee': fee, } def fetch_trades(self, symbol, since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'currency': market['base'], } if limit is None: request['count'] = limit # default 20, max 100 response = self.publicGetTransactionHistoryCurrency(self.extend(request, params)) # # { # "status":"0000", # "data":[ # { # "transaction_date":"2020-04-23 22:21:46", # "type":"ask", # "units_traded":"0.0125", # "price":"8667000", # "total":"108337" # }, # ] # } # data = self.safe_value(response, 'data', []) return self.parse_trades(data, market, since, limit) def create_order(self, symbol, type, side, amount, price=None, params={}): self.load_markets() market = self.market(symbol) request = { 'order_currency': market['id'], 'Payment_currency': market['quote'], 'units': amount, } method = 'privatePostTradePlace' if type == 'limit': request['price'] = price request['type'] = 'bid' if (side == 'buy') else 'ask' else: method = 'privatePostTradeMarket' + self.capitalize(side) response = getattr(self, method)(self.extend(request, params)) id = self.safe_string(response, 'order_id') if id is None: raise InvalidOrder(self.id + ' createOrder did not return an order id') return { 'info': response, 'symbol': symbol, 'type': type, 'side': side, 'id': id, } def fetch_order(self, id, symbol=None, params={}): if symbol is None: raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument') self.load_markets() market = self.market(symbol) request = { 'order_id': id, 'count': 1, 'order_currency': market['base'], 'payment_currency': market['quote'], } response = self.privatePostInfoOrderDetail(self.extend(request, params)) # # { # "status": "0000", # "data": { # "transaction_date": "1572497603668315", # "type": "bid", # "order_status": "Completed", # "order_currency": "BTC", # "payment_currency": "KRW", # "order_price": "8601000", # "order_qty": "0.007", # "cancel_date": "", # "cancel_type": "", # "contract": [ # { # "transaction_date": "1572497603902030", # "price": "8601000", # "units": "0.005", # "fee_currency": "KRW", # "fee": "107.51", # "total": "43005" # }, # ] # } # } # data = self.safe_value(response, 'data') return self.parse_order(self.extend(data, {'order_id': id}), market) def parse_order_status(self, status): statuses = { 'Pending': 'open', 'Completed': 'closed', 'Cancel': 'canceled', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): # # fetchOrder # # { # "transaction_date": "1572497603668315", # "type": "bid", # "order_status": "Completed", # "order_currency": "BTC", # "payment_currency": "KRW", # "order_price": "8601000", # "order_qty": "0.007", # "cancel_date": "", # "cancel_type": "", # "contract": [ # { # "transaction_date": "1572497603902030", # "price": "8601000", # "units": "0.005", # "fee_currency": "KRW", # "fee": "107.51", # "total": "43005" # }, # ] # } # # fetchOpenOrders # # { # "order_currency": "BTC", # "payment_currency": "KRW", # "order_id": "C0101000007408440032", # "order_date": "1571728739360570", # "type": "bid", # "units": "5.0", # "units_remaining": "5.0", # "price": "501000", # } # timestamp = self.safe_integer_product(order, 'order_date', 0.001) sideProperty = self.safe_value_2(order, 'type', 'side') side = 'buy' if 
(sideProperty == 'bid') else 'sell' status = self.parse_order_status(self.safe_string(order, 'order_status')) price = self.safe_float_2(order, 'order_price', 'price') type = 'limit' if price == 0: price = None type = 'market' amount = self.safe_float_2(order, 'order_qty', 'units') remaining = self.safe_float(order, 'units_remaining') if remaining is None: if status == 'closed': remaining = 0 else: remaining = amount filled = None if (amount is not None) and (remaining is not None): filled = amount - remaining symbol = None baseId = self.safe_string(order, 'order_currency') quoteId = self.safe_string(order, 'payment_currency') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) if (base is not None) and (quote is not None): symbol = base + '/' + quote if (symbol is None) and (market is not None): symbol = market['symbol'] rawTrades = self.safe_value(order, 'contract') trades = None id = self.safe_string(order, 'order_id') if rawTrades is not None: trades = self.parse_trades(rawTrades, market, None, None, { 'side': side, 'symbol': symbol, 'order': id, }) return { 'info': order, 'id': id, 'clientOrderId': None, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': None, 'symbol': symbol, 'type': type, 'side': side, 'price': price, 'amount': amount, 'cost': None, 'average': None, 'filled': filled, 'remaining': remaining, 'status': status, 'fee': None, 'trades': trades, } def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): if symbol is None: raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument') self.load_markets() market = self.market(symbol) if limit is None: limit = 100 request = { 'count': limit, 'order_currency': market['base'], 'payment_currency': market['quote'], } if since is not None: request['after'] = since response = self.privatePostInfoOrders(self.extend(request, params)) # # { # "status": "0000", # "data": [ # { # "order_currency": "BTC", # "payment_currency": "KRW", # "order_id": "C0101000007408440032", # "order_date": "1571728739360570", # "type": "bid", # "units": "5.0", # "units_remaining": "5.0", # "price": "501000", # } # ] # } # data = self.safe_value(response, 'data', []) return self.parse_orders(data, market, since, limit) def cancel_order(self, id, symbol=None, params={}): side_in_params = ('side' in params) if not side_in_params: raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument and a `side` parameter(sell or buy)') if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument and a `side` parameter(sell or buy)') market = self.market(symbol) side = 'bid' if (params['side'] == 'buy') else 'ask' params = self.omit(params, ['side', 'currency']) # https://github.com/ccxt/ccxt/issues/6771 request = { 'order_id': id, 'type': side, 'order_currency': market['base'], 'payment_currency': market['quote'], } return self.privatePostTradeCancel(self.extend(request, params)) def cancel_unified_order(self, order, params={}): request = { 'side': order['side'], } return self.cancel_order(order['id'], order['symbol'], self.extend(request, params)) def withdraw(self, code, amount, address, tag=None, params={}): self.check_address(address) self.load_markets() currency = self.currency(code) request = { 'units': amount, 'address': address, 'currency': currency['id'], } if currency == 'XRP' or currency == 'XMR': destination = self.safe_string(params, 'destination') if (tag is None) and (destination is None): raise 
ArgumentsRequired(self.id + ' ' + code + ' withdraw() requires a tag argument or an extra destination param') elif tag is not None: request['destination'] = tag response = self.privatePostTradeBtcWithdrawal(self.extend(request, params)) return { 'info': response, 'id': None, } def nonce(self): return self.milliseconds() def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): endpoint = '/' + self.implode_params(path, params) url = self.urls['api'][api] + endpoint query = self.omit(params, self.extract_params(path)) if api == 'public': if query: url += '?' + self.urlencode(query) else: self.check_required_credentials() body = self.urlencode(self.extend({ 'endpoint': endpoint, }, query)) nonce = str(self.nonce()) auth = endpoint + "\0" + body + "\0" + nonce # eslint-disable-line quotes signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512) signature64 = self.decode(base64.b64encode(self.encode(signature))) headers = { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Api-Key': self.apiKey, 'Api-Sign': str(signature64), 'Api-Nonce': nonce, } return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody): if response is None: return # fallback to default error handler if 'status' in response: # # {"status":"5100","message":"After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions"} # status = self.safe_string(response, 'status') message = self.safe_string(response, 'message') if status is not None: if status == '0000': return # no error feedback = self.id + ' ' + body self.throw_exactly_matched_exception(self.exceptions, status, feedback) self.throw_exactly_matched_exception(self.exceptions, message, feedback) raise ExchangeError(feedback) def request(self, path, api='public', method='GET', params={}, headers=None, body=None): response = self.fetch2(path, api, method, params, headers, body) if 'status' in response: if response['status'] == '0000': return response raise ExchangeError(self.id + ' ' + self.json(response)) return response
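The class above is a ccxt-style adapter for the Bithumb REST API. A minimal usage sketch, assuming the released ccxt package, network access, and that 'BTC/KRW' is a listed market (none of which are stated in the file itself):

import ccxt

exchange = ccxt.bithumb()                               # public endpoints need no API keys
exchange.load_markets()                                 # populates markets via fetch_markets() above
ticker = exchange.fetch_ticker('BTC/KRW')               # GET /public/ticker/{currency}
book = exchange.fetch_order_book('BTC/KRW', limit=10)   # GET /public/orderbook/{currency}
print(ticker['last'], book['bids'][0])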
39.115691
292
0.478599
from ccxt.base.exchange import Exchange import base64 import hashlib from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import PermissionDenied from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import BadRequest from ccxt.base.errors import InvalidAddress from ccxt.base.errors import InvalidOrder from ccxt.base.errors import ExchangeNotAvailable from ccxt.base.decimal_to_precision import TRUNCATE from ccxt.base.decimal_to_precision import DECIMAL_PLACES from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS class bithumb(Exchange): def describe(self): return self.deep_extend(super(bithumb, self).describe(), { 'id': 'bithumb', 'name': 'Bithumb', 'countries': ['KR'], 'rateLimit': 500, 'has': { 'cancelOrder': True, 'CORS': True, 'createMarketOrder': True, 'createOrder': True, 'fetchBalance': True, 'fetchMarkets': True, 'fetchOpenOrders': True, 'fetchOrder': True, 'fetchOrderBook': True, 'fetchTicker': True, 'fetchTickers': True, 'fetchTrades': True, 'withdraw': True, }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/30597177-ea800172-9d5e-11e7-804c-b9d4fa9b56b0.jpg', 'api': { 'public': 'https://api.bithumb.com/public', 'private': 'https://api.bithumb.com', }, 'www': 'https://www.bithumb.com', 'doc': 'https://apidocs.bithumb.com', 'fees': 'https://en.bithumb.com/customer_support/info_fee', }, 'api': { 'public': { 'get': [ 'ticker/{currency}', 'ticker/all', 'orderbook/{currency}', 'orderbook/all', 'transaction_history/{currency}', 'transaction_history/all', ], }, 'private': { 'post': [ 'info/account', 'info/balance', 'info/wallet_address', 'info/ticker', 'info/orders', 'info/user_transactions', 'info/order_detail', 'trade/place', 'trade/cancel', 'trade/btc_withdrawal', 'trade/krw_deposit', 'trade/krw_withdrawal', 'trade/market_buy', 'trade/market_sell', ], }, }, 'fees': { 'trading': { 'maker': 0.25 / 100, 'taker': 0.25 / 100, }, }, 'precisionMode': SIGNIFICANT_DIGITS, 'exceptions': { 'Bad Request(SSL)': BadRequest, 'Bad Request(Bad Method)': BadRequest, 'Bad Request.(Auth Data)': AuthenticationError, 'Not Member': AuthenticationError, 'Invalid Apikey': AuthenticationError, 'Method Not Allowed.(Access IP)': PermissionDenied, 'Method Not Allowed.(BTC Adress)': InvalidAddress, 'Method Not Allowed.(Access)': PermissionDenied, 'Database Fail': ExchangeNotAvailable, 'Invalid Parameter': BadRequest, '5600': ExchangeError, 'Unknown Error': ExchangeError, 'After May 23th, recent_transactions is no longer, hence users will not be able to connect to recent_transactions': ExchangeError, }, }) def amount_to_precision(self, symbol, amount): return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES) def fetch_markets(self, params={}): response = self.publicGetTickerAll(params) data = self.safe_value(response, 'data') currencyIds = list(data.keys()) result = [] quote = self.safe_currency_code('KRW') for i in range(0, len(currencyIds)): currencyId = currencyIds[i] if currencyId == 'date': continue market = data[currencyId] base = self.safe_currency_code(currencyId) symbol = currencyId + '/' + quote active = True if isinstance(market, list): numElements = len(market) if numElements == 0: active = False result.append({ 'id': currencyId, 'symbol': symbol, 'base': base, 'quote': quote, 'info': market, 'active': active, 'precision': { 'amount': 4, 'price': 4, }, 'limits': { 'amount': { 'min': None, 'max': None, }, 'price': { 'min': None, 'max': None, }, 
'cost': { 'min': 500, 'max': 5000000000, }, }, 'baseId': None, 'quoteId': None, }) return result def fetch_balance(self, params={}): self.load_markets() request = { 'currency': 'ALL', } response = self.privatePostInfoBalance(self.extend(request, params)) result = {'info': response} balances = self.safe_value(response, 'data') codes = list(self.currencies.keys()) for i in range(0, len(codes)): code = codes[i] account = self.account() currency = self.currency(code) lowerCurrencyId = self.safe_string_lower(currency, 'id') account['total'] = self.safe_float(balances, 'total_' + lowerCurrencyId) account['used'] = self.safe_float(balances, 'in_use_' + lowerCurrencyId) account['free'] = self.safe_float(balances, 'available_' + lowerCurrencyId) result[code] = account return self.parse_balance(result) def fetch_order_book(self, symbol, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'currency': market['base'], } if limit is not None: request['count'] = limit response = self.publicGetOrderbookCurrency(self.extend(request, params)) data = self.safe_value(response, 'data', {}) timestamp = self.safe_integer(data, 'timestamp') return self.parse_order_book(data, timestamp, 'bids', 'asks', 'price', 'quantity') def parse_ticker(self, ticker, market=None): timestamp = self.safe_integer(ticker, 'date') symbol = None if market is not None: symbol = market['symbol'] open = self.safe_float(ticker, 'opening_price') close = self.safe_float(ticker, 'closing_price') change = None percentage = None average = None if (close is not None) and (open is not None): change = close - open if open > 0: percentage = change / open * 100 average = self.sum(open, close) / 2 baseVolume = self.safe_float(ticker, 'units_traded_24H') quoteVolume = self.safe_float(ticker, 'acc_trade_value_24H') vwap = self.vwap(baseVolume, quoteVolume) return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_float(ticker, 'max_price'), 'low': self.safe_float(ticker, 'min_price'), 'bid': self.safe_float(ticker, 'buy_price'), 'bidVolume': None, 'ask': self.safe_float(ticker, 'sell_price'), 'askVolume': None, 'vwap': vwap, 'open': open, 'close': close, 'last': close, 'previousClose': None, 'change': change, 'percentage': percentage, 'average': average, 'baseVolume': baseVolume, 'quoteVolume': quoteVolume, 'info': ticker, } def fetch_tickers(self, symbols=None, params={}): self.load_markets() response = self.publicGetTickerAll(params) result = {} data = self.safe_value(response, 'data', {}) timestamp = self.safe_integer(data, 'date') tickers = self.omit(data, 'date') ids = list(tickers.keys()) for i in range(0, len(ids)): id = ids[i] symbol = id market = None if id in self.markets_by_id: market = self.markets_by_id[id] symbol = market['symbol'] ticker = tickers[id] isArray = isinstance(ticker, list) if not isArray: ticker['date'] = timestamp result[symbol] = self.parse_ticker(ticker, market) return result def fetch_ticker(self, symbol, params={}): self.load_markets() market = self.market(symbol) request = { 'currency': market['base'], } response = self.publicGetTickerCurrency(self.extend(request, params)) data = self.safe_value(response, 'data', {}) return self.parse_ticker(data, market) def parse_trade(self, trade, market=None): timestamp = None transactionDatetime = self.safe_string(trade, 'transaction_date') if transactionDatetime is not None: parts = transactionDatetime.split(' ') numParts = len(parts) if numParts > 1: transactionDate = parts[0] transactionTime = 
parts[1] if len(transactionTime) < 8: transactionTime = '0' + transactionTime timestamp = self.parse8601(transactionDate + ' ' + transactionTime) else: timestamp = self.safe_integer_product(trade, 'transaction_date', 0.001) if timestamp is not None: timestamp -= 9 * 3600000 type = None side = self.safe_string(trade, 'type') side = 'sell' if (side == 'ask') else 'buy' id = self.safe_string(trade, 'cont_no') symbol = None if market is not None: symbol = market['symbol'] price = self.safe_float(trade, 'price') amount = self.safe_float(trade, 'units_traded') cost = self.safe_float(trade, 'total') if cost is None: if amount is not None: if price is not None: cost = price * amount fee = None feeCost = self.safe_float(trade, 'fee') if feeCost is not None: feeCurrencyId = self.safe_string(trade, 'fee_currency') feeCurrencyCode = self.common_currency_code(feeCurrencyId) fee = { 'cost': feeCost, 'currency': feeCurrencyCode, } return { 'id': id, 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'order': None, 'type': type, 'side': side, 'takerOrMaker': None, 'price': price, 'amount': amount, 'cost': cost, 'fee': fee, } def fetch_trades(self, symbol, since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) request = { 'currency': market['base'], } if limit is None: request['count'] = limit response = self.publicGetTransactionHistoryCurrency(self.extend(request, params)) data = self.safe_value(response, 'data', []) return self.parse_trades(data, market, since, limit) def create_order(self, symbol, type, side, amount, price=None, params={}): self.load_markets() market = self.market(symbol) request = { 'order_currency': market['id'], 'Payment_currency': market['quote'], 'units': amount, } method = 'privatePostTradePlace' if type == 'limit': request['price'] = price request['type'] = 'bid' if (side == 'buy') else 'ask' else: method = 'privatePostTradeMarket' + self.capitalize(side) response = getattr(self, method)(self.extend(request, params)) id = self.safe_string(response, 'order_id') if id is None: raise InvalidOrder(self.id + ' createOrder did not return an order id') return { 'info': response, 'symbol': symbol, 'type': type, 'side': side, 'id': id, } def fetch_order(self, id, symbol=None, params={}): if symbol is None: raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument') self.load_markets() market = self.market(symbol) request = { 'order_id': id, 'count': 1, 'order_currency': market['base'], 'payment_currency': market['quote'], } response = self.privatePostInfoOrderDetail(self.extend(request, params)) data = self.safe_value(response, 'data') return self.parse_order(self.extend(data, {'order_id': id}), market) def parse_order_status(self, status): statuses = { 'Pending': 'open', 'Completed': 'closed', 'Cancel': 'canceled', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): timestamp = self.safe_integer_product(order, 'order_date', 0.001) sideProperty = self.safe_value_2(order, 'type', 'side') side = 'buy' if (sideProperty == 'bid') else 'sell' status = self.parse_order_status(self.safe_string(order, 'order_status')) price = self.safe_float_2(order, 'order_price', 'price') type = 'limit' if price == 0: price = None type = 'market' amount = self.safe_float_2(order, 'order_qty', 'units') remaining = self.safe_float(order, 'units_remaining') if remaining is None: if status == 'closed': remaining = 0 else: remaining = amount filled = None if (amount is not None) and 
(remaining is not None): filled = amount - remaining symbol = None baseId = self.safe_string(order, 'order_currency') quoteId = self.safe_string(order, 'payment_currency') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) if (base is not None) and (quote is not None): symbol = base + '/' + quote if (symbol is None) and (market is not None): symbol = market['symbol'] rawTrades = self.safe_value(order, 'contract') trades = None id = self.safe_string(order, 'order_id') if rawTrades is not None: trades = self.parse_trades(rawTrades, market, None, None, { 'side': side, 'symbol': symbol, 'order': id, }) return { 'info': order, 'id': id, 'clientOrderId': None, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'lastTradeTimestamp': None, 'symbol': symbol, 'type': type, 'side': side, 'price': price, 'amount': amount, 'cost': None, 'average': None, 'filled': filled, 'remaining': remaining, 'status': status, 'fee': None, 'trades': trades, } def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): if symbol is None: raise ArgumentsRequired(self.id + ' fetchOpenOrders requires a symbol argument') self.load_markets() market = self.market(symbol) if limit is None: limit = 100 request = { 'count': limit, 'order_currency': market['base'], 'payment_currency': market['quote'], } if since is not None: request['after'] = since response = self.privatePostInfoOrders(self.extend(request, params)) data = self.safe_value(response, 'data', []) return self.parse_orders(data, market, since, limit) def cancel_order(self, id, symbol=None, params={}): side_in_params = ('side' in params) if not side_in_params: raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument and a `side` parameter(sell or buy)') if symbol is None: raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument and a `side` parameter(sell or buy)') market = self.market(symbol) side = 'bid' if (params['side'] == 'buy') else 'ask' params = self.omit(params, ['side', 'currency']) request = { 'order_id': id, 'type': side, 'order_currency': market['base'], 'payment_currency': market['quote'], } return self.privatePostTradeCancel(self.extend(request, params)) def cancel_unified_order(self, order, params={}): request = { 'side': order['side'], } return self.cancel_order(order['id'], order['symbol'], self.extend(request, params)) def withdraw(self, code, amount, address, tag=None, params={}): self.check_address(address) self.load_markets() currency = self.currency(code) request = { 'units': amount, 'address': address, 'currency': currency['id'], } if currency == 'XRP' or currency == 'XMR': destination = self.safe_string(params, 'destination') if (tag is None) and (destination is None): raise ArgumentsRequired(self.id + ' ' + code + ' withdraw() requires a tag argument or an extra destination param') elif tag is not None: request['destination'] = tag response = self.privatePostTradeBtcWithdrawal(self.extend(request, params)) return { 'info': response, 'id': None, } def nonce(self): return self.milliseconds() def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): endpoint = '/' + self.implode_params(path, params) url = self.urls['api'][api] + endpoint query = self.omit(params, self.extract_params(path)) if api == 'public': if query: url += '?' 
+ self.urlencode(query) else: self.check_required_credentials() body = self.urlencode(self.extend({ 'endpoint': endpoint, }, query)) nonce = str(self.nonce()) auth = endpoint + "\0" + body + "\0" + nonce signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512) signature64 = self.decode(base64.b64encode(self.encode(signature))) headers = { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'Api-Key': self.apiKey, 'Api-Sign': str(signature64), 'Api-Nonce': nonce, } return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody): if response is None: return if 'status' in response: status = self.safe_string(response, 'status') message = self.safe_string(response, 'message') if status is not None: if status == '0000': return feedback = self.id + ' ' + body self.throw_exactly_matched_exception(self.exceptions, status, feedback) self.throw_exactly_matched_exception(self.exceptions, message, feedback) raise ExchangeError(feedback) def request(self, path, api='public', method='GET', params={}, headers=None, body=None): response = self.fetch2(path, api, method, params, headers, body) if 'status' in response: if response['status'] == '0000': return response raise ExchangeError(self.id + ' ' + self.json(response)) return response
true
true
f7082effdfcc3ae6724076920b4094deda3a13b3
5,123
py
Python
btc/ecpoint.py
DagonR/simplebtclib
63ad331902433cc8c357463eea2fe73bb8831c73
[ "MIT" ]
null
null
null
btc/ecpoint.py
DagonR/simplebtclib
63ad331902433cc8c357463eea2fe73bb8831c73
[ "MIT" ]
null
null
null
btc/ecpoint.py
DagonR/simplebtclib
63ad331902433cc8c357463eea2fe73bb8831c73
[ "MIT" ]
null
null
null
from btc.utils import mod_inverse, int2hex # Parameters for SECP256k1 elliptic curve (used by Bitcoin) SECP256K1_A = 0 SECP256K1_B = 7 SECP256K1_GX = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 SECP256K1_GY = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 SECP256K1_P = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1 SECP256K1_ORDER = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 SECP256K1_ORDER_LEN = SECP256K1_ORDER.bit_length() SECP256K1_H = 1 class ECPoint: """Represents a point on an elliptic curve""" def __init__(self, x, y, a=SECP256K1_A, b=SECP256K1_B, mod=SECP256K1_P): """Construct an ECPoint on the elliptic curve: y^2 = x^3 + a*x + b (mod p) """ # Check if the point(x,y) is the infinity if x or y: # Check if the point(x,y) is on the elliptic curve assert self.is_contained(x, y, a, b, mod), \ "The point {:x}, {:x} is not on " \ "the elliptic curve".format(x, y) self.x, self.y, self.a, self.b, self.mod = x, y, a, b, mod def __add__(self, other): if self.x == other.x and self.y == other.y: return self.double(self) else: return self.add(self, other) def __mul__(self, other): return self.multiply(self, other) def __repr__(self): return "({:s}, {:s})".format(int2hex(self.x), int2hex(self.y)) def __eq__(self, other): return (self.x == other.x) & (self.y == other.y) def add(self, p1, p2): """Return the sum of two ECPoint""" # The sum of infinity + p2 = p2 if p1 == ECPoint.infinity(): return p2 # The sum of p1 + infinity = p1 if p2 == ECPoint.infinity(): return p1 # Check if the points are on a vertical line if p1.x == p2.x: # If p1 and p2 is the same then double(point) # else the result is infinity. if p1.y == p2.y: return self.double(p1) else: return ECPoint.infinity() # Sum point: # x3 = s^2 - x1 - x2 # y3 = s(x1-x3) / y1 # where s = (y2-y1) / (x2-x1) p3 = ECPoint(0, 0, p1.a, p1.b, p1.mod) dy = (p2.y - p1.y) % p1.mod dx = (p2.x - p1.x) % p1.mod s = (dy * mod_inverse(dx, p1.mod)) % p1.mod p3.x = (s * s - p1.x - p2.x) % p1.mod p3.y = (s * (p1.x - p3.x) - p1.y) % p1.mod return p3 def double(self, p): """Return point * 2""" if p == ECPoint.infinity(): return ECPoint.infinity() # Sum point: # x3 = s^2 - x1 - x2 # y3 = s*(x1-x3) / y1 # where s = (3*x^2 + a) / 2*y1 p2 = ECPoint(0, 0, p.a, p.b, p.mod) dy = (3 * p.x * p.x + p.a) % p.mod dx = (2 * p.y) % p.mod s = (dy * mod_inverse(dx, p.mod)) % p.mod p2.x = (s * s - p.x - p.x) % p.mod p2.y = (s * (p.x - p2.x) - p.y) % p.mod return p2 def multiply(self, p, x): """Return p * x = p + p + ... 
+ p""" temp = ECPoint(p.x, p.y, p.a, p.b, p.mod) x = x - 1 while x > 0: if x % 2 != 0: temp = self.double(temp) if temp == p else self.add(temp, p) x = x - 1 x = x // 2 p = self.double(p) return temp @staticmethod def infinity(): """Return the infinity point on the elliptic curve point""" return ECPoint(0, 0) @staticmethod def is_contained(x, y, a, b, mod): """Check if a point is on the elliptic curve""" # The elliptic curve -- y^2 = x^3 + a*x + b (mod p) return (y ** 2 - (x ** 3 + a * x + b)) % mod == 0 @classmethod def get_secp256k1_y(cls, x, a=SECP256K1_A, b=SECP256K1_B, p=SECP256K1_P): """Calculate y of a point with x""" # The elliptic curve -- y^2 = x^3 + a*x + b (mod p) # To solve y^2 = z mod p: # if p mod 4 = 3 => y = z^((p+1)/4) # So for y^2 = x^3 + ax + b (mod p): # y = (x^3 + ax + b)^((p+1)/4) (mod p) y = pow(x ** 3 + x * a + b, (p + 1) // 4, p) # Check if the point(x,y) is on the elliptic curve assert cls.is_contained(x, y, a, b, p), \ "The point {:x}, {:x} is not on the elliptic curve".format(x, y) return y @staticmethod def get_secp256k1_a(): return SECP256K1_A @staticmethod def get_secp256k1_b(): return SECP256K1_B @staticmethod def get_secp256k1_gx(): return SECP256K1_GX @staticmethod def get_secp256k1_gy(): return SECP256K1_GY @staticmethod def get_secp256k1_p(): return SECP256K1_P @staticmethod def get_secp256k1_order(): return SECP256K1_ORDER @staticmethod def get_secp256k1_order_len(): return SECP256K1_ORDER_LEN @staticmethod def get_secp256k1_h(): return SECP256K1_H
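The ECPoint class above implements secp256k1 group arithmetic. A minimal sketch of deriving a public-key point from a private scalar, assuming the module is importable as btc.ecpoint and using a made-up scalar for illustration:

from btc.ecpoint import ECPoint

# Generator point G of secp256k1, built from the curve constants exposed above.
G = ECPoint(ECPoint.get_secp256k1_gx(), ECPoint.get_secp256k1_gy())

private_key = 123456789          # hypothetical scalar, not a real key
public_point = G * private_key   # double-and-add via ECPoint.multiply()

# The resulting point must still satisfy y^2 = x^3 + 7 (mod p).
assert ECPoint.is_contained(public_point.x, public_point.y,
                            ECPoint.get_secp256k1_a(),
                            ECPoint.get_secp256k1_b(),
                            ECPoint.get_secp256k1_p())
print(public_point)  # "(x, y)" rendered in hex by __repr__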
27.543011
84
0.519813
from btc.utils import mod_inverse, int2hex SECP256K1_A = 0 SECP256K1_B = 7 SECP256K1_GX = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798 SECP256K1_GY = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8 SECP256K1_P = 2**256 - 2**32 - 2**9 - 2**8 - 2**7 - 2**6 - 2**4 - 1 SECP256K1_ORDER = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 SECP256K1_ORDER_LEN = SECP256K1_ORDER.bit_length() SECP256K1_H = 1 class ECPoint: def __init__(self, x, y, a=SECP256K1_A, b=SECP256K1_B, mod=SECP256K1_P): if x or y: assert self.is_contained(x, y, a, b, mod), \ "The point {:x}, {:x} is not on " \ "the elliptic curve".format(x, y) self.x, self.y, self.a, self.b, self.mod = x, y, a, b, mod def __add__(self, other): if self.x == other.x and self.y == other.y: return self.double(self) else: return self.add(self, other) def __mul__(self, other): return self.multiply(self, other) def __repr__(self): return "({:s}, {:s})".format(int2hex(self.x), int2hex(self.y)) def __eq__(self, other): return (self.x == other.x) & (self.y == other.y) def add(self, p1, p2): if p1 == ECPoint.infinity(): return p2 if p2 == ECPoint.infinity(): return p1 if p1.x == p2.x: if p1.y == p2.y: return self.double(p1) else: return ECPoint.infinity() p3 = ECPoint(0, 0, p1.a, p1.b, p1.mod) dy = (p2.y - p1.y) % p1.mod dx = (p2.x - p1.x) % p1.mod s = (dy * mod_inverse(dx, p1.mod)) % p1.mod p3.x = (s * s - p1.x - p2.x) % p1.mod p3.y = (s * (p1.x - p3.x) - p1.y) % p1.mod return p3 def double(self, p): if p == ECPoint.infinity(): return ECPoint.infinity() p2 = ECPoint(0, 0, p.a, p.b, p.mod) dy = (3 * p.x * p.x + p.a) % p.mod dx = (2 * p.y) % p.mod s = (dy * mod_inverse(dx, p.mod)) % p.mod p2.x = (s * s - p.x - p.x) % p.mod p2.y = (s * (p.x - p2.x) - p.y) % p.mod return p2 def multiply(self, p, x): temp = ECPoint(p.x, p.y, p.a, p.b, p.mod) x = x - 1 while x > 0: if x % 2 != 0: temp = self.double(temp) if temp == p else self.add(temp, p) x = x - 1 x = x // 2 p = self.double(p) return temp @staticmethod def infinity(): return ECPoint(0, 0) @staticmethod def is_contained(x, y, a, b, mod): return (y ** 2 - (x ** 3 + a * x + b)) % mod == 0 @classmethod def get_secp256k1_y(cls, x, a=SECP256K1_A, b=SECP256K1_B, p=SECP256K1_P): y = pow(x ** 3 + x * a + b, (p + 1) // 4, p) assert cls.is_contained(x, y, a, b, p), \ "The point {:x}, {:x} is not on the elliptic curve".format(x, y) return y @staticmethod def get_secp256k1_a(): return SECP256K1_A @staticmethod def get_secp256k1_b(): return SECP256K1_B @staticmethod def get_secp256k1_gx(): return SECP256K1_GX @staticmethod def get_secp256k1_gy(): return SECP256K1_GY @staticmethod def get_secp256k1_p(): return SECP256K1_P @staticmethod def get_secp256k1_order(): return SECP256K1_ORDER @staticmethod def get_secp256k1_order_len(): return SECP256K1_ORDER_LEN @staticmethod def get_secp256k1_h(): return SECP256K1_H
true
true
f708301dab84462cdc546c456f38ab6d6b2e0329
10,506
py
Python
addons/mixer/blender_data/struct_proxy.py
trisadmeslek/V-Sekai-Blender-tools
0d8747387c58584b50c69c61ba50a881319114f8
[ "MIT" ]
null
null
null
addons/mixer/blender_data/struct_proxy.py
trisadmeslek/V-Sekai-Blender-tools
0d8747387c58584b50c69c61ba50a881319114f8
[ "MIT" ]
null
null
null
addons/mixer/blender_data/struct_proxy.py
trisadmeslek/V-Sekai-Blender-tools
0d8747387c58584b50c69c61ba50a881319114f8
[ "MIT" ]
null
null
null
# GPLv3 License # # Copyright (C) 2020 Ubisoft # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ Proxy of a bpy.types.Struct, excluding bpy.types.ID that is implemented in datablock_proxy.py See synchronization.md """ from __future__ import annotations from functools import lru_cache import logging from typing import Optional, Tuple, TYPE_CHECKING, Union import bpy.types as T # noqa from mixer.blender_data import specifics from mixer.blender_data.attributes import apply_attribute, diff_attribute, read_attribute, write_attribute from mixer.blender_data.json_codec import serialize from mixer.blender_data.misc_proxies import NonePtrProxy from mixer.blender_data.proxy import Delta, DeltaReplace, DeltaUpdate, Proxy if TYPE_CHECKING: from mixer.blender_data.proxy import Context logger = logging.getLogger(__name__) def _create_clear_animation_data(incoming_proxy: StructProxy, existing_struct: T.bpy_struct) -> Optional[T.AnimData]: if existing_struct.animation_data is None: if not isinstance(incoming_proxy, NonePtrProxy): # None (current blender value) -> not None (incoming proxy) existing_struct.animation_data_create() else: if isinstance(incoming_proxy, NonePtrProxy): # not None (current blender value) -> None (incoming proxy) existing_struct.animation_data_clear() return existing_struct.animation_data @lru_cache() def _proxy_types(): from mixer.blender_data.modifier_proxies import NodesModifierProxy proxy_types = {} try: proxy_types[T.NodesModifier] = NodesModifierProxy except AttributeError: pass return proxy_types @serialize class StructProxy(Proxy): """ Holds a copy of a Blender bpy_struct """ _serialize: Tuple[str, ...] = ("_data",) def __init__(self): self._data = {} pass def copy_data(self, other: StructProxy): self._data = other._data def clear_data(self): self._data.clear() @classmethod def make(cls, bpy_struct: T.bpy_struct) -> StructProxy: proxy_class = _proxy_types().get(type(bpy_struct), StructProxy) return proxy_class() def load(self, attribute: T.bpy_struct, context: Context) -> StructProxy: """ Load the attribute Blender struct into this proxy Args: attribute: the Blender struct to load into this proxy, (e.g an ObjectDisplay instance) key: the identifier of attribute in its parent (e.g. 
"display") context: the proxy and visit state """ self.clear_data() properties = context.synchronized_properties.properties(attribute) # includes properties from the bl_rna only, not the "view like" properties like MeshPolygon.edge_keys # that we do not want to load anyway properties = specifics.conditional_properties(attribute, properties) for name, bl_rna_property in properties: attr = getattr(attribute, name) attr_value = read_attribute(attr, name, bl_rna_property, attribute, context) self._data[name] = attr_value return self def save( self, attribute: T.bpy_struct, parent: Union[T.bpy_struct, T.bpy_prop_collection], key: Union[int, str], context: Context, ): """ Save this proxy into attribute Args: attribute: the bpy_struct to store this proxy into parent: (e.g an Object instance) key: (e.g. "display) context: the proxy and visit state """ if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)): attribute = _create_clear_animation_data(self, parent) if attribute is None: logger.info(f"save: attribute is None for {context.visit_state.display_path()}.{key}") return for k, v in self._data.items(): write_attribute(attribute, k, v, context) def apply( self, attribute: T.bpy_struct, parent: Union[T.bpy_struct, T.bpy_prop_collection], key: Union[int, str], delta: Delta, context: Context, to_blender: bool = True, ) -> Union[StructProxy, NonePtrProxy]: """ Apply delta to this proxy and optionally to the Blender attribute its manages. Args: attribute: the struct to update (e.g. a Material instance) parent: the attribute that contains attribute (e.g. bpy.data.materials) key: the key that identifies attribute in parent (e.g "Material") delta: the delta to apply context: proxy and visit state to_blender: update the managed Blender attribute in addition to this Proxy """ # WARNING parent must not be searched for key as it will fail in case of duplicate keys, with libraries update = delta.value if isinstance(delta, DeltaReplace): # The structure is replaced as a whole. # TODO explain when this occurs self.copy_data(update) if to_blender: self.save(attribute, parent, key, context) else: # the structure is updated if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)): # if animation_data is updated to None (cleared), the parent structure is updated to store # a NonePtrProxy if to_blender: attribute = _create_clear_animation_data(update, parent) if attribute is None: return NonePtrProxy() else: if isinstance(update, NonePtrProxy): return NonePtrProxy() if attribute: for k, member_delta in update._data.items(): current_value = self._data.get(k) try: self._data[k] = apply_attribute(attribute, k, current_value, member_delta, context, to_blender) except Exception as e: logger.warning(f"Struct.apply(). Processing {member_delta}") logger.warning(f"... for {attribute}.{k}") logger.warning(f"... Exception: {e!r}") logger.warning("... Update ignored") continue return self def diff( self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context ) -> Optional[Delta]: """ Computes the difference between the state of an item tracked by this proxy and its Blender state. As this proxy tracks a Struct or ID, the result will be a DeltaUpdate that contains a StructProxy or a DatablockProxy with an Delta item per added, deleted or updated property. One expect only DeltaUpdate, although DeltalAddition or DeltaDeletion may be produced when an addon is loaded or unloaded while a room is joined. 
This situation is not really supported as there is no handler to track addon changes. Args: attribute: the struct to update (e.g. a Material instance) key: the key that identifies attribute in parent (e.g "Material") prop: the Property of struct as found in its enclosing object context: proxy and visit state """ # Create a proxy that will be populated with attributes differences. diff = self.__class__() diff.init(attribute) delta = self._diff(attribute, key, prop, context, diff) return delta def _diff( self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context, diff: StructProxy ) -> Optional[Delta]: """ Computes the difference between the state of an item tracked by this proxy and its Blender state and attached the difference to diff. See diff() Args: attribute: the struct to update (e.g. a Material instance) key: the key that identifies attribute in parent (e.g "Material") prop: the Property of struct as found in its enclosing object context: proxy and visit state diff: the proxy that holds the difference and will be transmitted in a Delta Returns: a delta if any difference is found, None otherwise """ if attribute is None: from mixer.blender_data.misc_proxies import NonePtrProxy return DeltaUpdate(NonePtrProxy()) # PERF accessing the properties from the synchronized_properties is **far** cheaper that iterating over # _data and the getting the properties with # member_property = struct.bl_rna.properties[k] # line to which py-spy attributes 20% of the total diff ! properties = context.synchronized_properties.properties(attribute) properties = specifics.conditional_properties(attribute, properties) for k, member_property in properties: try: member = getattr(attribute, k) except AttributeError: logger.info(f"diff: unknown attribute {k} in {attribute}") continue proxy_data = self._data.get(k) delta = diff_attribute(member, k, member_property, proxy_data, context) if delta is not None: diff._data[k] = delta # TODO detect media updates (reload(), and attach a media descriptor to diff) # difficult ? # if anything has changed, wrap the hollow proxy in a DeltaUpdate. This may be superfluous but # it is homogenous with additions and deletions if len(diff._data): return DeltaUpdate(diff) return None
38.625
119
0.642395
from __future__ import annotations from functools import lru_cache import logging from typing import Optional, Tuple, TYPE_CHECKING, Union import bpy.types as T from mixer.blender_data import specifics from mixer.blender_data.attributes import apply_attribute, diff_attribute, read_attribute, write_attribute from mixer.blender_data.json_codec import serialize from mixer.blender_data.misc_proxies import NonePtrProxy from mixer.blender_data.proxy import Delta, DeltaReplace, DeltaUpdate, Proxy if TYPE_CHECKING: from mixer.blender_data.proxy import Context logger = logging.getLogger(__name__) def _create_clear_animation_data(incoming_proxy: StructProxy, existing_struct: T.bpy_struct) -> Optional[T.AnimData]: if existing_struct.animation_data is None: if not isinstance(incoming_proxy, NonePtrProxy): existing_struct.animation_data_create() else: if isinstance(incoming_proxy, NonePtrProxy): existing_struct.animation_data_clear() return existing_struct.animation_data @lru_cache() def _proxy_types(): from mixer.blender_data.modifier_proxies import NodesModifierProxy proxy_types = {} try: proxy_types[T.NodesModifier] = NodesModifierProxy except AttributeError: pass return proxy_types @serialize class StructProxy(Proxy): _serialize: Tuple[str, ...] = ("_data",) def __init__(self): self._data = {} pass def copy_data(self, other: StructProxy): self._data = other._data def clear_data(self): self._data.clear() @classmethod def make(cls, bpy_struct: T.bpy_struct) -> StructProxy: proxy_class = _proxy_types().get(type(bpy_struct), StructProxy) return proxy_class() def load(self, attribute: T.bpy_struct, context: Context) -> StructProxy: self.clear_data() properties = context.synchronized_properties.properties(attribute) properties = specifics.conditional_properties(attribute, properties) for name, bl_rna_property in properties: attr = getattr(attribute, name) attr_value = read_attribute(attr, name, bl_rna_property, attribute, context) self._data[name] = attr_value return self def save( self, attribute: T.bpy_struct, parent: Union[T.bpy_struct, T.bpy_prop_collection], key: Union[int, str], context: Context, ): if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)): attribute = _create_clear_animation_data(self, parent) if attribute is None: logger.info(f"save: attribute is None for {context.visit_state.display_path()}.{key}") return for k, v in self._data.items(): write_attribute(attribute, k, v, context) def apply( self, attribute: T.bpy_struct, parent: Union[T.bpy_struct, T.bpy_prop_collection], key: Union[int, str], delta: Delta, context: Context, to_blender: bool = True, ) -> Union[StructProxy, NonePtrProxy]: update = delta.value if isinstance(delta, DeltaReplace): self.copy_data(update) if to_blender: self.save(attribute, parent, key, context) else: if key == "animation_data" and (attribute is None or isinstance(attribute, T.AnimData)): if to_blender: attribute = _create_clear_animation_data(update, parent) if attribute is None: return NonePtrProxy() else: if isinstance(update, NonePtrProxy): return NonePtrProxy() if attribute: for k, member_delta in update._data.items(): current_value = self._data.get(k) try: self._data[k] = apply_attribute(attribute, k, current_value, member_delta, context, to_blender) except Exception as e: logger.warning(f"Struct.apply(). Processing {member_delta}") logger.warning(f"... for {attribute}.{k}") logger.warning(f"... Exception: {e!r}") logger.warning("... 
Update ignored") continue return self def diff( self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context ) -> Optional[Delta]: diff = self.__class__() diff.init(attribute) delta = self._diff(attribute, key, prop, context, diff) return delta def _diff( self, attribute: T.bpy_struct, key: Union[int, str], prop: T.Property, context: Context, diff: StructProxy ) -> Optional[Delta]: if attribute is None: from mixer.blender_data.misc_proxies import NonePtrProxy return DeltaUpdate(NonePtrProxy()) properties = context.synchronized_properties.properties(attribute) properties = specifics.conditional_properties(attribute, properties) for k, member_property in properties: try: member = getattr(attribute, k) except AttributeError: logger.info(f"diff: unknown attribute {k} in {attribute}") continue proxy_data = self._data.get(k) delta = diff_attribute(member, k, member_property, proxy_data, context) if delta is not None: diff._data[k] = delta if len(diff._data): return DeltaUpdate(diff) return None
true
true
f70830a1dfe16d9acb4b0fb3104b724a08d7490c
2,893
py
Python
inference.py
riotu-lab/tf2trt_with_onnx
f9828ed99af5530836bf6ee608e631502dfb0f02
[ "MIT" ]
12
2020-10-08T21:59:28.000Z
2022-02-05T00:13:41.000Z
inference.py
riotu-lab/tf2trt_with_onnx
f9828ed99af5530836bf6ee608e631502dfb0f02
[ "MIT" ]
3
2020-11-19T15:13:17.000Z
2022-03-07T06:54:45.000Z
inference.py
riotu-lab/tf2trt_with_onnx
f9828ed99af5530836bf6ee608e631502dfb0f02
[ "MIT" ]
7
2020-10-08T21:59:13.000Z
2022-01-20T06:37:15.000Z
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
import pycuda.autoinit


def allocate_buffers(engine, batch_size, data_type):
    """
    This is the function to allocate buffers for input and output in the device
    Args:
        engine : The path to the TensorRT engine.
        batch_size : The batch size for execution time.
        data_type: The type of the data for input and output, for example trt.float32.

    Output:
        h_input_1: Input in the host.
        d_input_1: Input in the device.
        h_output_1: Output in the host.
        d_output_1: Output in the device.
        stream: CUDA stream.
    """
    # Determine dimensions and create page-locked memory buffers (which won't be swapped to disk) to hold host inputs/outputs.
    h_input_1 = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(data_type))
    h_output = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(data_type))
    # Allocate device memory for inputs and outputs.
    d_input_1 = cuda.mem_alloc(h_input_1.nbytes)
    d_output = cuda.mem_alloc(h_output.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    return h_input_1, d_input_1, h_output, d_output, stream


def load_images_to_buffer(pics, pagelocked_buffer):
    preprocessed = np.asarray(pics).ravel()
    np.copyto(pagelocked_buffer, preprocessed)


def do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width):
    """
    This is the function to run the inference
    Args:
        engine : Path to the TensorRT engine
        pics_1 : Input images to the model.
        h_input_1: Input in the host
        d_input_1: Input in the device
        h_output_1: Output in the host
        d_output_1: Output in the device
        stream: CUDA stream
        batch_size : Batch size for execution time
        height: Height of the output image
        width: Width of the output image

    Output:
        The list of output images
    """
    print('load images to buffer')
    load_images_to_buffer(pics_1, h_input_1)

    with engine.create_execution_context() as context:
        context.debug_sync = False
        # Transfer input data to the GPU.
        cuda.memcpy_htod_async(d_input_1, h_input_1, stream)

        # Run inference.
        print('load profiler')
        context.profiler = trt.Profiler()
        print('execute')
        context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)])
        print('Transfer predictions back from the GPU.')

        # Transfer predictions back from the GPU.
        cuda.memcpy_dtoh_async(h_output, d_output, stream)
        # Synchronize the stream
        stream.synchronize()
        # Return the host output.
        print(h_output.shape)
        out = h_output.reshape((1, -1))
        return out
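A driver sketch for the three helpers above; the engine path, input shape, and float32 I/O are assumptions for illustration, not part of the file:

import numpy as np
import tensorrt as trt
import inference  # the module above

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

# Deserialize a previously built engine (path is hypothetical).
with open('model.trt', 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())

h_in, d_in, h_out, d_out, stream = inference.allocate_buffers(engine, 1, trt.float32)
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)  # hypothetical input batch
out = inference.do_inference(engine, dummy, h_in, d_in, h_out, d_out, stream, 1, 224, 224)
print(out.shape)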
36.1625
125
0.696854
import tensorrt as trt import pycuda.driver as cuda import numpy as np import pycuda.autoinit def allocate_buffers(engine, batch_size, data_type): h_input_1 = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(0)), dtype=trt.nptype(data_type)) h_output = cuda.pagelocked_empty(batch_size * trt.volume(engine.get_binding_shape(1)), dtype=trt.nptype(data_type)) # Allocate device memory for inputs and outputs. d_input_1 = cuda.mem_alloc(h_input_1.nbytes) d_output = cuda.mem_alloc(h_output.nbytes) # Create a stream in which to copy inputs/outputs and run inference. stream = cuda.Stream() return h_input_1, d_input_1, h_output, d_output, stream def load_images_to_buffer(pics, pagelocked_buffer): preprocessed = np.asarray(pics).ravel() np.copyto(pagelocked_buffer, preprocessed) def do_inference(engine, pics_1, h_input_1, d_input_1, h_output, d_output, stream, batch_size, height, width): print('load images to buffer') load_images_to_buffer(pics_1, h_input_1) with engine.create_execution_context() as context: context.debug_sync = False # Transfer input data to the GPU. cuda.memcpy_htod_async(d_input_1, h_input_1, stream) # Run inference. print('load profiler') context.profiler = trt.Profiler() print('execute') context.execute(batch_size=1, bindings=[int(d_input_1), int(d_output)]) print('Transfer predictions back from the GPU.') # Transfer predictions back from the GPU. cuda.memcpy_dtoh_async(h_output, d_output, stream) # Synchronize the stream stream.synchronize() # Return the host output. print(h_output.shape) out = h_output.reshape((1,-1)) return out
true
true
f70831b92f771b21149a5234b61179e970fa0c1c
2,406
py
Python
parquet/__main__.py
snowch/parquet-python
e2caab7aceca91a3075998d0113e186f8ba2ca37
[ "Apache-2.0" ]
1
2019-03-23T15:15:49.000Z
2019-03-23T15:15:49.000Z
parquet/__main__.py
snowch/parquet-python
e2caab7aceca91a3075998d0113e186f8ba2ca37
[ "Apache-2.0" ]
null
null
null
parquet/__main__.py
snowch/parquet-python
e2caab7aceca91a3075998d0113e186f8ba2ca37
[ "Apache-2.0" ]
null
null
null
"""parquet - tool for inspecting parquet files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import logging
import sys


def setup_logging(options=None):
    """Configure logging based on options."""
    level = logging.DEBUG if options is not None and options.debug \
        else logging.WARNING
    console = logging.StreamHandler()
    console.setLevel(level)
    formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('parquet').setLevel(level)
    logging.getLogger('parquet').addHandler(console)


def main(argv=None):
    """Run parquet utility application."""
    argv = argv or sys.argv[1:]
    parser = argparse.ArgumentParser('parquet', description='Read parquet files')
    parser.add_argument('--metadata', action='store_true',
                        help='show metadata on file')
    parser.add_argument('--row-group-metadata', action='store_true',
                        help="show per row group metadata")
    parser.add_argument('--no-data', action='store_true',
                        help="don't dump any data from the file")
    parser.add_argument('--limit', action='store', type=int, default=-1,
                        help='max records to output')
    parser.add_argument('--col', action='append', type=str,
                        help='only include this column (can be '
                             'specified multiple times)')
    parser.add_argument('--no-headers', action='store_true',
                        help='skip headers in output (only applies if '
                             'format=csv)')
    parser.add_argument('--format', action='store', type=str, default='csv',
                        help='format for the output data. can be csv or json.')
    parser.add_argument('--debug', action='store_true',
                        help='log debug info to stderr')
    parser.add_argument('file', help='path to the file to parse')

    args = parser.parse_args(argv)
    setup_logging(args)

    import parquet

    if args.metadata:
        parquet.dump_metadata(args.file, args.row_group_metadata)
    if not args.no_data:
        parquet.dump(args.file, args)


if __name__ == '__main__':
    main()
36.454545
79
0.621363
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import logging import sys def setup_logging(options=None): level = logging.DEBUG if options is not None and options.debug \ else logging.WARNING console = logging.StreamHandler() console.setLevel(level) formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s') console.setFormatter(formatter) logging.getLogger('parquet').setLevel(level) logging.getLogger('parquet').addHandler(console) def main(argv=None): argv = argv or sys.argv[1:] parser = argparse.ArgumentParser('parquet', description='Read parquet files') parser.add_argument('--metadata', action='store_true', help='show metadata on file') parser.add_argument('--row-group-metadata', action='store_true', help="show per row group metadata") parser.add_argument('--no-data', action='store_true', help="don't dump any data from the file") parser.add_argument('--limit', action='store', type=int, default=-1, help='max records to output') parser.add_argument('--col', action='append', type=str, help='only include this column (can be ' 'specified multiple times)') parser.add_argument('--no-headers', action='store_true', help='skip headers in output (only applies if ' 'format=csv)') parser.add_argument('--format', action='store', type=str, default='csv', help='format for the output data. can be csv or json.') parser.add_argument('--debug', action='store_true', help='log debug info to stderr') parser.add_argument('file', help='path to the file to parse') args = parser.parse_args(argv) setup_logging(args) import parquet if args.metadata: parquet.dump_metadata(args.file, args.row_group_metadata) if not args.no_data: parquet.dump(args.file, args) if __name__ == '__main__': main()
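Because main() accepts an explicit argv list, the tool can be driven from the command line (python -m parquet ...) or programmatically. A small sketch, with the file name being an assumption:

from parquet.__main__ import main

# Dump file metadata, then the first 10 records as CSV without a header row
# ('example.parquet' is a hypothetical path).
main(['--metadata', '--limit', '10', '--no-headers', 'example.parquet'])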
true
true
f70831d31c18c271d339fcd121ce8405c9acc39f
1,722
py
Python
tests/test_assembler.py
proksee-project/proksee-cmd
c8a3eaaf1b0f46a0842972f54f805bbf6a2936ab
[ "Apache-2.0" ]
null
null
null
tests/test_assembler.py
proksee-project/proksee-cmd
c8a3eaaf1b0f46a0842972f54f805bbf6a2936ab
[ "Apache-2.0" ]
2
2021-10-21T16:55:04.000Z
2021-12-10T17:25:38.000Z
tests/test_assembler.py
proksee-project/proksee-cmd
c8a3eaaf1b0f46a0842972f54f805bbf6a2936ab
[ "Apache-2.0" ]
null
null
null
""" Copyright Government of Canada 2021 Written by: Eric Marinier, National Microbiology Laboratory, Public Health Agency of Canada Licensed under the Apache License, Version 2.0 (the "License"); you may not use this work except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os from pathlib import Path from proksee.assembler import Assembler from proksee.reads import Reads from proksee.skesa_assembler import SkesaAssembler from proksee.resource_specification import ResourceSpecification INPUT_DIR = os.path.join(Path(__file__).parent.absolute(), "data") OUTPUT_DIR = os.path.join(Path(__file__).parent.absolute(), "output") RESOURCE_SPECIFICATION = ResourceSpecification(4, 4) # 4 threads, 4 gigabytes class TestAssembler: def test_abstract_methods(self): """ Testing for crashes by simply running the abstract methods. The methods contain only "pass" otherwise. """ forward_filename = os.path.join(INPUT_DIR, "NA12878_fwd.fastq") reverse_filename = None reads = Reads(forward_filename, reverse_filename) # Can't instantiate abstract class, need to instantiate subclass: assembler = SkesaAssembler(reads, OUTPUT_DIR, RESOURCE_SPECIFICATION) Assembler.assemble(assembler) Assembler.get_contigs_filename(assembler)
35.142857
79
0.75784
import os from pathlib import Path from proksee.assembler import Assembler from proksee.reads import Reads from proksee.skesa_assembler import SkesaAssembler from proksee.resource_specification import ResourceSpecification INPUT_DIR = os.path.join(Path(__file__).parent.absolute(), "data") OUTPUT_DIR = os.path.join(Path(__file__).parent.absolute(), "output") RESOURCE_SPECIFICATION = ResourceSpecification(4, 4) class TestAssembler: def test_abstract_methods(self): forward_filename = os.path.join(INPUT_DIR, "NA12878_fwd.fastq") reverse_filename = None reads = Reads(forward_filename, reverse_filename) assembler = SkesaAssembler(reads, OUTPUT_DIR, RESOURCE_SPECIFICATION) Assembler.assemble(assembler) Assembler.get_contigs_filename(assembler)
true
true
f70832591120e472b3249e3d56d542f352f77c6e
2,011
py
Python
2021/day15.py
tcbegley/advent-of-code
e293d06e9cd994b26c0d10619672a6d8d2d65377
[ "MIT" ]
6
2021-12-05T11:21:17.000Z
2021-12-07T03:04:24.000Z
2021/day15.py
tcbegley/advent-of-code
e293d06e9cd994b26c0d10619672a6d8d2d65377
[ "MIT" ]
null
null
null
2021/day15.py
tcbegley/advent-of-code
e293d06e9cd994b26c0d10619672a6d8d2d65377
[ "MIT" ]
null
null
null
import heapq import sys from itertools import product def load_data(path): with open(path) as f: return { (i, j): int(value) for i, line in enumerate(f.readlines()) for j, value in enumerate(line.strip()) } def get_neighbours(loc, n_rows, n_cols): neighbours = [] x, y = loc for i in [-1, 1]: if 0 <= x + i < n_rows: neighbours.append((x + i, y)) if 0 <= y + i < n_cols: neighbours.append((x, y + i)) return neighbours def answer(cost): n_rows = max(i for i, _ in cost) + 1 n_cols = max(j for _, j in cost) + 1 target = (n_rows - 1, n_cols - 1) finalised = set() queue = [] heapq.heappush(queue, (0, (0, 0))) dist = {loc: float("inf") for loc in product(range(n_rows), range(n_cols))} dist[(0, 0)] = 0 while len(finalised) < len(cost): if target in finalised: return dist[target] min_cost_to_loc, loc = heapq.heappop(queue) finalised.add(loc) for nbr in get_neighbours(loc, n_rows, n_cols): if nbr not in finalised: old_dist = dist[nbr] new_dist = dist[loc] + cost[nbr] if new_dist < old_dist: dist[nbr] = min_cost_to_loc + cost[nbr] heapq.heappush(queue, (dist[nbr], nbr)) return dist[target] def extend(cost): n_rows = max(i for i, _ in cost) + 1 n_cols = max(j for _, j in cost) + 1 new_cost = {} for i in range(5): for j in range(5): for loc in cost: new_cost[(n_rows * i + loc[0], n_cols * j + loc[1])] = ( cost[loc] + i + j - 1 ) % 9 + 1 return new_cost def part_1(cost): return answer(cost) def part_2(cost): cost = extend(cost) return answer(cost) if __name__ == "__main__": data = load_data(sys.argv[1]) print(f"Part 1: {part_1(data)}") print(f"Part 2: {part_2(data)}")
24.52439
79
0.526604
import heapq import sys from itertools import product def load_data(path): with open(path) as f: return { (i, j): int(value) for i, line in enumerate(f.readlines()) for j, value in enumerate(line.strip()) } def get_neighbours(loc, n_rows, n_cols): neighbours = [] x, y = loc for i in [-1, 1]: if 0 <= x + i < n_rows: neighbours.append((x + i, y)) if 0 <= y + i < n_cols: neighbours.append((x, y + i)) return neighbours def answer(cost): n_rows = max(i for i, _ in cost) + 1 n_cols = max(j for _, j in cost) + 1 target = (n_rows - 1, n_cols - 1) finalised = set() queue = [] heapq.heappush(queue, (0, (0, 0))) dist = {loc: float("inf") for loc in product(range(n_rows), range(n_cols))} dist[(0, 0)] = 0 while len(finalised) < len(cost): if target in finalised: return dist[target] min_cost_to_loc, loc = heapq.heappop(queue) finalised.add(loc) for nbr in get_neighbours(loc, n_rows, n_cols): if nbr not in finalised: old_dist = dist[nbr] new_dist = dist[loc] + cost[nbr] if new_dist < old_dist: dist[nbr] = min_cost_to_loc + cost[nbr] heapq.heappush(queue, (dist[nbr], nbr)) return dist[target] def extend(cost): n_rows = max(i for i, _ in cost) + 1 n_cols = max(j for _, j in cost) + 1 new_cost = {} for i in range(5): for j in range(5): for loc in cost: new_cost[(n_rows * i + loc[0], n_cols * j + loc[1])] = ( cost[loc] + i + j - 1 ) % 9 + 1 return new_cost def part_1(cost): return answer(cost) def part_2(cost): cost = extend(cost) return answer(cost) if __name__ == "__main__": data = load_data(sys.argv[1]) print(f"Part 1: {part_1(data)}") print(f"Part 2: {part_2(data)}")
true
true
f708346fbf06081c549ed239fbba07d0ba8a6bb1
2,176
py
Python
mediathekDownloader/vsMetaInfoGenerator.py
TomMeHo/mediathekDownloader
645f76b5df6710ee7cd8c761c44e6e61c026d849
[ "MIT" ]
1
2022-01-26T19:09:56.000Z
2022-01-26T19:09:56.000Z
mediathekDownloader/vsMetaInfoGenerator.py
TomMeHo/mediathekDownloader
645f76b5df6710ee7cd8c761c44e6e61c026d849
[ "MIT" ]
null
null
null
mediathekDownloader/vsMetaInfoGenerator.py
TomMeHo/mediathekDownloader
645f76b5df6710ee7cd8c761c44e6e61c026d849
[ "MIT" ]
null
null
null
import re from vsmetaEncoder import vsmetaInfo from datetime import datetime, date class VsMetaInfoGenerator(vsmetaInfo.VsMetaInfo): def __init__(self, feedItem): super(VsMetaInfoGenerator, self).__init__() self.feedItem = feedItem self.download_url = '' # parse feedItem if hasattr(feedItem, 'title'): self.episodeTitle = feedItem.title if hasattr(feedItem, 'category'): self.showTitle = feedItem.category if hasattr(feedItem, 'summary'): self.chapterSummary = feedItem.summary if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description if hasattr(feedItem, 'link'): self.download_url = feedItem.link #if hasattr(feedItem, 'published'): self.episodeReleaseDate = datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT" ) if hasattr(feedItem, 'published'): self.setEpisodeDate(datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT").date()) if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description #cleaning some parts self.chapterSummary = self.chapterSummary.replace('![CDATA[', '') self.chapterSummary = self.chapterSummary.replace(']]', '') self.tvshowLocked = True self.episodeLocked = True episodeFound = re.search('[(](\d*)\/\d[)]',self.episodeTitle) if episodeFound != None: self.episode = int(episodeFound.group(1)) seasonFound = re.search(' Staffel (\d*) ',self.episodeTitle) if seasonFound != None: self.season = int(seasonFound.group(1)) # set other defaults self.episodeLocked = False self.tvshowLocked = False self.identifyingTerm = '%s - %s -s%se%s' % (self.showTitle, self.episodeTitle, self.season, self.episode) def isUsable(self) ->bool: if (len(self.episodeTitle) > 0 or len(self.showTitle) > 0 or len(self.showTitle2) > 0) and len(self.download_url) > 0: return True else: return False
43.52
146
0.613511
import re from vsmetaEncoder import vsmetaInfo from datetime import datetime, date class VsMetaInfoGenerator(vsmetaInfo.VsMetaInfo): def __init__(self, feedItem): super(VsMetaInfoGenerator, self).__init__() self.feedItem = feedItem self.download_url = '' if hasattr(feedItem, 'title'): self.episodeTitle = feedItem.title if hasattr(feedItem, 'category'): self.showTitle = feedItem.category if hasattr(feedItem, 'summary'): self.chapterSummary = feedItem.summary if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description if hasattr(feedItem, 'link'): self.download_url = feedItem.link if hasattr(feedItem, 'published'): self.setEpisodeDate(datetime.strptime(feedItem.published, "%a, %d %b %Y %H:%M:%S GMT").date()) if hasattr(feedItem, 'description'): self.chapterSummary = feedItem.description self.chapterSummary = self.chapterSummary.replace('![CDATA[', '') self.chapterSummary = self.chapterSummary.replace(']]', '') self.tvshowLocked = True self.episodeLocked = True episodeFound = re.search('[(](\d*)\/\d[)]',self.episodeTitle) if episodeFound != None: self.episode = int(episodeFound.group(1)) seasonFound = re.search(' Staffel (\d*) ',self.episodeTitle) if seasonFound != None: self.season = int(seasonFound.group(1)) self.episodeLocked = False self.tvshowLocked = False self.identifyingTerm = '%s - %s -s%se%s' % (self.showTitle, self.episodeTitle, self.season, self.episode) def isUsable(self) ->bool: if (len(self.episodeTitle) > 0 or len(self.showTitle) > 0 or len(self.showTitle2) > 0) and len(self.download_url) > 0: return True else: return False
true
true
f7083472e4dfe4d2a370ff6e2b3b22f4880c3fc2
3,213
py
Python
lingvo/tasks/car/pointnet_test.py
argideritzalpea/lingvo
0b13f9b02f8cf2167a1b80a43222257f2140e1e8
[ "Apache-2.0" ]
null
null
null
lingvo/tasks/car/pointnet_test.py
argideritzalpea/lingvo
0b13f9b02f8cf2167a1b80a43222257f2140e1e8
[ "Apache-2.0" ]
null
null
null
lingvo/tasks/car/pointnet_test.py
argideritzalpea/lingvo
0b13f9b02f8cf2167a1b80a43222257f2140e1e8
[ "Apache-2.0" ]
null
null
null
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for pointnet.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from lingvo import compat as tf from lingvo.core import py_utils from lingvo.core import test_utils from lingvo.tasks.car import pointnet class PointNetTest(test_utils.TestCase, parameterized.TestCase): def _testOutShape(self, p, input_shape, expected_shape): batch_size, num_points, _ = input_shape g = tf.Graph() with g.as_default(): net = p.Instantiate() input_data = py_utils.NestedMap( points=tf.random_uniform((batch_size, num_points, 3)), features=tf.random_uniform(input_shape), padding=tf.zeros((batch_size, num_points), dtype=tf.float32), label=tf.random_uniform((batch_size,), minval=0, maxval=16, dtype=tf.int32)) result = net.FPropDefaultTheta(input_data) with self.session(graph=g) as sess: sess.run(tf.global_variables_initializer()) np_result = sess.run(result) self.assertEqual(np_result.shape, expected_shape) @parameterized.parameters((128, 3), (128, 9), (256, 3)) def testPointNetClassifier(self, feature_dims, input_dims): p = pointnet.PointNet().Classifier( input_dims=input_dims, feature_dims=feature_dims) # Network should produce a global feature of feature_dims. self.assertEqual(p.output_dim, feature_dims) self._testOutShape(p, (8, 128, input_dims), (8, feature_dims)) def testPointNetSegmentation(self): p = pointnet.PointNet().Segmentation() # Network takes batch_size=8 input and produce 128-dim pointwise feature. self.assertEqual(p.output_dim, 128) self._testOutShape(p, (8, 100, 3), (8, 100, 128)) def testPointNetSegmentationShapeNet(self): p = pointnet.PointNet().SegmentationShapeNet() self.assertEqual(p.output_dim, 128) self._testOutShape(p, (8, 2000, 3), (8, 2000, 128)) @parameterized.parameters((128, 3), (128, 9), (256, 3)) def testPointNetPPClassifier(self, feature_dims, input_dims): p = pointnet.PointNetPP().Classifier( input_dims=input_dims, feature_dims=feature_dims) # Network should produce a global feature of feature_dims. self.assertEqual(p.output_dim, feature_dims) self._testOutShape(p, (8, 1024, input_dims), (8, feature_dims)) if __name__ == '__main__': tf.test.main()
40.1625
80
0.691566
from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized from lingvo import compat as tf from lingvo.core import py_utils from lingvo.core import test_utils from lingvo.tasks.car import pointnet class PointNetTest(test_utils.TestCase, parameterized.TestCase): def _testOutShape(self, p, input_shape, expected_shape): batch_size, num_points, _ = input_shape g = tf.Graph() with g.as_default(): net = p.Instantiate() input_data = py_utils.NestedMap( points=tf.random_uniform((batch_size, num_points, 3)), features=tf.random_uniform(input_shape), padding=tf.zeros((batch_size, num_points), dtype=tf.float32), label=tf.random_uniform((batch_size,), minval=0, maxval=16, dtype=tf.int32)) result = net.FPropDefaultTheta(input_data) with self.session(graph=g) as sess: sess.run(tf.global_variables_initializer()) np_result = sess.run(result) self.assertEqual(np_result.shape, expected_shape) @parameterized.parameters((128, 3), (128, 9), (256, 3)) def testPointNetClassifier(self, feature_dims, input_dims): p = pointnet.PointNet().Classifier( input_dims=input_dims, feature_dims=feature_dims) self.assertEqual(p.output_dim, feature_dims) self._testOutShape(p, (8, 128, input_dims), (8, feature_dims)) def testPointNetSegmentation(self): p = pointnet.PointNet().Segmentation() self.assertEqual(p.output_dim, 128) self._testOutShape(p, (8, 100, 3), (8, 100, 128)) def testPointNetSegmentationShapeNet(self): p = pointnet.PointNet().SegmentationShapeNet() self.assertEqual(p.output_dim, 128) self._testOutShape(p, (8, 2000, 3), (8, 2000, 128)) @parameterized.parameters((128, 3), (128, 9), (256, 3)) def testPointNetPPClassifier(self, feature_dims, input_dims): p = pointnet.PointNetPP().Classifier( input_dims=input_dims, feature_dims=feature_dims) self.assertEqual(p.output_dim, feature_dims) self._testOutShape(p, (8, 1024, input_dims), (8, feature_dims)) if __name__ == '__main__': tf.test.main()
true
true
f70834ecb56114e1accacc671d423b1fb09b1f29
894
py
Python
migrations/versions/f025f89b250b_.py
Misschl/flask-fresh
df17fd377b9e27aaad9fe0c5582c56098d09068c
[ "Apache-2.0" ]
null
null
null
migrations/versions/f025f89b250b_.py
Misschl/flask-fresh
df17fd377b9e27aaad9fe0c5582c56098d09068c
[ "Apache-2.0" ]
null
null
null
migrations/versions/f025f89b250b_.py
Misschl/flask-fresh
df17fd377b9e27aaad9fe0c5582c56098d09068c
[ "Apache-2.0" ]
1
2020-12-21T14:01:53.000Z
2020-12-21T14:01:53.000Z
"""empty message Revision ID: f025f89b250b Revises: 37eabcbbb8fb Create Date: 2019-10-19 18:12:48.976655 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'f025f89b250b' down_revision = '37eabcbbb8fb' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('df_goods_image', sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), sa.Column('sku', sa.Integer(), nullable=True), sa.Column('image', sa.String(length=255), nullable=True), sa.ForeignKeyConstraint(['sku'], ['df_goods_sku.id'], ), sa.PrimaryKeyConstraint('id') ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('df_goods_image') # ### end Alembic commands ###
25.542857
70
0.684564
from alembic import op import sqlalchemy as sa revision = 'f025f89b250b' down_revision = '37eabcbbb8fb' branch_labels = None depends_on = None def upgrade(): op.create_table('df_goods_image', sa.Column('id', sa.Integer(), autoincrement=True, nullable=False), sa.Column('sku', sa.Integer(), nullable=True), sa.Column('image', sa.String(length=255), nullable=True), sa.ForeignKeyConstraint(['sku'], ['df_goods_sku.id'], ), sa.PrimaryKeyConstraint('id') ) def downgrade(): op.drop_table('df_goods_image')
true
true
f708357a9d0245994baa7acb8be387d0dcb38cf4
2,347
py
Python
databuilder/extractor/dashboard/mode_analytics/mode_dashboard_usage_extractor.py
lynrayjumo/amundsendatabuilder
f1e7e246f8f4f72ad0ef5df3ef52e0e379e078f8
[ "Apache-2.0" ]
null
null
null
databuilder/extractor/dashboard/mode_analytics/mode_dashboard_usage_extractor.py
lynrayjumo/amundsendatabuilder
f1e7e246f8f4f72ad0ef5df3ef52e0e379e078f8
[ "Apache-2.0" ]
1
2020-07-07T08:44:33.000Z
2020-07-07T08:44:33.000Z
databuilder/extractor/dashboard/mode_analytics/mode_dashboard_usage_extractor.py
lynrayjumo/amundsendatabuilder
f1e7e246f8f4f72ad0ef5df3ef52e0e379e078f8
[ "Apache-2.0" ]
1
2020-08-12T09:51:58.000Z
2020-08-12T09:51:58.000Z
import logging from pyhocon import ConfigTree # noqa: F401 from typing import Any # noqa: F401 from databuilder.extractor.base_extractor import Extractor from databuilder.extractor.dashboard.mode_analytics.mode_dashboard_utils import ModeDashboardUtils from databuilder.rest_api.mode_analytics.mode_paginated_rest_api_query import ModePaginatedRestApiQuery from databuilder.rest_api.rest_api_query import RestApiQuery # noqa: F401 LOGGER = logging.getLogger(__name__) class ModeDashboardUsageExtractor(Extractor): """ A Extractor that extracts Mode dashboard's accumulated view count """ def init(self, conf): # type: (ConfigTree) -> None self._conf = conf restapi_query = self._build_restapi_query() self._extractor = ModeDashboardUtils.create_mode_rest_api_extractor(restapi_query=restapi_query, conf=self._conf) def extract(self): # type: () -> Any return self._extractor.extract() def get_scope(self): # type: () -> str return 'extractor.mode_dashboard_usage' def _build_restapi_query(self): """ Build REST API Query. To get Mode Dashboard usage, it needs to call two APIs (spaces API and reports API) joining together. :return: A RestApiQuery that provides Mode Dashboard metadata """ # type: () -> RestApiQuery # https://mode.com/developer/api-reference/analytics/reports/#listReportsInSpace reports_url_template = 'https://app.mode.com/api/{organization}/spaces/{dashboard_group_id}/reports' spaces_query = ModeDashboardUtils.get_spaces_query_api(conf=self._conf) params = ModeDashboardUtils.get_auth_params(conf=self._conf) # Reports # JSONPATH expression. it goes into array which is located in _embedded.reports and then extracts token, # and view_count json_path = '_embedded.reports[*].[token,view_count]' field_names = ['dashboard_id', 'accumulated_view_count'] reports_query = ModePaginatedRestApiQuery(query_to_join=spaces_query, url=reports_url_template, params=params, json_path=json_path, field_names=field_names, skip_no_result=True) return reports_query
39.116667
118
0.688539
import logging from pyhocon import ConfigTree from typing import Any from databuilder.extractor.base_extractor import Extractor from databuilder.extractor.dashboard.mode_analytics.mode_dashboard_utils import ModeDashboardUtils from databuilder.rest_api.mode_analytics.mode_paginated_rest_api_query import ModePaginatedRestApiQuery from databuilder.rest_api.rest_api_query import RestApiQuery LOGGER = logging.getLogger(__name__) class ModeDashboardUsageExtractor(Extractor): def init(self, conf): self._conf = conf restapi_query = self._build_restapi_query() self._extractor = ModeDashboardUtils.create_mode_rest_api_extractor(restapi_query=restapi_query, conf=self._conf) def extract(self): return self._extractor.extract() def get_scope(self): return 'extractor.mode_dashboard_usage' def _build_restapi_query(self): reports_url_template = 'https://app.mode.com/api/{organization}/spaces/{dashboard_group_id}/reports' spaces_query = ModeDashboardUtils.get_spaces_query_api(conf=self._conf) params = ModeDashboardUtils.get_auth_params(conf=self._conf) json_path = '_embedded.reports[*].[token,view_count]' field_names = ['dashboard_id', 'accumulated_view_count'] reports_query = ModePaginatedRestApiQuery(query_to_join=spaces_query, url=reports_url_template, params=params, json_path=json_path, field_names=field_names, skip_no_result=True) return reports_query
true
true
f70835b4ce9ae1fc5c692777609f137cc7b8f0ac
536
py
Python
vehicle/signals.py
COS301-SE-2020/ctrlintelligencecapstone
ddfc92408ed296c6bf64b2dd071b948a1446ede8
[ "MIT" ]
null
null
null
vehicle/signals.py
COS301-SE-2020/ctrlintelligencecapstone
ddfc92408ed296c6bf64b2dd071b948a1446ede8
[ "MIT" ]
null
null
null
vehicle/signals.py
COS301-SE-2020/ctrlintelligencecapstone
ddfc92408ed296c6bf64b2dd071b948a1446ede8
[ "MIT" ]
1
2021-05-18T02:53:10.000Z
2021-05-18T02:53:10.000Z
from django.db.models.signals import post_save from django.dispatch import receiver from .models import Vehicle, ImageSpace from darknet_dmg import detect @receiver(post_save, sender=Vehicle) def damage_detection(sender, instance, **kwargs): image = ImageSpace.objects.filter(vehicle=instance.id).last() path = image.image.path output = detect.detect(path) print(output) #./darknet detector test data/obj.data cfg/yolov4-obj.cfg /mydrive/yolov4/backup/yolov4-obj_3000.weights /mydrive/images/car2.jpg -thresh 0.3
38.285714
145
0.774254
from django.db.models.signals import post_save from django.dispatch import receiver from .models import Vehicle, ImageSpace from darknet_dmg import detect @receiver(post_save, sender=Vehicle) def damage_detection(sender, instance, **kwargs): image = ImageSpace.objects.filter(vehicle=instance.id).last() path = image.image.path output = detect.detect(path) print(output)
true
true
f708362f3738f4d4e8d3a496b9a1cf7f11134174
1,981
py
Python
src/webapp/pages/about.py
christianbernasconi96/ProductNetworkAnalysis
4d7a003cc81471f43a918a761c941e2effb7d2a3
[ "MIT" ]
null
null
null
src/webapp/pages/about.py
christianbernasconi96/ProductNetworkAnalysis
4d7a003cc81471f43a918a761c941e2effb7d2a3
[ "MIT" ]
null
null
null
src/webapp/pages/about.py
christianbernasconi96/ProductNetworkAnalysis
4d7a003cc81471f43a918a761c941e2effb7d2a3
[ "MIT" ]
null
null
null
import dash import dash_core_components as dcc import dash_html_components as html about_layout = [html.Div(children=[ html.Img(src='/assets/logo.png', className='logo-big', style={'marginTop': 'auto', 'marginBottom': 'auto'}), html.Div(children='Network and Sentiment Anlysis on Amazon Dataset', className="text-center title", style={'marginTop': '24px'}), html.Div(children='Data Analytics Project', className="text-center subtitle", style={'marginTop': '24px', 'marginBottom': 'auto'}), html.Div(children=[ dcc.Link(children=[ html.Div(children=[ html.Div(className='img-dataset'), html.Div(children='Dataset Exploration'), ], className="zan-box-shadow card-small")], href='/exploration', className="text-decor-none", style={'marginLeft': 'auto'}), dcc.Link(children=[ html.Div(children=[ html.Div(className='img-network'), html.Div(children='Network Analysis'), ], className="zan-box-shadow card-small")], href='/network', className="text-decor-none"), dcc.Link(children=[ html.Div(children=[ html.Div(className='img-sentiment'), html.Div(children='Sentiment Anlysis'), ], className="zan-box-shadow card-small")], href='/sentiment', className="text-decor-none", style={'marginRight': 'auto'}), ], className="flex-row", id='about', style={'marginTop': '24px', 'marginBottom': '24px'}), html.Div(children='Authors: Christian Bernasconi - Gabriele Ferrario - Riccardo Pozzi - Marco Ripamonti', className="text-center caption", style={'marginTop': 'auto', 'marginBottom': '10px'}), html.Div(children='Date: 06/07/2020', className="text-center caption", style={'marginBottom': '10px'}), ], className='flex-column-center p-20')]
60.030303
196
0.597173
import dash import dash_core_components as dcc import dash_html_components as html about_layout = [html.Div(children=[ html.Img(src='/assets/logo.png', className='logo-big', style={'marginTop': 'auto', 'marginBottom': 'auto'}), html.Div(children='Network and Sentiment Anlysis on Amazon Dataset', className="text-center title", style={'marginTop': '24px'}), html.Div(children='Data Analytics Project', className="text-center subtitle", style={'marginTop': '24px', 'marginBottom': 'auto'}), html.Div(children=[ dcc.Link(children=[ html.Div(children=[ html.Div(className='img-dataset'), html.Div(children='Dataset Exploration'), ], className="zan-box-shadow card-small")], href='/exploration', className="text-decor-none", style={'marginLeft': 'auto'}), dcc.Link(children=[ html.Div(children=[ html.Div(className='img-network'), html.Div(children='Network Analysis'), ], className="zan-box-shadow card-small")], href='/network', className="text-decor-none"), dcc.Link(children=[ html.Div(children=[ html.Div(className='img-sentiment'), html.Div(children='Sentiment Anlysis'), ], className="zan-box-shadow card-small")], href='/sentiment', className="text-decor-none", style={'marginRight': 'auto'}), ], className="flex-row", id='about', style={'marginTop': '24px', 'marginBottom': '24px'}), html.Div(children='Authors: Christian Bernasconi - Gabriele Ferrario - Riccardo Pozzi - Marco Ripamonti', className="text-center caption", style={'marginTop': 'auto', 'marginBottom': '10px'}), html.Div(children='Date: 06/07/2020', className="text-center caption", style={'marginBottom': '10px'}), ], className='flex-column-center p-20')]
true
true
f708366b7c8d41d94e7efbd36318e9496e569d45
427
py
Python
backend/athena_dream_stodio_33748/wsgi.py
crowdbotics-apps/athena-dream-stodio-33748
fee18065c2a4f93c88a9c8f951966d443cd1f6a7
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/athena_dream_stodio_33748/wsgi.py
crowdbotics-apps/athena-dream-stodio-33748
fee18065c2a4f93c88a9c8f951966d443cd1f6a7
[ "FTL", "AML", "RSA-MD" ]
null
null
null
backend/athena_dream_stodio_33748/wsgi.py
crowdbotics-apps/athena-dream-stodio-33748
fee18065c2a4f93c88a9c8f951966d443cd1f6a7
[ "FTL", "AML", "RSA-MD" ]
null
null
null
""" WSGI config for athena_dream_stodio_33748 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'athena_dream_stodio_33748.settings') application = get_wsgi_application()
25.117647
85
0.803279
import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'athena_dream_stodio_33748.settings') application = get_wsgi_application()
true
true
f70836887d9943e7d1cb1ae6d5c8c439f35b9265
31,586
py
Python
electrum/interface.py
samedamci/LBRY-Vault
b37b37ce1bc25f8e4b753021978ab9916556137e
[ "MIT" ]
7
2020-03-20T10:20:29.000Z
2021-04-21T12:46:25.000Z
electrum/interface.py
samedamci/LBRY-Vault
b37b37ce1bc25f8e4b753021978ab9916556137e
[ "MIT" ]
2
2021-08-08T03:28:58.000Z
2021-08-23T08:42:37.000Z
electrum/interface.py
samedamci/LBRY-Vault
b37b37ce1bc25f8e4b753021978ab9916556137e
[ "MIT" ]
3
2020-12-16T02:14:54.000Z
2021-04-21T18:35:25.000Z
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2011 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import os import re import ssl import sys import traceback import asyncio import socket from typing import Tuple, Union, List, TYPE_CHECKING, Optional, Set from collections import defaultdict from ipaddress import IPv4Network, IPv6Network, ip_address, IPv6Address import itertools import logging import aiorpcx from aiorpcx import RPCSession, Notification, NetAddress, NewlineFramer from aiorpcx.curio import timeout_after, TaskTimeout from aiorpcx.jsonrpc import JSONRPC, CodeMessageError from aiorpcx.rawsocket import RSClient import certifi from .util import ignore_exceptions, log_exceptions, bfh, SilentTaskGroup from . import util from . import x509 from . import pem from . import version from . import blockchain from .blockchain import Blockchain from . import constants from .i18n import _ from .logging import Logger if TYPE_CHECKING: from .network import Network from .simple_config import SimpleConfig ca_path = certifi.where() BUCKET_NAME_OF_ONION_SERVERS = 'onion' MAX_INCOMING_MSG_SIZE = 1_000_000 # in bytes class NetworkTimeout: # seconds class Generic: NORMAL = 30 RELAXED = 45 MOST_RELAXED = 180 class Urgent(Generic): NORMAL = 10 RELAXED = 20 MOST_RELAXED = 60 class NotificationSession(RPCSession): def __init__(self, *args, **kwargs): super(NotificationSession, self).__init__(*args, **kwargs) self.subscriptions = defaultdict(list) self.cache = {} self.default_timeout = NetworkTimeout.Generic.NORMAL self._msg_counter = itertools.count(start=1) self.interface = None # type: Optional[Interface] self.cost_hard_limit = 0 # disable aiorpcx resource limits async def handle_request(self, request): self.maybe_log(f"--> {request}") try: if isinstance(request, Notification): params, result = request.args[:-1], request.args[-1] key = self.get_hashable_key_for_rpc_call(request.method, params) if key in self.subscriptions: self.cache[key] = result for queue in self.subscriptions[key]: await queue.put(request.args) else: raise Exception(f'unexpected notification') else: raise Exception(f'unexpected request. not a notification') except Exception as e: self.interface.logger.info(f"error handling request {request}. exc: {repr(e)}") await self.close() async def send_request(self, *args, timeout=None, **kwargs): # note: semaphores/timeouts/backpressure etc are handled by # aiorpcx. 
the timeout arg here in most cases should not be set msg_id = next(self._msg_counter) self.maybe_log(f"<-- {args} {kwargs} (id: {msg_id})") try: # note: RPCSession.send_request raises TaskTimeout in case of a timeout. # TaskTimeout is a subclass of CancelledError, which is *suppressed* in TaskGroups response = await asyncio.wait_for( super().send_request(*args, **kwargs), timeout) except (TaskTimeout, asyncio.TimeoutError) as e: raise RequestTimedOut(f'request timed out: {args} (id: {msg_id})') from e except CodeMessageError as e: self.maybe_log(f"--> {repr(e)} (id: {msg_id})") raise else: self.maybe_log(f"--> {response} (id: {msg_id})") return response def set_default_timeout(self, timeout): self.sent_request_timeout = timeout self.max_send_delay = timeout async def subscribe(self, method: str, params: List, queue: asyncio.Queue): # note: until the cache is written for the first time, # each 'subscribe' call might make a request on the network. key = self.get_hashable_key_for_rpc_call(method, params) self.subscriptions[key].append(queue) if key in self.cache: result = self.cache[key] else: result = await self.send_request(method, params) self.cache[key] = result await queue.put(params + [result]) def unsubscribe(self, queue): """Unsubscribe a callback to free object references to enable GC.""" # note: we can't unsubscribe from the server, so we keep receiving # subsequent notifications for v in self.subscriptions.values(): if queue in v: v.remove(queue) @classmethod def get_hashable_key_for_rpc_call(cls, method, params): """Hashable index for subscriptions and cache""" return str(method) + repr(params) def maybe_log(self, msg: str) -> None: if not self.interface: return if self.interface.debug or self.interface.network.debug: self.interface.logger.debug(msg) def default_framer(self): # overridden so that max_size can be customized return NewlineFramer(max_size=MAX_INCOMING_MSG_SIZE) class NetworkException(Exception): pass class GracefulDisconnect(NetworkException): log_level = logging.INFO def __init__(self, *args, log_level=None, **kwargs): Exception.__init__(self, *args, **kwargs) if log_level is not None: self.log_level = log_level class RequestTimedOut(GracefulDisconnect): def __str__(self): return _("Network request timed out.") class RequestCorrupted(GracefulDisconnect): pass class ErrorParsingSSLCert(Exception): pass class ErrorGettingSSLCertFromServer(Exception): pass class ConnectError(NetworkException): pass class _RSClient(RSClient): async def create_connection(self): try: return await super().create_connection() except OSError as e: # note: using "from e" here will set __cause__ of ConnectError raise ConnectError(e) from e def deserialize_server(server_str: str) -> Tuple[str, str, str]: # host might be IPv6 address, hence do rsplit: host, port, protocol = str(server_str).rsplit(':', 2) if not host: raise ValueError('host must not be empty') if host[0] == '[' and host[-1] == ']': # IPv6 host = host[1:-1] if protocol not in ('s', 't'): raise ValueError('invalid network protocol: {}'.format(protocol)) net_addr = NetAddress(host, port) # this validates host and port host = str(net_addr.host) # canonical form (if e.g. 
IPv6 address) return host, port, protocol def serialize_server(host: str, port: Union[str, int], protocol: str) -> str: return str(':'.join([host, str(port), protocol])) def _get_cert_path_for_host(*, config: 'SimpleConfig', host: str) -> str: filename = host try: ip = ip_address(host) except ValueError: pass else: if isinstance(ip, IPv6Address): filename = f"ipv6_{ip.packed.hex()}" return os.path.join(config.path, 'certs', filename) class Interface(Logger): LOGGING_SHORTCUT = 'i' def __init__(self, network: 'Network', server: str, proxy: Optional[dict]): self.ready = asyncio.Future() self.got_disconnected = asyncio.Future() self.server = server self.host, self.port, self.protocol = deserialize_server(self.server) self.port = int(self.port) Logger.__init__(self) assert network.config.path self.cert_path = _get_cert_path_for_host(config=network.config, host=self.host) self.blockchain = None # type: Optional[Blockchain] self._requested_chunks = set() # type: Set[int] self.network = network self._set_proxy(proxy) self.session = None # type: Optional[NotificationSession] self._ipaddr_bucket = None self.tip_header = None self.tip = 0 # Dump network messages (only for this interface). Set at runtime from the console. self.debug = False asyncio.run_coroutine_threadsafe( self.network.main_taskgroup.spawn(self.run()), self.network.asyncio_loop) self.group = SilentTaskGroup() def diagnostic_name(self): return str(NetAddress(self.host, self.port)) def __str__(self): return f"<Interface {self.diagnostic_name()}>" def _set_proxy(self, proxy: dict): if proxy: username, pw = proxy.get('user'), proxy.get('password') if not username or not pw: auth = None else: auth = aiorpcx.socks.SOCKSUserAuth(username, pw) addr = NetAddress(proxy['host'], proxy['port']) if proxy['mode'] == "socks4": self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS4a, auth) elif proxy['mode'] == "socks5": self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS5, auth) else: raise NotImplementedError # http proxy not available with aiorpcx else: self.proxy = None async def is_server_ca_signed(self, ca_ssl_context): """Given a CA enforcing SSL context, returns True if the connection can be established. Returns False if the server has a self-signed certificate but otherwise is okay. Any other failures raise. 
""" try: await self.open_session(ca_ssl_context, exit_early=True) except ConnectError as e: cause = e.__cause__ if isinstance(cause, ssl.SSLError) and cause.reason == 'CERTIFICATE_VERIFY_FAILED': # failures due to self-signed certs are normal return False raise return True async def _try_saving_ssl_cert_for_first_time(self, ca_ssl_context): ca_signed = await self.is_server_ca_signed(ca_ssl_context) if ca_signed: with open(self.cert_path, 'w') as f: # empty file means this is CA signed, not self-signed f.write('') else: await self.save_certificate() def _is_saved_ssl_cert_available(self): if not os.path.exists(self.cert_path): return False with open(self.cert_path, 'r') as f: contents = f.read() if contents == '': # CA signed return True # pinned self-signed cert try: b = pem.dePem(contents, 'CERTIFICATE') except SyntaxError as e: self.logger.info(f"error parsing already saved cert: {e}") raise ErrorParsingSSLCert(e) from e try: x = x509.X509(b) except Exception as e: self.logger.info(f"error parsing already saved cert: {e}") raise ErrorParsingSSLCert(e) from e try: x.check_date() return True except x509.CertificateError as e: self.logger.info(f"certificate has expired: {e}") os.unlink(self.cert_path) # delete pinned cert only in this case return False async def _get_ssl_context(self): if self.protocol != 's': # using plaintext TCP return None # see if we already have cert for this server; or get it for the first time ca_sslc = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path) if not self._is_saved_ssl_cert_available(): try: await self._try_saving_ssl_cert_for_first_time(ca_sslc) except (OSError, ConnectError, aiorpcx.socks.SOCKSError) as e: raise ErrorGettingSSLCertFromServer(e) from e # now we have a file saved in our certificate store siz = os.stat(self.cert_path).st_size if siz == 0: # CA signed cert sslc = ca_sslc else: # pinned self-signed cert sslc = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.cert_path) sslc.check_hostname = 0 return sslc def handle_disconnect(func): async def wrapper_func(self: 'Interface', *args, **kwargs): try: return await func(self, *args, **kwargs) except GracefulDisconnect as e: self.logger.log(e.log_level, f"disconnecting due to {repr(e)}") except aiorpcx.jsonrpc.RPCError as e: self.logger.warning(f"disconnecting due to {repr(e)}") self.logger.debug(f"(disconnect) trace for {repr(e)}", exc_info=True) finally: await self.network.connection_down(self) if not self.got_disconnected.done(): self.got_disconnected.set_result(1) # if was not 'ready' yet, schedule waiting coroutines: self.ready.cancel() return wrapper_func @ignore_exceptions # do not kill main_taskgroup @log_exceptions @handle_disconnect async def run(self): try: ssl_context = await self._get_ssl_context() except (ErrorParsingSSLCert, ErrorGettingSSLCertFromServer) as e: self.logger.info(f'disconnecting due to: {repr(e)}') return try: await self.open_session(ssl_context) except (asyncio.CancelledError, ConnectError, aiorpcx.socks.SOCKSError) as e: # make SSL errors for main interface more visible (to help servers ops debug cert pinning issues) if (isinstance(e, ConnectError) and isinstance(e.__cause__, ssl.SSLError) and self.is_main_server() and not self.network.auto_connect): self.logger.warning(f'Cannot connect to main server due to SSL error ' f'(maybe cert changed compared to "{self.cert_path}"). 
Exc: {repr(e)}') else: self.logger.info(f'disconnecting due to: {repr(e)}') return def _mark_ready(self) -> None: if self.ready.cancelled(): raise GracefulDisconnect('conn establishment was too slow; *ready* future was cancelled') if self.ready.done(): return assert self.tip_header chain = blockchain.check_header(self.tip_header) if not chain: self.blockchain = blockchain.get_best_chain() else: self.blockchain = chain assert self.blockchain is not None self.logger.info(f"set blockchain with height {self.blockchain.height()}") self.ready.set_result(1) async def save_certificate(self): if not os.path.exists(self.cert_path): # we may need to retry this a few times, in case the handshake hasn't completed for _ in range(10): dercert = await self.get_certificate() if dercert: self.logger.info("succeeded in getting cert") with open(self.cert_path, 'w') as f: cert = ssl.DER_cert_to_PEM_cert(dercert) # workaround android bug cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert) f.write(cert) # even though close flushes we can't fsync when closed. # and we must flush before fsyncing, cause flush flushes to OS buffer # fsync writes to OS buffer to disk f.flush() os.fsync(f.fileno()) break await asyncio.sleep(1) else: raise GracefulDisconnect("could not get certificate after 10 tries") async def get_certificate(self): sslc = ssl.SSLContext() try: async with _RSClient(session_factory=RPCSession, host=self.host, port=self.port, ssl=sslc, proxy=self.proxy) as session: return session.transport._asyncio_transport._ssl_protocol._sslpipe._sslobj.getpeercert(True) except ValueError: return None async def get_block_header(self, height, assert_mode): self.logger.info(f'requesting block header {height} in mode {assert_mode}') # use lower timeout as we usually have network.bhi_lock here timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Urgent) res = await self.session.send_request('blockchain.block.headers', [height,1], timeout=timeout) return blockchain.deserialize_header(bytes.fromhex(res['hex']), height) async def request_chunk(self, height: int, tip=None, *, can_return_early=False): index = height // 2016 if can_return_early and index in self._requested_chunks: return self.logger.info(f"requesting chunk from height {height}") size = 2016 if tip is not None: size = min(size, tip - index * 2016 + 1) size = max(size, 0) try: self._requested_chunks.add(index) res = await self.session.send_request('blockchain.block.headers', [index * 2016, size]) finally: self._requested_chunks.discard(index) conn = self.blockchain.connect_chunk(index, res['hex']) if not conn: return conn, 0 return conn, res['count'] def is_main_server(self) -> bool: return self.network.default_server == self.server async def open_session(self, sslc, exit_early=False): async with _RSClient(session_factory=NotificationSession, host=self.host, port=self.port, ssl=sslc, proxy=self.proxy) as session: self.session = session # type: NotificationSession self.session.interface = self self.session.set_default_timeout(self.network.get_network_timeout_seconds(NetworkTimeout.Generic)) try: ver = await session.send_request('server.version', [self.client_name(), version.PROTOCOL_VERSION]) except aiorpcx.jsonrpc.RPCError as e: raise GracefulDisconnect(e) # probably 'unsupported protocol version' if exit_early: return if not self.network.check_interface_against_healthy_spread_of_connected_servers(self): raise GracefulDisconnect(f'too many connected servers already ' f'in bucket {self.bucket_based_on_ipaddress()}') 
self.logger.info(f"connection established. version: {ver}") try: async with self.group as group: await group.spawn(self.ping) await group.spawn(self.run_fetch_blocks) await group.spawn(self.monitor_connection) except aiorpcx.jsonrpc.RPCError as e: if e.code in (JSONRPC.EXCESSIVE_RESOURCE_USAGE, JSONRPC.SERVER_BUSY, JSONRPC.METHOD_NOT_FOUND): raise GracefulDisconnect(e, log_level=logging.WARNING) from e raise async def monitor_connection(self): while True: await asyncio.sleep(1) if not self.session or self.session.is_closing(): raise GracefulDisconnect('session was closed') async def ping(self): while True: await asyncio.sleep(300) await self.session.send_request('server.ping') async def close(self): if self.session: await self.session.close() # monitor_connection will cancel tasks async def run_fetch_blocks(self): header_queue = asyncio.Queue() await self.session.subscribe('blockchain.headers.subscribe', [True], header_queue) while True: item = await header_queue.get() print(item) raw_header = item[1] print(raw_header) height = raw_header['height'] header = blockchain.deserialize_header(bfh(raw_header['hex']), height) self.tip_header = header self.tip = height if self.tip < constants.net.max_checkpoint(): raise GracefulDisconnect('server tip below max checkpoint') self._mark_ready() await self._process_header_at_tip() self.network.trigger_callback('network_updated') await self.network.switch_unwanted_fork_interface() await self.network.switch_lagging_interface() async def _process_header_at_tip(self): height, header = self.tip, self.tip_header async with self.network.bhi_lock: if self.blockchain.height() >= height and self.blockchain.check_header(header): # another interface amended the blockchain self.logger.info(f"skipping header {height}") return _, height = await self.step(height, header) # in the simple case, height == self.tip+1 if height <= self.tip: await self.sync_until(height) self.network.trigger_callback('blockchain_updated') async def sync_until(self, height, next_height=None): if next_height is None: next_height = self.tip last = None while last is None or height <= next_height: prev_last, prev_height = last, height if next_height > height + 10: could_connect, num_headers = await self.request_chunk(height, next_height) if not could_connect: if height <= constants.net.max_checkpoint(): raise GracefulDisconnect('server chain conflicts with checkpoints or genesis') last, height = await self.step(height) continue self.network.trigger_callback('network_updated') height = (height // 2016 * 2016) + num_headers assert height <= next_height+1, (height, self.tip) last = 'catchup' else: last, height = await self.step(height) assert (prev_last, prev_height) != (last, height), 'had to prevent infinite loop in interface.sync_until' return last, height async def step(self, height, header=None): assert 0 <= height <= self.tip, (height, self.tip) if header is None: header = await self.get_block_header(height, 'catchup') chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) if chain: self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain # note: there is an edge case here that is not handled. # we might know the blockhash (enough for check_header) but # not have the header itself. e.g. regtest chain with only genesis. 
# this situation resolves itself on the next block return 'catchup', height+1 can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height) if not can_connect: self.logger.info(f"can't connect {height}") height, header, bad, bad_header = await self._search_headers_backwards(height, header) chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height) assert chain or can_connect if can_connect: self.logger.info(f"could connect {height}") height += 1 if isinstance(can_connect, Blockchain): # not when mocking self.blockchain = can_connect self.blockchain.save_header(header) return 'catchup', height good, bad, bad_header = await self._search_headers_binary(height, bad, bad_header, chain) return await self._resolve_potential_chain_fork_given_forkpoint(good, bad, bad_header) async def _search_headers_binary(self, height, bad, bad_header, chain): assert bad == bad_header['block_height'] _assert_header_does_not_check_against_any_chain(bad_header) self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain good = height while True: assert good < bad, (good, bad) height = (good + bad) // 2 self.logger.info(f"binary step. good {good}, bad {bad}, height {height}") header = await self.get_block_header(height, 'binary') chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) if chain: self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain good = height else: bad = height bad_header = header if good + 1 == bad: break mock = 'mock' in bad_header and bad_header['mock']['connect'](height) real = not mock and self.blockchain.can_connect(bad_header, check_height=False) if not real and not mock: raise Exception('unexpected bad header during binary: {}'.format(bad_header)) _assert_header_does_not_check_against_any_chain(bad_header) self.logger.info(f"binary search exited. good {good}, bad {bad}") return good, bad, bad_header async def _resolve_potential_chain_fork_given_forkpoint(self, good, bad, bad_header): assert good + 1 == bad assert bad == bad_header['block_height'] _assert_header_does_not_check_against_any_chain(bad_header) # 'good' is the height of a block 'good_header', somewhere in self.blockchain. # bad_header connects to good_header; bad_header itself is NOT in self.blockchain. 
bh = self.blockchain.height() assert bh >= good, (bh, good) if bh == good: height = good + 1 self.logger.info(f"catching up from {height}") return 'no_fork', height # this is a new fork we don't yet have height = bad + 1 self.logger.info(f"new fork at bad height {bad}") forkfun = self.blockchain.fork if 'mock' not in bad_header else bad_header['mock']['fork'] b = forkfun(bad_header) # type: Blockchain self.blockchain = b assert b.forkpoint == bad return 'fork', height async def _search_headers_backwards(self, height, header): async def iterate(): nonlocal height, header checkp = False if height <= constants.net.max_checkpoint(): height = constants.net.max_checkpoint() checkp = True header = await self.get_block_header(height, 'backward') chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height) if chain or can_connect: return False if checkp: raise GracefulDisconnect("server chain conflicts with checkpoints") return True bad, bad_header = height, header _assert_header_does_not_check_against_any_chain(bad_header) with blockchain.blockchains_lock: chains = list(blockchain.blockchains.values()) local_max = max([0] + [x.height() for x in chains]) if 'mock' not in header else float('inf') height = min(local_max + 1, height - 1) while await iterate(): bad, bad_header = height, header delta = self.tip - height height = self.tip - 2 * delta _assert_header_does_not_check_against_any_chain(bad_header) self.logger.info(f"exiting backward mode at {height}") return height, header, bad, bad_header @classmethod def client_name(cls) -> str: return f'electrum/{version.ELECTRUM_VERSION}' def is_tor(self): return self.host.endswith('.onion') def ip_addr(self) -> Optional[str]: session = self.session if not session: return None peer_addr = session.remote_address() if not peer_addr: return None return str(peer_addr.host) def bucket_based_on_ipaddress(self) -> str: def do_bucket(): if self.is_tor(): return BUCKET_NAME_OF_ONION_SERVERS try: ip_addr = ip_address(self.ip_addr()) except ValueError: return '' if not ip_addr: return '' if ip_addr.version == 4: slash16 = IPv4Network(ip_addr).supernet(prefixlen_diff=32-16) return str(slash16) elif ip_addr.version == 6: slash48 = IPv6Network(ip_addr).supernet(prefixlen_diff=128-48) return str(slash48) return '' if not self._ipaddr_bucket: self._ipaddr_bucket = do_bucket() return self._ipaddr_bucket def _assert_header_does_not_check_against_any_chain(header: dict) -> None: chain_bad = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) if chain_bad: raise Exception('bad_header must not check!') def check_cert(host, cert): try: b = pem.dePem(cert, 'CERTIFICATE') x = x509.X509(b) except: traceback.print_exc(file=sys.stdout) return try: x.check_date() expired = False except: expired = True m = "host: %s\n"%host m += "has_expired: %s\n"% expired util.print_msg(m) # Used by tests def _match_hostname(name, val): if val == name: return True return val.startswith('*.') and name.endswith(val[1:]) def test_certificates(): from .simple_config import SimpleConfig config = SimpleConfig() mydir = os.path.join(config.path, "certs") certs = os.listdir(mydir) for c in certs: p = os.path.join(mydir,c) with open(p, encoding='utf-8') as f: cert = f.read() check_cert(c, cert) if __name__ == "__main__": test_certificates()
40.914508
119
0.62151
import os import re import ssl import sys import traceback import asyncio import socket from typing import Tuple, Union, List, TYPE_CHECKING, Optional, Set from collections import defaultdict from ipaddress import IPv4Network, IPv6Network, ip_address, IPv6Address import itertools import logging import aiorpcx from aiorpcx import RPCSession, Notification, NetAddress, NewlineFramer from aiorpcx.curio import timeout_after, TaskTimeout from aiorpcx.jsonrpc import JSONRPC, CodeMessageError from aiorpcx.rawsocket import RSClient import certifi from .util import ignore_exceptions, log_exceptions, bfh, SilentTaskGroup from . import util from . import x509 from . import pem from . import version from . import blockchain from .blockchain import Blockchain from . import constants from .i18n import _ from .logging import Logger if TYPE_CHECKING: from .network import Network from .simple_config import SimpleConfig ca_path = certifi.where() BUCKET_NAME_OF_ONION_SERVERS = 'onion' MAX_INCOMING_MSG_SIZE = 1_000_000 class NetworkTimeout: class Generic: NORMAL = 30 RELAXED = 45 MOST_RELAXED = 180 class Urgent(Generic): NORMAL = 10 RELAXED = 20 MOST_RELAXED = 60 class NotificationSession(RPCSession): def __init__(self, *args, **kwargs): super(NotificationSession, self).__init__(*args, **kwargs) self.subscriptions = defaultdict(list) self.cache = {} self.default_timeout = NetworkTimeout.Generic.NORMAL self._msg_counter = itertools.count(start=1) self.interface = None self.cost_hard_limit = 0 async def handle_request(self, request): self.maybe_log(f"--> {request}") try: if isinstance(request, Notification): params, result = request.args[:-1], request.args[-1] key = self.get_hashable_key_for_rpc_call(request.method, params) if key in self.subscriptions: self.cache[key] = result for queue in self.subscriptions[key]: await queue.put(request.args) else: raise Exception(f'unexpected notification') else: raise Exception(f'unexpected request. not a notification') except Exception as e: self.interface.logger.info(f"error handling request {request}. 
exc: {repr(e)}") await self.close() async def send_request(self, *args, timeout=None, **kwargs): msg_id = next(self._msg_counter) self.maybe_log(f"<-- {args} {kwargs} (id: {msg_id})") try: response = await asyncio.wait_for( super().send_request(*args, **kwargs), timeout) except (TaskTimeout, asyncio.TimeoutError) as e: raise RequestTimedOut(f'request timed out: {args} (id: {msg_id})') from e except CodeMessageError as e: self.maybe_log(f"--> {repr(e)} (id: {msg_id})") raise else: self.maybe_log(f"--> {response} (id: {msg_id})") return response def set_default_timeout(self, timeout): self.sent_request_timeout = timeout self.max_send_delay = timeout async def subscribe(self, method: str, params: List, queue: asyncio.Queue): key = self.get_hashable_key_for_rpc_call(method, params) self.subscriptions[key].append(queue) if key in self.cache: result = self.cache[key] else: result = await self.send_request(method, params) self.cache[key] = result await queue.put(params + [result]) def unsubscribe(self, queue): # subsequent notifications for v in self.subscriptions.values(): if queue in v: v.remove(queue) @classmethod def get_hashable_key_for_rpc_call(cls, method, params): return str(method) + repr(params) def maybe_log(self, msg: str) -> None: if not self.interface: return if self.interface.debug or self.interface.network.debug: self.interface.logger.debug(msg) def default_framer(self): # overridden so that max_size can be customized return NewlineFramer(max_size=MAX_INCOMING_MSG_SIZE) class NetworkException(Exception): pass class GracefulDisconnect(NetworkException): log_level = logging.INFO def __init__(self, *args, log_level=None, **kwargs): Exception.__init__(self, *args, **kwargs) if log_level is not None: self.log_level = log_level class RequestTimedOut(GracefulDisconnect): def __str__(self): return _("Network request timed out.") class RequestCorrupted(GracefulDisconnect): pass class ErrorParsingSSLCert(Exception): pass class ErrorGettingSSLCertFromServer(Exception): pass class ConnectError(NetworkException): pass class _RSClient(RSClient): async def create_connection(self): try: return await super().create_connection() except OSError as e: # note: using "from e" here will set __cause__ of ConnectError raise ConnectError(e) from e def deserialize_server(server_str: str) -> Tuple[str, str, str]: # host might be IPv6 address, hence do rsplit: host, port, protocol = str(server_str).rsplit(':', 2) if not host: raise ValueError('host must not be empty') if host[0] == '[' and host[-1] == ']': # IPv6 host = host[1:-1] if protocol not in ('s', 't'): raise ValueError('invalid network protocol: {}'.format(protocol)) net_addr = NetAddress(host, port) # this validates host and port host = str(net_addr.host) # canonical form (if e.g. 
IPv6 address) return host, port, protocol def serialize_server(host: str, port: Union[str, int], protocol: str) -> str: return str(':'.join([host, str(port), protocol])) def _get_cert_path_for_host(*, config: 'SimpleConfig', host: str) -> str: filename = host try: ip = ip_address(host) except ValueError: pass else: if isinstance(ip, IPv6Address): filename = f"ipv6_{ip.packed.hex()}" return os.path.join(config.path, 'certs', filename) class Interface(Logger): LOGGING_SHORTCUT = 'i' def __init__(self, network: 'Network', server: str, proxy: Optional[dict]): self.ready = asyncio.Future() self.got_disconnected = asyncio.Future() self.server = server self.host, self.port, self.protocol = deserialize_server(self.server) self.port = int(self.port) Logger.__init__(self) assert network.config.path self.cert_path = _get_cert_path_for_host(config=network.config, host=self.host) self.blockchain = None # type: Optional[Blockchain] self._requested_chunks = set() # type: Set[int] self.network = network self._set_proxy(proxy) self.session = None # type: Optional[NotificationSession] self._ipaddr_bucket = None self.tip_header = None self.tip = 0 # Dump network messages (only for this interface). Set at runtime from the console. self.debug = False asyncio.run_coroutine_threadsafe( self.network.main_taskgroup.spawn(self.run()), self.network.asyncio_loop) self.group = SilentTaskGroup() def diagnostic_name(self): return str(NetAddress(self.host, self.port)) def __str__(self): return f"<Interface {self.diagnostic_name()}>" def _set_proxy(self, proxy: dict): if proxy: username, pw = proxy.get('user'), proxy.get('password') if not username or not pw: auth = None else: auth = aiorpcx.socks.SOCKSUserAuth(username, pw) addr = NetAddress(proxy['host'], proxy['port']) if proxy['mode'] == "socks4": self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS4a, auth) elif proxy['mode'] == "socks5": self.proxy = aiorpcx.socks.SOCKSProxy(addr, aiorpcx.socks.SOCKS5, auth) else: raise NotImplementedError # http proxy not available with aiorpcx else: self.proxy = None async def is_server_ca_signed(self, ca_ssl_context): try: await self.open_session(ca_ssl_context, exit_early=True) except ConnectError as e: cause = e.__cause__ if isinstance(cause, ssl.SSLError) and cause.reason == 'CERTIFICATE_VERIFY_FAILED': # failures due to self-signed certs are normal return False raise return True async def _try_saving_ssl_cert_for_first_time(self, ca_ssl_context): ca_signed = await self.is_server_ca_signed(ca_ssl_context) if ca_signed: with open(self.cert_path, 'w') as f: # empty file means this is CA signed, not self-signed f.write('') else: await self.save_certificate() def _is_saved_ssl_cert_available(self): if not os.path.exists(self.cert_path): return False with open(self.cert_path, 'r') as f: contents = f.read() if contents == '': # CA signed return True # pinned self-signed cert try: b = pem.dePem(contents, 'CERTIFICATE') except SyntaxError as e: self.logger.info(f"error parsing already saved cert: {e}") raise ErrorParsingSSLCert(e) from e try: x = x509.X509(b) except Exception as e: self.logger.info(f"error parsing already saved cert: {e}") raise ErrorParsingSSLCert(e) from e try: x.check_date() return True except x509.CertificateError as e: self.logger.info(f"certificate has expired: {e}") os.unlink(self.cert_path) # delete pinned cert only in this case return False async def _get_ssl_context(self): if self.protocol != 's': # using plaintext TCP return None # see if we already have cert for this server; or get it for the 
first time ca_sslc = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=ca_path) if not self._is_saved_ssl_cert_available(): try: await self._try_saving_ssl_cert_for_first_time(ca_sslc) except (OSError, ConnectError, aiorpcx.socks.SOCKSError) as e: raise ErrorGettingSSLCertFromServer(e) from e # now we have a file saved in our certificate store siz = os.stat(self.cert_path).st_size if siz == 0: # CA signed cert sslc = ca_sslc else: # pinned self-signed cert sslc = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=self.cert_path) sslc.check_hostname = 0 return sslc def handle_disconnect(func): async def wrapper_func(self: 'Interface', *args, **kwargs): try: return await func(self, *args, **kwargs) except GracefulDisconnect as e: self.logger.log(e.log_level, f"disconnecting due to {repr(e)}") except aiorpcx.jsonrpc.RPCError as e: self.logger.warning(f"disconnecting due to {repr(e)}") self.logger.debug(f"(disconnect) trace for {repr(e)}", exc_info=True) finally: await self.network.connection_down(self) if not self.got_disconnected.done(): self.got_disconnected.set_result(1) # if was not 'ready' yet, schedule waiting coroutines: self.ready.cancel() return wrapper_func @ignore_exceptions # do not kill main_taskgroup @log_exceptions @handle_disconnect async def run(self): try: ssl_context = await self._get_ssl_context() except (ErrorParsingSSLCert, ErrorGettingSSLCertFromServer) as e: self.logger.info(f'disconnecting due to: {repr(e)}') return try: await self.open_session(ssl_context) except (asyncio.CancelledError, ConnectError, aiorpcx.socks.SOCKSError) as e: # make SSL errors for main interface more visible (to help servers ops debug cert pinning issues) if (isinstance(e, ConnectError) and isinstance(e.__cause__, ssl.SSLError) and self.is_main_server() and not self.network.auto_connect): self.logger.warning(f'Cannot connect to main server due to SSL error ' f'(maybe cert changed compared to "{self.cert_path}"). 
Exc: {repr(e)}') else: self.logger.info(f'disconnecting due to: {repr(e)}') return def _mark_ready(self) -> None: if self.ready.cancelled(): raise GracefulDisconnect('conn establishment was too slow; *ready* future was cancelled') if self.ready.done(): return assert self.tip_header chain = blockchain.check_header(self.tip_header) if not chain: self.blockchain = blockchain.get_best_chain() else: self.blockchain = chain assert self.blockchain is not None self.logger.info(f"set blockchain with height {self.blockchain.height()}") self.ready.set_result(1) async def save_certificate(self): if not os.path.exists(self.cert_path): # we may need to retry this a few times, in case the handshake hasn't completed for _ in range(10): dercert = await self.get_certificate() if dercert: self.logger.info("succeeded in getting cert") with open(self.cert_path, 'w') as f: cert = ssl.DER_cert_to_PEM_cert(dercert) cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert) f.write(cert) # and we must flush before fsyncing, cause flush flushes to OS buffer # fsync writes to OS buffer to disk f.flush() os.fsync(f.fileno()) break await asyncio.sleep(1) else: raise GracefulDisconnect("could not get certificate after 10 tries") async def get_certificate(self): sslc = ssl.SSLContext() try: async with _RSClient(session_factory=RPCSession, host=self.host, port=self.port, ssl=sslc, proxy=self.proxy) as session: return session.transport._asyncio_transport._ssl_protocol._sslpipe._sslobj.getpeercert(True) except ValueError: return None async def get_block_header(self, height, assert_mode): self.logger.info(f'requesting block header {height} in mode {assert_mode}') # use lower timeout as we usually have network.bhi_lock here timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Urgent) res = await self.session.send_request('blockchain.block.headers', [height,1], timeout=timeout) return blockchain.deserialize_header(bytes.fromhex(res['hex']), height) async def request_chunk(self, height: int, tip=None, *, can_return_early=False): index = height // 2016 if can_return_early and index in self._requested_chunks: return self.logger.info(f"requesting chunk from height {height}") size = 2016 if tip is not None: size = min(size, tip - index * 2016 + 1) size = max(size, 0) try: self._requested_chunks.add(index) res = await self.session.send_request('blockchain.block.headers', [index * 2016, size]) finally: self._requested_chunks.discard(index) conn = self.blockchain.connect_chunk(index, res['hex']) if not conn: return conn, 0 return conn, res['count'] def is_main_server(self) -> bool: return self.network.default_server == self.server async def open_session(self, sslc, exit_early=False): async with _RSClient(session_factory=NotificationSession, host=self.host, port=self.port, ssl=sslc, proxy=self.proxy) as session: self.session = session # type: NotificationSession self.session.interface = self self.session.set_default_timeout(self.network.get_network_timeout_seconds(NetworkTimeout.Generic)) try: ver = await session.send_request('server.version', [self.client_name(), version.PROTOCOL_VERSION]) except aiorpcx.jsonrpc.RPCError as e: raise GracefulDisconnect(e) # probably 'unsupported protocol version' if exit_early: return if not self.network.check_interface_against_healthy_spread_of_connected_servers(self): raise GracefulDisconnect(f'too many connected servers already ' f'in bucket {self.bucket_based_on_ipaddress()}') self.logger.info(f"connection established. 
version: {ver}") try: async with self.group as group: await group.spawn(self.ping) await group.spawn(self.run_fetch_blocks) await group.spawn(self.monitor_connection) except aiorpcx.jsonrpc.RPCError as e: if e.code in (JSONRPC.EXCESSIVE_RESOURCE_USAGE, JSONRPC.SERVER_BUSY, JSONRPC.METHOD_NOT_FOUND): raise GracefulDisconnect(e, log_level=logging.WARNING) from e raise async def monitor_connection(self): while True: await asyncio.sleep(1) if not self.session or self.session.is_closing(): raise GracefulDisconnect('session was closed') async def ping(self): while True: await asyncio.sleep(300) await self.session.send_request('server.ping') async def close(self): if self.session: await self.session.close() # monitor_connection will cancel tasks async def run_fetch_blocks(self): header_queue = asyncio.Queue() await self.session.subscribe('blockchain.headers.subscribe', [True], header_queue) while True: item = await header_queue.get() print(item) raw_header = item[1] print(raw_header) height = raw_header['height'] header = blockchain.deserialize_header(bfh(raw_header['hex']), height) self.tip_header = header self.tip = height if self.tip < constants.net.max_checkpoint(): raise GracefulDisconnect('server tip below max checkpoint') self._mark_ready() await self._process_header_at_tip() self.network.trigger_callback('network_updated') await self.network.switch_unwanted_fork_interface() await self.network.switch_lagging_interface() async def _process_header_at_tip(self): height, header = self.tip, self.tip_header async with self.network.bhi_lock: if self.blockchain.height() >= height and self.blockchain.check_header(header): # another interface amended the blockchain self.logger.info(f"skipping header {height}") return _, height = await self.step(height, header) # in the simple case, height == self.tip+1 if height <= self.tip: await self.sync_until(height) self.network.trigger_callback('blockchain_updated') async def sync_until(self, height, next_height=None): if next_height is None: next_height = self.tip last = None while last is None or height <= next_height: prev_last, prev_height = last, height if next_height > height + 10: could_connect, num_headers = await self.request_chunk(height, next_height) if not could_connect: if height <= constants.net.max_checkpoint(): raise GracefulDisconnect('server chain conflicts with checkpoints or genesis') last, height = await self.step(height) continue self.network.trigger_callback('network_updated') height = (height // 2016 * 2016) + num_headers assert height <= next_height+1, (height, self.tip) last = 'catchup' else: last, height = await self.step(height) assert (prev_last, prev_height) != (last, height), 'had to prevent infinite loop in interface.sync_until' return last, height async def step(self, height, header=None): assert 0 <= height <= self.tip, (height, self.tip) if header is None: header = await self.get_block_header(height, 'catchup') chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) if chain: self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain # note: there is an edge case here that is not handled. # we might know the blockhash (enough for check_header) but # not have the header itself. e.g. regtest chain with only genesis. 
# this situation resolves itself on the next block return 'catchup', height+1 can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height) if not can_connect: self.logger.info(f"can't connect {height}") height, header, bad, bad_header = await self._search_headers_backwards(height, header) chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height) assert chain or can_connect if can_connect: self.logger.info(f"could connect {height}") height += 1 if isinstance(can_connect, Blockchain): self.blockchain = can_connect self.blockchain.save_header(header) return 'catchup', height good, bad, bad_header = await self._search_headers_binary(height, bad, bad_header, chain) return await self._resolve_potential_chain_fork_given_forkpoint(good, bad, bad_header) async def _search_headers_binary(self, height, bad, bad_header, chain): assert bad == bad_header['block_height'] _assert_header_does_not_check_against_any_chain(bad_header) self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain good = height while True: assert good < bad, (good, bad) height = (good + bad) // 2 self.logger.info(f"binary step. good {good}, bad {bad}, height {height}") header = await self.get_block_header(height, 'binary') chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) if chain: self.blockchain = chain if isinstance(chain, Blockchain) else self.blockchain good = height else: bad = height bad_header = header if good + 1 == bad: break mock = 'mock' in bad_header and bad_header['mock']['connect'](height) real = not mock and self.blockchain.can_connect(bad_header, check_height=False) if not real and not mock: raise Exception('unexpected bad header during binary: {}'.format(bad_header)) _assert_header_does_not_check_against_any_chain(bad_header) self.logger.info(f"binary search exited. 
good {good}, bad {bad}") return good, bad, bad_header async def _resolve_potential_chain_fork_given_forkpoint(self, good, bad, bad_header): assert good + 1 == bad assert bad == bad_header['block_height'] _assert_header_does_not_check_against_any_chain(bad_header) bh = self.blockchain.height() assert bh >= good, (bh, good) if bh == good: height = good + 1 self.logger.info(f"catching up from {height}") return 'no_fork', height height = bad + 1 self.logger.info(f"new fork at bad height {bad}") forkfun = self.blockchain.fork if 'mock' not in bad_header else bad_header['mock']['fork'] b = forkfun(bad_header) # type: Blockchain self.blockchain = b assert b.forkpoint == bad return 'fork', height async def _search_headers_backwards(self, height, header): async def iterate(): nonlocal height, header checkp = False if height <= constants.net.max_checkpoint(): height = constants.net.max_checkpoint() checkp = True header = await self.get_block_header(height, 'backward') chain = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) can_connect = blockchain.can_connect(header) if 'mock' not in header else header['mock']['connect'](height) if chain or can_connect: return False if checkp: raise GracefulDisconnect("server chain conflicts with checkpoints") return True bad, bad_header = height, header _assert_header_does_not_check_against_any_chain(bad_header) with blockchain.blockchains_lock: chains = list(blockchain.blockchains.values()) local_max = max([0] + [x.height() for x in chains]) if 'mock' not in header else float('inf') height = min(local_max + 1, height - 1) while await iterate(): bad, bad_header = height, header delta = self.tip - height height = self.tip - 2 * delta _assert_header_does_not_check_against_any_chain(bad_header) self.logger.info(f"exiting backward mode at {height}") return height, header, bad, bad_header @classmethod def client_name(cls) -> str: return f'electrum/{version.ELECTRUM_VERSION}' def is_tor(self): return self.host.endswith('.onion') def ip_addr(self) -> Optional[str]: session = self.session if not session: return None peer_addr = session.remote_address() if not peer_addr: return None return str(peer_addr.host) def bucket_based_on_ipaddress(self) -> str: def do_bucket(): if self.is_tor(): return BUCKET_NAME_OF_ONION_SERVERS try: ip_addr = ip_address(self.ip_addr()) except ValueError: return '' if not ip_addr: return '' if ip_addr.version == 4: slash16 = IPv4Network(ip_addr).supernet(prefixlen_diff=32-16) return str(slash16) elif ip_addr.version == 6: slash48 = IPv6Network(ip_addr).supernet(prefixlen_diff=128-48) return str(slash48) return '' if not self._ipaddr_bucket: self._ipaddr_bucket = do_bucket() return self._ipaddr_bucket def _assert_header_does_not_check_against_any_chain(header: dict) -> None: chain_bad = blockchain.check_header(header) if 'mock' not in header else header['mock']['check'](header) if chain_bad: raise Exception('bad_header must not check!') def check_cert(host, cert): try: b = pem.dePem(cert, 'CERTIFICATE') x = x509.X509(b) except: traceback.print_exc(file=sys.stdout) return try: x.check_date() expired = False except: expired = True m = "host: %s\n"%host m += "has_expired: %s\n"% expired util.print_msg(m) # Used by tests def _match_hostname(name, val): if val == name: return True return val.startswith('*.') and name.endswith(val[1:]) def test_certificates(): from .simple_config import SimpleConfig config = SimpleConfig() mydir = os.path.join(config.path, "certs") certs = os.listdir(mydir) for c in certs: p = 
os.path.join(mydir,c) with open(p, encoding='utf-8') as f: cert = f.read() check_cert(c, cert) if __name__ == "__main__": test_certificates()
true
true
f708384c5609521d4be0fae187e770797ca48abf
7,973
py
Python
sharpy/managers/extensions/memory_manager.py
DuncanDHall/sharpy-sc2
7a47a7538ad99214e3f0288b6213cac882551180
[ "MIT" ]
null
null
null
sharpy/managers/extensions/memory_manager.py
DuncanDHall/sharpy-sc2
7a47a7538ad99214e3f0288b6213cac882551180
[ "MIT" ]
null
null
null
sharpy/managers/extensions/memory_manager.py
DuncanDHall/sharpy-sc2
7a47a7538ad99214e3f0288b6213cac882551180
[ "MIT" ]
null
null
null
from collections import deque from typing import Dict, Set, Deque, List, Optional from sc2.data import Race from sc2.position import Point2 from sharpy.events import UnitDestroyedEvent from sharpy.interfaces import IMemoryManager from sharpy.managers.core import ManagerBase from sc2.ids.unit_typeid import UnitTypeId from sc2.unit import Unit from sc2.units import Units MAX_SNAPSHOTS_PER_UNIT = 10 BURROWED_ALIAS: Set[UnitTypeId] = { UnitTypeId.BANELINGBURROWED, UnitTypeId.CREEPTUMORBURROWED, UnitTypeId.DRONEBURROWED, UnitTypeId.HYDRALISKBURROWED, UnitTypeId.INFESTORBURROWED, UnitTypeId.INFESTORTERRANBURROWED, UnitTypeId.LURKERMPBURROWED, UnitTypeId.QUEENBURROWED, UnitTypeId.RAVAGERBURROWED, UnitTypeId.ROACHBURROWED, UnitTypeId.SWARMHOSTBURROWEDMP, UnitTypeId.ULTRALISKBURROWED, UnitTypeId.WIDOWMINEBURROWED, UnitTypeId.ZERGLINGBURROWED, } class MemoryManager(ManagerBase, IMemoryManager): """Manages memories of where enemy units have last been seen. Structures are ignored because they have two tags. One for the real building and another for the building's snapshot when under fog of war. """ detectors: Set[UnitTypeId] def __init__(self): super().__init__() # Dictionary of units that we remember the position of. Keyed by unit tag. # Deque is used so that new snapshots are added to the left, and old ones are removed from the right. self._memory_units_by_tag: Dict[int, Deque[Unit]] = dict() # Dictionary of units that we know of, but which are longer present at the location last seen. Keyed by unit tag. self._archive_units_by_tag: Dict[int, Deque[Unit]] = dict() self._tags_destroyed: Set[int] = set() self.unit_dict: Dict[int, Deque[Unit]] = dict() self.expire_air = 60 # Time in seconds when snapshot expires self.expire_ground = 360 # Time in seconds when snapshot expires async def start(self, knowledge: "Knowledge"): await super().start(knowledge) if knowledge.my_race == Race.Protoss: self.detectors = {UnitTypeId.PHOTONCANNON, UnitTypeId.OBSERVER, UnitTypeId.OBSERVERSIEGEMODE} elif knowledge.my_race == Race.Terran: self.detectors = {UnitTypeId.MISSILETURRET, UnitTypeId.RAVEN} else: self.detectors = {UnitTypeId.OVERSEERSIEGEMODE, UnitTypeId.OVERSEER, UnitTypeId.SPORECRAWLER} knowledge.register_on_unit_destroyed_listener(self.on_unit_destroyed) async def update(self): detectors = None self.unit_dict.clear() # Iterate all currently visible enemy units. # self.ai.enemy_units is used here because it does not include memory lane units for unit in self.ai.enemy_units: # Make sure that we have not added the same unit tag to both dictionaries, as that could # create very confusing bugs. 
assert not (unit.tag in self._memory_units_by_tag and unit.tag in self._archive_units_by_tag) # Ignore certain types if unit.type_id in ignored_unit_types: continue if unit.tag in self._archive_units_by_tag: snaps = self._archive_units_by_tag.pop(unit.tag) else: snaps = self._memory_units_by_tag.get(unit.tag, deque(maxlen=MAX_SNAPSHOTS_PER_UNIT)) snaps.appendleft(unit) if unit.tag not in self._memory_units_by_tag: self._memory_units_by_tag[unit.tag] = snaps self.unit_dict[unit.tag] = unit memory_tags_to_remove = list() for unit_tag in self._memory_units_by_tag: if self.is_unit_visible(unit_tag): continue snap = self.get_latest_snapshot(unit_tag) points: List[Point2] = [] points.append(Point2((int(snap.position.x), int(snap.position.y)))) points.append(Point2((int(snap.position.x + 1), int(snap.position.y)))) points.append(Point2((int(snap.position.x), int(snap.position.y + 1)))) points.append(Point2((int(snap.position.x + 1), int(snap.position.y + 1)))) visible = True for point in points: if not self.ai.is_visible(point): visible = False expired = self.check_expiration(snap) if expired: self.clear_unit_cache(memory_tags_to_remove, unit_tag) elif visible: # We see that the unit is no longer there. if (snap.type_id in BURROWED_ALIAS or snap.is_burrowed) and unit_tag not in self._tags_destroyed: if detectors is None: detectors = self.cache.own(self.detectors) if detectors.closer_than(11, snap.position): self.clear_unit_cache(memory_tags_to_remove, unit_tag) else: # For burrowed units, let's change the snapshot snap._proto.is_burrowed = True # snap._proto.unit_type = BURROWED_ALIAS.get(snap.type_id, snap.type_id).value # int value snap.cache.clear() else: self.clear_unit_cache(memory_tags_to_remove, unit_tag) for tag in memory_tags_to_remove: self._memory_units_by_tag.pop(tag) memory_units = self.ghost_units # Merge enemy data with memories self.ai.enemy_units = self.ai.enemy_units + memory_units self.ai.all_enemy_units = self.ai.all_enemy_units + memory_units def clear_unit_cache(self, memory_tags_to_remove, unit_tag): memory_tags_to_remove.append(unit_tag) snaps = self._memory_units_by_tag.get(unit_tag) self._archive_units_by_tag[unit_tag] = snaps async def post_update(self): if not self.debug: return for unit in self.ghost_units: # type: Unit self.ai._client.debug_text_world(f"{unit.type_id.name}", unit.position3d, size=10) @property def ghost_units(self) -> Units: """Returns latest snapshot for all units that we know of but which are currently not visible.""" memory_units = Units([], self.ai) for tag in self._memory_units_by_tag: if self.is_unit_visible(tag): continue snap = self.get_latest_snapshot(tag) memory_units.append(snap) return memory_units # return memory_units.visible def get_latest_snapshot(self, unit_tag: int) -> Unit: """Returns the latest snapshot of a unit. Throws KeyError if unit_tag is not found.""" unit_deque = self._memory_units_by_tag[unit_tag] return unit_deque[0] def is_unit_visible(self, unit_tag: int) -> bool: """Returns true if the unit is visible on this frame.""" unit: Optional[Unit] = self.unit_dict.get(unit_tag, None) return unit is not None and not unit.is_memory def on_unit_destroyed(self, event: UnitDestroyedEvent): """Call this when a unit is destroyed, to make sure that the unit is erased from memory.""" # Remove the unit from frozen dictionaries. 
self._memory_units_by_tag.pop(event.unit_tag, None) self._archive_units_by_tag.pop(event.unit_tag, None) self._tags_destroyed.add(event.unit_tag) def check_expiration(self, snap: Unit) -> bool: if snap.is_flying: return snap.age > self.expire_air return snap.age > self.expire_ground # Will this end up being the same set as in enemy_units_manager.py ? ignored_unit_types = { # Protoss UnitTypeId.INTERCEPTOR, # Terran UnitTypeId.MULE, UnitTypeId.AUTOTURRET, # Zerg # Cocoons? UnitTypeId.LARVA, UnitTypeId.LOCUSTMP, UnitTypeId.LOCUSTMPFLYING, UnitTypeId.INFESTEDTERRAN, UnitTypeId.BROODLING, }
38.331731
121
0.660103
from collections import deque from typing import Dict, Set, Deque, List, Optional from sc2.data import Race from sc2.position import Point2 from sharpy.events import UnitDestroyedEvent from sharpy.interfaces import IMemoryManager from sharpy.managers.core import ManagerBase from sc2.ids.unit_typeid import UnitTypeId from sc2.unit import Unit from sc2.units import Units MAX_SNAPSHOTS_PER_UNIT = 10 BURROWED_ALIAS: Set[UnitTypeId] = { UnitTypeId.BANELINGBURROWED, UnitTypeId.CREEPTUMORBURROWED, UnitTypeId.DRONEBURROWED, UnitTypeId.HYDRALISKBURROWED, UnitTypeId.INFESTORBURROWED, UnitTypeId.INFESTORTERRANBURROWED, UnitTypeId.LURKERMPBURROWED, UnitTypeId.QUEENBURROWED, UnitTypeId.RAVAGERBURROWED, UnitTypeId.ROACHBURROWED, UnitTypeId.SWARMHOSTBURROWEDMP, UnitTypeId.ULTRALISKBURROWED, UnitTypeId.WIDOWMINEBURROWED, UnitTypeId.ZERGLINGBURROWED, } class MemoryManager(ManagerBase, IMemoryManager): detectors: Set[UnitTypeId] def __init__(self): super().__init__() self._memory_units_by_tag: Dict[int, Deque[Unit]] = dict() self._archive_units_by_tag: Dict[int, Deque[Unit]] = dict() self._tags_destroyed: Set[int] = set() self.unit_dict: Dict[int, Deque[Unit]] = dict() self.expire_air = 60 self.expire_ground = 360 async def start(self, knowledge: "Knowledge"): await super().start(knowledge) if knowledge.my_race == Race.Protoss: self.detectors = {UnitTypeId.PHOTONCANNON, UnitTypeId.OBSERVER, UnitTypeId.OBSERVERSIEGEMODE} elif knowledge.my_race == Race.Terran: self.detectors = {UnitTypeId.MISSILETURRET, UnitTypeId.RAVEN} else: self.detectors = {UnitTypeId.OVERSEERSIEGEMODE, UnitTypeId.OVERSEER, UnitTypeId.SPORECRAWLER} knowledge.register_on_unit_destroyed_listener(self.on_unit_destroyed) async def update(self): detectors = None self.unit_dict.clear() for unit in self.ai.enemy_units: assert not (unit.tag in self._memory_units_by_tag and unit.tag in self._archive_units_by_tag) if unit.type_id in ignored_unit_types: continue if unit.tag in self._archive_units_by_tag: snaps = self._archive_units_by_tag.pop(unit.tag) else: snaps = self._memory_units_by_tag.get(unit.tag, deque(maxlen=MAX_SNAPSHOTS_PER_UNIT)) snaps.appendleft(unit) if unit.tag not in self._memory_units_by_tag: self._memory_units_by_tag[unit.tag] = snaps self.unit_dict[unit.tag] = unit memory_tags_to_remove = list() for unit_tag in self._memory_units_by_tag: if self.is_unit_visible(unit_tag): continue snap = self.get_latest_snapshot(unit_tag) points: List[Point2] = [] points.append(Point2((int(snap.position.x), int(snap.position.y)))) points.append(Point2((int(snap.position.x + 1), int(snap.position.y)))) points.append(Point2((int(snap.position.x), int(snap.position.y + 1)))) points.append(Point2((int(snap.position.x + 1), int(snap.position.y + 1)))) visible = True for point in points: if not self.ai.is_visible(point): visible = False expired = self.check_expiration(snap) if expired: self.clear_unit_cache(memory_tags_to_remove, unit_tag) elif visible: if (snap.type_id in BURROWED_ALIAS or snap.is_burrowed) and unit_tag not in self._tags_destroyed: if detectors is None: detectors = self.cache.own(self.detectors) if detectors.closer_than(11, snap.position): self.clear_unit_cache(memory_tags_to_remove, unit_tag) else: snap._proto.is_burrowed = True # snap._proto.unit_type = BURROWED_ALIAS.get(snap.type_id, snap.type_id).value # int value snap.cache.clear() else: self.clear_unit_cache(memory_tags_to_remove, unit_tag) for tag in memory_tags_to_remove: self._memory_units_by_tag.pop(tag) memory_units = self.ghost_units # Merge enemy data with memories 
self.ai.enemy_units = self.ai.enemy_units + memory_units self.ai.all_enemy_units = self.ai.all_enemy_units + memory_units def clear_unit_cache(self, memory_tags_to_remove, unit_tag): memory_tags_to_remove.append(unit_tag) snaps = self._memory_units_by_tag.get(unit_tag) self._archive_units_by_tag[unit_tag] = snaps async def post_update(self): if not self.debug: return for unit in self.ghost_units: # type: Unit self.ai._client.debug_text_world(f"{unit.type_id.name}", unit.position3d, size=10) @property def ghost_units(self) -> Units: memory_units = Units([], self.ai) for tag in self._memory_units_by_tag: if self.is_unit_visible(tag): continue snap = self.get_latest_snapshot(tag) memory_units.append(snap) return memory_units # return memory_units.visible def get_latest_snapshot(self, unit_tag: int) -> Unit: unit_deque = self._memory_units_by_tag[unit_tag] return unit_deque[0] def is_unit_visible(self, unit_tag: int) -> bool: unit: Optional[Unit] = self.unit_dict.get(unit_tag, None) return unit is not None and not unit.is_memory def on_unit_destroyed(self, event: UnitDestroyedEvent): # Remove the unit from frozen dictionaries. self._memory_units_by_tag.pop(event.unit_tag, None) self._archive_units_by_tag.pop(event.unit_tag, None) self._tags_destroyed.add(event.unit_tag) def check_expiration(self, snap: Unit) -> bool: if snap.is_flying: return snap.age > self.expire_air return snap.age > self.expire_ground # Will this end up being the same set as in enemy_units_manager.py ? ignored_unit_types = { # Protoss UnitTypeId.INTERCEPTOR, # Terran UnitTypeId.MULE, UnitTypeId.AUTOTURRET, # Zerg # Cocoons? UnitTypeId.LARVA, UnitTypeId.LOCUSTMP, UnitTypeId.LOCUSTMPFLYING, UnitTypeId.INFESTEDTERRAN, UnitTypeId.BROODLING, }
true
true
f708393c18e2c977b6debb3a1a165eba6ac0e37d
18,133
py
Python
tests/unit/states/test_zpool.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
19
2016-01-29T14:37:52.000Z
2022-03-30T18:08:01.000Z
tests/unit/states/test_zpool.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
223
2016-03-02T16:39:41.000Z
2022-03-03T12:26:35.000Z
tests/unit/states/test_zpool.py
Noah-Huppert/salt
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
[ "Apache-2.0" ]
64
2016-02-04T19:45:26.000Z
2021-12-15T02:02:31.000Z
""" Tests for salt.states.zpool :codeauthor: Jorge Schrauwen <sjorge@blackdot.be> :maintainer: Jorge Schrauwen <sjorge@blackdot.be> :maturity: new :depends: salt.utils.zfs, salt.modules.zpool :platform: illumos,freebsd,linux """ import salt.loader import salt.states.zpool as zpool import salt.utils.zfs from salt.utils.odict import OrderedDict from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, patch from tests.support.unit import TestCase from tests.support.zfs import ZFSMockData class ZpoolTestCase(TestCase, LoaderModuleMockMixin): """ Test cases for salt.states.zpool """ @classmethod def setUpClass(cls): cls.utils_patch = ZFSMockData().get_patched_utils() @classmethod def tearDownClass(cls): cls.utils_patch = None def setup_loader_modules(self): self.opts = opts = salt.config.DEFAULT_MINION_OPTS.copy() utils = salt.loader.utils(opts, whitelist=["zfs"]) zpool_obj = { zpool: { "__opts__": opts, "__grains__": {"kernel": "SunOS"}, "__utils__": utils, } } return zpool_obj def test_absent_without_pool(self): """ Test zpool absent without a pool """ ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool is absent", "changes": {}, } mock_exists = MagicMock(return_value=False) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__utils__, self.utils_patch ): self.assertEqual(zpool.absent("myzpool"), ret) def test_absent_destroy_pool(self): """ Test zpool absent destroying pool """ ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was destroyed", "changes": {"myzpool": "destroyed"}, } mock_exists = MagicMock(return_value=True) mock_destroy = MagicMock(return_value=OrderedDict([("destroyed", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.destroy": mock_destroy} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.absent("myzpool"), ret) def test_absent_exporty_pool(self): """ Test zpool absent exporting pool """ ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was exported", "changes": {"myzpool": "exported"}, } mock_exists = MagicMock(return_value=True) mock_destroy = MagicMock(return_value=OrderedDict([("exported", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.export": mock_destroy} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.absent("myzpool", export=True), ret) def test_absent_busy(self): """ Test zpool absent on a busy pool """ ret = { "name": "myzpool", "result": False, "comment": "\n".join( [ "cannot unmount '/myzpool': Device busy", "cannot export 'myzpool': pool is busy", ] ), "changes": {}, } mock_exists = MagicMock(return_value=True) mock_destroy = MagicMock( return_value=OrderedDict( [ ("exported", False), ( "error", "\n".join( [ "cannot unmount '/myzpool': Device busy", "cannot export 'myzpool': pool is busy", ] ), ), ] ) ) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.export": mock_destroy} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.absent("myzpool", export=True), ret) def test_present_import_success(self): """ Test zpool present with import allowed and unimported pool """ ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was imported", "changes": {"myzpool": "imported"}, } config = { "import": True, } mock_exists = MagicMock(return_value=False) mock_import = 
MagicMock(return_value=OrderedDict([("imported", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.import": mock_import} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.present("myzpool", config=config), ret) def test_present_import_fail(self): """ Test zpool present with import allowed and no unimported pool or layout """ ret = { "name": "myzpool", "result": False, "comment": "storage pool myzpool was not imported, no (valid) layout specified for creation", "changes": {}, } config = { "import": True, } mock_exists = MagicMock(return_value=False) mock_import = MagicMock(return_value=OrderedDict([("imported", False)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.import": mock_import} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.present("myzpool", config=config), ret) def test_present_create_success(self): """ Test zpool present with non existing pool """ ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was created", "changes": {"myzpool": "created"}, } config = { "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": True, } filesystem_properties = { "quota": "5G", } mock_exists = MagicMock(return_value=False) mock_create = MagicMock( return_value=OrderedDict( [ ("created", True), ( "vdevs", OrderedDict( [ ("mirror-0", ["/dev/dsk/disk0", "/dev/dsk/disk1"]), ("mirror-1", ["/dev/dsk/disk2", "/dev/dsk/disk3"]), ] ), ), ] ) ) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.create": mock_create} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, filesystem_properties=filesystem_properties, ), ret, ) def test_present_create_fail(self): """ Test zpool present with non existing pool (without a layout) """ ret = { "name": "myzpool", "result": False, "comment": "storage pool myzpool was not imported, no (valid) layout specified for creation", "changes": {}, } config = { "import": False, } mock_exists = MagicMock(return_value=False) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__utils__, self.utils_patch ): self.assertEqual(zpool.present("myzpool", config=config), ret) def test_present_create_passthrough_fail(self): """ Test zpool present with non existing pool (without a layout) """ ret = { "name": "myzpool", "result": False, "comment": "\n".join( [ "invalid vdev specification", "use 'force=True' to override the following errors:", "/data/salt/vdisk0 is part of exported pool 'zsalt'", "/data/salt/vdisk1 is part of exported pool 'zsalt'", ] ), "changes": {}, } config = { "force": False, "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": True, } filesystem_properties = { "quota": "5G", } mock_exists = MagicMock(return_value=False) mock_create = MagicMock( return_value=OrderedDict( [ ("created", False), ( "error", "\n".join( [ "invalid vdev specification", "use 'force=True' to override the following errors:", "/data/salt/vdisk0 is part of exported pool 'zsalt'", "/data/salt/vdisk1 is part of exported pool 'zsalt'", ] ), ), ] ) ) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, 
{"zpool.create": mock_create} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, filesystem_properties=filesystem_properties, ), ret, ) def test_present_update_success(self): """ Test zpool present with an existing pool that needs an update """ ret = { "name": "myzpool", "result": True, "comment": "properties updated", "changes": {"myzpool": {"autoexpand": False}}, } config = { "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": False, } mock_exists = MagicMock(return_value=True) mock_get = MagicMock( return_value=OrderedDict( [ ("comment", "salt managed pool"), ("freeing", 0), ("listsnapshots", False), ("leaked", 0), ("feature@obsolete_counts", "enabled"), ("feature@sha512", "enabled"), ("delegation", True), ("dedupditto", "0"), ("dedupratio", "1.00x"), ("autoexpand", True), ("feature@bookmarks", "enabled"), ("allocated", 115712), ("guid", 1591906802560842214), ("feature@large_blocks", "enabled"), ("size", 2113929216), ("feature@enabled_txg", "active"), ("feature@hole_birth", "active"), ("capacity", 0), ("feature@multi_vdev_crash_dump", "enabled"), ("feature@extensible_dataset", "enabled"), ("cachefile", "-"), ("bootfs", "-"), ("autoreplace", True), ("readonly", False), ("version", "-"), ("health", "ONLINE"), ("expandsize", "-"), ("feature@embedded_data", "active"), ("feature@lz4_compress", "active"), ("feature@async_destroy", "enabled"), ("feature@skein", "enabled"), ("feature@empty_bpobj", "enabled"), ("feature@spacemap_histogram", "active"), ("bootsize", "-"), ("free", 2113813504), ("feature@device_removal", "enabled"), ("failmode", "wait"), ("feature@filesystem_limits", "enabled"), ("feature@edonr", "enabled"), ("altroot", "-"), ("fragmentation", "0%"), ] ) ) mock_set = MagicMock(return_value=OrderedDict([("set", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.get": mock_get} ), patch.dict(zpool.__salt__, {"zpool.set": mock_set}), patch.dict( zpool.__utils__, self.utils_patch ): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, ), ret, ) def test_present_update_nochange_success(self): """ Test zpool present with non existing pool """ config = { "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": True, } mock_exists = MagicMock(return_value=True) mock_get = MagicMock( return_value=OrderedDict( [ ("comment", "salt managed pool"), ("freeing", 0), ("listsnapshots", False), ("leaked", 0), ("feature@obsolete_counts", "enabled"), ("feature@sha512", "enabled"), ("delegation", True), ("dedupditto", "0"), ("dedupratio", "1.00x"), ("autoexpand", True), ("feature@bookmarks", "enabled"), ("allocated", 115712), ("guid", 1591906802560842214), ("feature@large_blocks", "enabled"), ("size", 2113929216), ("feature@enabled_txg", "active"), ("feature@hole_birth", "active"), ("capacity", 0), ("feature@multi_vdev_crash_dump", "enabled"), ("feature@extensible_dataset", "enabled"), ("cachefile", "-"), ("bootfs", "-"), ("autoreplace", True), ("readonly", False), ("version", "-"), ("health", "ONLINE"), ("expandsize", "-"), ("feature@embedded_data", "active"), ("feature@lz4_compress", "active"), ("feature@async_destroy", "enabled"), ("feature@skein", "enabled"), ("feature@empty_bpobj", "enabled"), 
("feature@spacemap_histogram", "active"), ("bootsize", "-"), ("free", 2113813504), ("feature@device_removal", "enabled"), ("failmode", "wait"), ("feature@filesystem_limits", "enabled"), ("feature@edonr", "enabled"), ("altroot", "-"), ("fragmentation", "0%"), ] ) ) ret = { "name": "myzpool", "result": True, "comment": "no update needed", "changes": {}, } with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}): with patch.dict(zpool.__salt__, {"zpool.get": mock_get}): with patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, ), ret, ) # Run state with test=true ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool is uptodate", "changes": {}, } with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}): with patch.dict(zpool.__salt__, {"zpool.get": mock_get}): with patch.dict(zpool.__utils__, self.utils_patch): with patch.dict(zpool.__opts__, {"test": True}): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, ), ret, )
35.346979
105
0.456295
import salt.loader import salt.states.zpool as zpool import salt.utils.zfs from salt.utils.odict import OrderedDict from tests.support.mixins import LoaderModuleMockMixin from tests.support.mock import MagicMock, patch from tests.support.unit import TestCase from tests.support.zfs import ZFSMockData class ZpoolTestCase(TestCase, LoaderModuleMockMixin): @classmethod def setUpClass(cls): cls.utils_patch = ZFSMockData().get_patched_utils() @classmethod def tearDownClass(cls): cls.utils_patch = None def setup_loader_modules(self): self.opts = opts = salt.config.DEFAULT_MINION_OPTS.copy() utils = salt.loader.utils(opts, whitelist=["zfs"]) zpool_obj = { zpool: { "__opts__": opts, "__grains__": {"kernel": "SunOS"}, "__utils__": utils, } } return zpool_obj def test_absent_without_pool(self): ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool is absent", "changes": {}, } mock_exists = MagicMock(return_value=False) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__utils__, self.utils_patch ): self.assertEqual(zpool.absent("myzpool"), ret) def test_absent_destroy_pool(self): ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was destroyed", "changes": {"myzpool": "destroyed"}, } mock_exists = MagicMock(return_value=True) mock_destroy = MagicMock(return_value=OrderedDict([("destroyed", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.destroy": mock_destroy} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.absent("myzpool"), ret) def test_absent_exporty_pool(self): ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was exported", "changes": {"myzpool": "exported"}, } mock_exists = MagicMock(return_value=True) mock_destroy = MagicMock(return_value=OrderedDict([("exported", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.export": mock_destroy} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.absent("myzpool", export=True), ret) def test_absent_busy(self): ret = { "name": "myzpool", "result": False, "comment": "\n".join( [ "cannot unmount '/myzpool': Device busy", "cannot export 'myzpool': pool is busy", ] ), "changes": {}, } mock_exists = MagicMock(return_value=True) mock_destroy = MagicMock( return_value=OrderedDict( [ ("exported", False), ( "error", "\n".join( [ "cannot unmount '/myzpool': Device busy", "cannot export 'myzpool': pool is busy", ] ), ), ] ) ) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.export": mock_destroy} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.absent("myzpool", export=True), ret) def test_present_import_success(self): ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was imported", "changes": {"myzpool": "imported"}, } config = { "import": True, } mock_exists = MagicMock(return_value=False) mock_import = MagicMock(return_value=OrderedDict([("imported", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.import": mock_import} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.present("myzpool", config=config), ret) def test_present_import_fail(self): ret = { "name": "myzpool", "result": False, "comment": "storage pool myzpool was not imported, no (valid) layout specified for creation", "changes": {}, } config = { 
"import": True, } mock_exists = MagicMock(return_value=False) mock_import = MagicMock(return_value=OrderedDict([("imported", False)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.import": mock_import} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual(zpool.present("myzpool", config=config), ret) def test_present_create_success(self): ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool was created", "changes": {"myzpool": "created"}, } config = { "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": True, } filesystem_properties = { "quota": "5G", } mock_exists = MagicMock(return_value=False) mock_create = MagicMock( return_value=OrderedDict( [ ("created", True), ( "vdevs", OrderedDict( [ ("mirror-0", ["/dev/dsk/disk0", "/dev/dsk/disk1"]), ("mirror-1", ["/dev/dsk/disk2", "/dev/dsk/disk3"]), ] ), ), ] ) ) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.create": mock_create} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, filesystem_properties=filesystem_properties, ), ret, ) def test_present_create_fail(self): ret = { "name": "myzpool", "result": False, "comment": "storage pool myzpool was not imported, no (valid) layout specified for creation", "changes": {}, } config = { "import": False, } mock_exists = MagicMock(return_value=False) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__utils__, self.utils_patch ): self.assertEqual(zpool.present("myzpool", config=config), ret) def test_present_create_passthrough_fail(self): ret = { "name": "myzpool", "result": False, "comment": "\n".join( [ "invalid vdev specification", "use 'force=True' to override the following errors:", "/data/salt/vdisk0 is part of exported pool 'zsalt'", "/data/salt/vdisk1 is part of exported pool 'zsalt'", ] ), "changes": {}, } config = { "force": False, "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": True, } filesystem_properties = { "quota": "5G", } mock_exists = MagicMock(return_value=False) mock_create = MagicMock( return_value=OrderedDict( [ ("created", False), ( "error", "\n".join( [ "invalid vdev specification", "use 'force=True' to override the following errors:", "/data/salt/vdisk0 is part of exported pool 'zsalt'", "/data/salt/vdisk1 is part of exported pool 'zsalt'", ] ), ), ] ) ) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.create": mock_create} ), patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, filesystem_properties=filesystem_properties, ), ret, ) def test_present_update_success(self): ret = { "name": "myzpool", "result": True, "comment": "properties updated", "changes": {"myzpool": {"autoexpand": False}}, } config = { "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": False, } mock_exists = MagicMock(return_value=True) mock_get = MagicMock( return_value=OrderedDict( [ ("comment", "salt managed pool"), ("freeing", 0), ("listsnapshots", False), ("leaked", 0), 
("feature@obsolete_counts", "enabled"), ("feature@sha512", "enabled"), ("delegation", True), ("dedupditto", "0"), ("dedupratio", "1.00x"), ("autoexpand", True), ("feature@bookmarks", "enabled"), ("allocated", 115712), ("guid", 1591906802560842214), ("feature@large_blocks", "enabled"), ("size", 2113929216), ("feature@enabled_txg", "active"), ("feature@hole_birth", "active"), ("capacity", 0), ("feature@multi_vdev_crash_dump", "enabled"), ("feature@extensible_dataset", "enabled"), ("cachefile", "-"), ("bootfs", "-"), ("autoreplace", True), ("readonly", False), ("version", "-"), ("health", "ONLINE"), ("expandsize", "-"), ("feature@embedded_data", "active"), ("feature@lz4_compress", "active"), ("feature@async_destroy", "enabled"), ("feature@skein", "enabled"), ("feature@empty_bpobj", "enabled"), ("feature@spacemap_histogram", "active"), ("bootsize", "-"), ("free", 2113813504), ("feature@device_removal", "enabled"), ("failmode", "wait"), ("feature@filesystem_limits", "enabled"), ("feature@edonr", "enabled"), ("altroot", "-"), ("fragmentation", "0%"), ] ) ) mock_set = MagicMock(return_value=OrderedDict([("set", True)])) with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}), patch.dict( zpool.__salt__, {"zpool.get": mock_get} ), patch.dict(zpool.__salt__, {"zpool.set": mock_set}), patch.dict( zpool.__utils__, self.utils_patch ): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, ), ret, ) def test_present_update_nochange_success(self): config = { "import": False, } layout = [ OrderedDict([("mirror", ["disk0", "disk1"])]), OrderedDict([("mirror", ["disk2", "disk3"])]), ] properties = { "autoexpand": True, } mock_exists = MagicMock(return_value=True) mock_get = MagicMock( return_value=OrderedDict( [ ("comment", "salt managed pool"), ("freeing", 0), ("listsnapshots", False), ("leaked", 0), ("feature@obsolete_counts", "enabled"), ("feature@sha512", "enabled"), ("delegation", True), ("dedupditto", "0"), ("dedupratio", "1.00x"), ("autoexpand", True), ("feature@bookmarks", "enabled"), ("allocated", 115712), ("guid", 1591906802560842214), ("feature@large_blocks", "enabled"), ("size", 2113929216), ("feature@enabled_txg", "active"), ("feature@hole_birth", "active"), ("capacity", 0), ("feature@multi_vdev_crash_dump", "enabled"), ("feature@extensible_dataset", "enabled"), ("cachefile", "-"), ("bootfs", "-"), ("autoreplace", True), ("readonly", False), ("version", "-"), ("health", "ONLINE"), ("expandsize", "-"), ("feature@embedded_data", "active"), ("feature@lz4_compress", "active"), ("feature@async_destroy", "enabled"), ("feature@skein", "enabled"), ("feature@empty_bpobj", "enabled"), ("feature@spacemap_histogram", "active"), ("bootsize", "-"), ("free", 2113813504), ("feature@device_removal", "enabled"), ("failmode", "wait"), ("feature@filesystem_limits", "enabled"), ("feature@edonr", "enabled"), ("altroot", "-"), ("fragmentation", "0%"), ] ) ) ret = { "name": "myzpool", "result": True, "comment": "no update needed", "changes": {}, } with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}): with patch.dict(zpool.__salt__, {"zpool.get": mock_get}): with patch.dict(zpool.__utils__, self.utils_patch): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, ), ret, ) ret = { "name": "myzpool", "result": True, "comment": "storage pool myzpool is uptodate", "changes": {}, } with patch.dict(zpool.__salt__, {"zpool.exists": mock_exists}): with patch.dict(zpool.__salt__, {"zpool.get": mock_get}): with 
patch.dict(zpool.__utils__, self.utils_patch): with patch.dict(zpool.__opts__, {"test": True}): self.assertEqual( zpool.present( "myzpool", config=config, layout=layout, properties=properties, ), ret, )
true
true
f7083a24574c3c411dcabf8ccf101df2fc7fb4a6
826
py
Python
adl_lrs/urls.py
Sembian/ADL_LRS
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
[ "Apache-2.0" ]
null
null
null
adl_lrs/urls.py
Sembian/ADL_LRS
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
[ "Apache-2.0" ]
null
null
null
adl_lrs/urls.py
Sembian/ADL_LRS
3535dad6371af3f9f5b67f7eabfd0f4a393e0d62
[ "Apache-2.0" ]
null
null
null
from django.conf.urls import patterns, include, url from django.views.generic import RedirectView # Uncomment the next two lines to enable the admin: from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', url(r'^$', RedirectView.as_view(url='/xAPI/')), url(r'^XAPI/', include('lrs.urls')), url(r'^xapi/', include('lrs.urls')), url(r'^xAPI/', include('lrs.urls')), # Uncomment the admin/doc line below to enable admin documentation: # url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # Uncomment the next line to enable the admin: url(r'^admin/', include(admin.site.urls)), ) urlpatterns += patterns('', url(r'^accounts/login/$', 'django.contrib.auth.views.login', name="login"), url(r'^accounts/logout/$', 'lrs.views.logout_view', name="logout"), )
34.416667
77
0.68523
from django.conf.urls import patterns, include, url from django.views.generic import RedirectView from django.contrib import admin admin.autodiscover() urlpatterns = patterns('', url(r'^$', RedirectView.as_view(url='/xAPI/')), url(r'^XAPI/', include('lrs.urls')), url(r'^xapi/', include('lrs.urls')), url(r'^xAPI/', include('lrs.urls')), url(r'^admin/', include(admin.site.urls)), ) urlpatterns += patterns('', url(r'^accounts/login/$', 'django.contrib.auth.views.login', name="login"), url(r'^accounts/logout/$', 'lrs.views.logout_view', name="logout"), )
true
true
f7083ab5c7e7774c6fca62fde92fc3109732189f
2,171
py
Python
app/eventFrameTemplateViews/forms.py
DeschutesBrewery/brewerypi
5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f
[ "MIT" ]
27
2017-11-27T05:01:05.000Z
2020-11-14T19:52:26.000Z
app/eventFrameTemplateViews/forms.py
DeschutesBrewery/brewerypi
5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f
[ "MIT" ]
259
2017-11-23T00:43:26.000Z
2020-11-03T01:07:30.000Z
app/eventFrameTemplateViews/forms.py
DeschutesBrewery/brewerypi
5459dfc6b1ed415920c13a8a7c9a2d3d3c82099f
[ "MIT" ]
8
2018-10-29T04:39:29.000Z
2020-10-01T22:18:12.000Z
from flask_wtf import FlaskForm from wtforms import BooleanField, HiddenField, StringField, SubmitField, ValidationError from wtforms.validators import Length, Required from .. models import EventFrameTemplateView class CopyEventFrameTemplateViewForm(FlaskForm): name = StringField("Name", validators = [Required(), Length(1, 45)]) description = StringField("Description", validators = [Length(0, 255)]) default = BooleanField("Default") eventFrameTemplateId = HiddenField() requestReferrer = HiddenField() submit = SubmitField("Save") def validate_name(self, field): validationError = False eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first() if eventFrameTemplateView is not None: # Trying to copy an eventFrameTemplateView using a name that already exists. validationError = True if validationError: raise ValidationError(f'The name "{field.data}" already exists.') class EventFrameTemplateViewForm(FlaskForm): name = StringField("Name", validators = [Required(), Length(1, 45)]) description = StringField("Description", validators = [Length(0, 255)]) default = BooleanField("Default") selectable = BooleanField("Selectable", default = "checked") eventFrameTemplateId = HiddenField() eventFrameTemplateViewId = HiddenField() requestReferrer = HiddenField() submit = SubmitField("Save") def validate_name(self, field): validationError = False eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first() if eventFrameTemplateView is not None: if self.eventFrameTemplateViewId.data == "": # Trying to add a new event frame template view using a name that already exists. validationError = True else: if int(self.eventFrameTemplateViewId.data) != eventFrameTemplateView.EventFrameTemplateViewId: # Trying to change the name of a event frame template view to a name that already exists. validationError = True if validationError is True: raise ValidationError('The name "{}" already exists.'.format(field.data))
45.229167
147
0.773376
from flask_wtf import FlaskForm
from wtforms import BooleanField, HiddenField, StringField, SubmitField, ValidationError
from wtforms.validators import Length, Required
from .. models import EventFrameTemplateView

class CopyEventFrameTemplateViewForm(FlaskForm):
	name = StringField("Name", validators = [Required(), Length(1, 45)])
	description = StringField("Description", validators = [Length(0, 255)])
	default = BooleanField("Default")
	eventFrameTemplateId = HiddenField()
	requestReferrer = HiddenField()
	submit = SubmitField("Save")

	def validate_name(self, field):
		validationError = False
		eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first()
		if eventFrameTemplateView is not None:
			validationError = True

		if validationError:
			raise ValidationError(f'The name "{field.data}" already exists.')

class EventFrameTemplateViewForm(FlaskForm):
	name = StringField("Name", validators = [Required(), Length(1, 45)])
	description = StringField("Description", validators = [Length(0, 255)])
	default = BooleanField("Default")
	selectable = BooleanField("Selectable", default = "checked")
	eventFrameTemplateId = HiddenField()
	eventFrameTemplateViewId = HiddenField()
	requestReferrer = HiddenField()
	submit = SubmitField("Save")

	def validate_name(self, field):
		validationError = False
		eventFrameTemplateView = EventFrameTemplateView.query.filter_by(EventFrameTemplateId = self.eventFrameTemplateId.data, Name = field.data).first()
		if eventFrameTemplateView is not None:
			if self.eventFrameTemplateViewId.data == "":
				validationError = True
			else:
				if int(self.eventFrameTemplateViewId.data) != eventFrameTemplateView.EventFrameTemplateViewId:
					validationError = True

		if validationError is True:
			raise ValidationError('The name "{}" already exists.'.format(field.data))
true
true
f7083b910d5d6d417596ef3c8b9fb8bfaf516c59
411
py
Python
hackerankteste.py
DMGAP/Python-Hackerrank
891fa7365d9acf71cc5827c248f10880c0f9d1a6
[ "MIT" ]
2
2021-09-12T14:56:14.000Z
2021-10-15T16:33:14.000Z
hackerankteste.py
DMGAP/Python-Hackerrank
891fa7365d9acf71cc5827c248f10880c0f9d1a6
[ "MIT" ]
null
null
null
hackerankteste.py
DMGAP/Python-Hackerrank
891fa7365d9acf71cc5827c248f10880c0f9d1a6
[ "MIT" ]
null
null
null
import math
import os
import random
import re
import sys

n = int(input(""))
number = int(n)
number= n%2
if n ==1:
    print("Weird")
else:
    if (number==0) and (n >= 2) or ((number==0) and (n <= 5)):
        print("Not Weird")
    elif (number ==0) and (n>=6) and ((number==0) and (n<=20)):
        print ("Weird")
    elif number==0 and n>20:
        print ("Not Weird")
    else :
        print ("Weird")
18.681818
63
0.532847
import math
import os
import random
import re
import sys

n = int(input(""))
number = int(n)
number= n%2
if n ==1:
    print("Weird")
else:
    if (number==0) and (n >= 2) or ((number==0) and (n <= 5)):
        print("Not Weird")
    elif (number ==0) and (n>=6) and ((number==0) and (n<=20)):
        print ("Weird")
    elif number==0 and n>20:
        print ("Not Weird")
    else :
        print ("Weird")
true
true
f7083c004337c3ce960bf0d789441131c57bd225
2,082
py
Python
src/transformers4/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py
yangkevin2/transformers4
4d841db3effaa42aef9ac5babdc50f5233394358
[ "Apache-2.0" ]
null
null
null
src/transformers4/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py
yangkevin2/transformers4
4d841db3effaa42aef9ac5babdc50f5233394358
[ "Apache-2.0" ]
null
null
null
src/transformers4/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py
yangkevin2/transformers4
4d841db3effaa42aef9ac5babdc50f5233394358
[ "Apache-2.0" ]
null
null
null
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvBERT checkpoint."""

import argparse

from transformers4 import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers4.utils import logging


logging.set_verbosity_info()


def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    conf = ConvBertConfig.from_json_file(convbert_config_file)
    model = ConvBertModel(conf)

    model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
    model.save_pretrained(pytorch_dump_path)

    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--convbert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained ConvBERT model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
37.178571
118
0.756484
import argparse

from transformers4 import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers4.utils import logging


logging.set_verbosity_info()


def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    conf = ConvBertConfig.from_json_file(convbert_config_file)
    model = ConvBertModel(conf)

    model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
    model.save_pretrained(pytorch_dump_path)

    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--convbert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained ConvBERT model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
true
true
f7083cb528dfbc4c395fa961f63b0a8b00653ae7
6,620
py
Python
Pi1/DevBatchControlv0.1.py
jakehyvonen/BTSPython
9580a04622226a30fea4d5cbd036c7f88a9b732d
[ "MIT" ]
null
null
null
Pi1/DevBatchControlv0.1.py
jakehyvonen/BTSPython
9580a04622226a30fea4d5cbd036c7f88a9b732d
[ "MIT" ]
null
null
null
Pi1/DevBatchControlv0.1.py
jakehyvonen/BTSPython
9580a04622226a30fea4d5cbd036c7f88a9b732d
[ "MIT" ]
null
null
null
from gpiozero import LED
from time import sleep
import socket
import serial
#low level IC control pins
led1 = LED(13) #A0 pin
led2 = LED(6) #A1 pin
led3 = LED(5) #A2 pin
led4 = LED(27) #led4+5 = device rest plate
led5 = LED(22)
###setup communication with C# host software
##TCP_IP = '169.254.130.182'
##TCP_PORT = 5005
##BUFFER_SIZE = 1024
##MESSAGE = "Hello, windummy"
##
##s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
##s.connect((TCP_IP, TCP_PORT))
###s.send(str.encode(MESSAGE))
#setup communication with Arduino Mega CNC controller
ser = serial.Serial(
    port="/dev/ttyACM1",
    baudrate=250000,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    dsrdtr=True,
    rtscts=True,
    timeout=1
)
ser.get_settings()
ser.readlines()
SerialBufferIsClear = True

def SwitchMUXtoA():
    led1.off()
    led2.off()
    led3.off()
def SwitchMUXtoB():
    led1.on()
    led2.off()
    led3.off()
def SwitchMUXtoC():
    led1.off()
    led2.on()
    led3.off()
def SwitchMUXtoD():
    led1.on()
    led2.on()
    led3.off()
def RestPlateON():
    led4.on()
    led5.on()
def RestPlateOFF():
    led4.off()
    led5.off()
#gcode-pixel positions 05/21/18
#pixel A: X5.3 Z0.9
#pixel B: X0 Z2.9
#pixel C: X2.1 Z8.3
#pixel D: X7.4 Z6.3
def SwitchToPixelA():
    SwitchMUXtoA()
    ser.write('G1 X5.3 Z0.9\n'.encode())
def SwitchToPixelB():
    SwitchMUXtoB()
    ser.write('G1 X0 Z2.9\n'.encode())
def SwitchToPixelC():
    SwitchMUXtoC()
    ser.write('G1 X2.1 Z8.3\n'.encode())
def SwitchToPixelD():
    SwitchMUXtoD()
    ser.write('G1 X7.4 Z6.3\n'.encode())
def SwapDevice():
    ser.write('G1 E0\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('M84\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    sleep(15)
    RestPlateOFF()
    sleep(3)
    RestPlateON()
    ser.write('G1 E0\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('G1 Y0\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('G1 Y32 F777\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('G1 E175\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('M84\n'.encode())
    sleep(20)
    RestPlateOFF()
    print("Finished swapping devices")
def SystemInitialize():
    RestPlateON()
    ser.write('M302 P1\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('G28 X0 Z0\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    #ser.write('G28 Z0\n'.encode())
    ser.write('G1 Y33 F777\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    #ser.write('G1 Y0\n'.encode())
    ser.write('G1 E175\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    ser.write('M84\n'.encode())
    SerialBufferIsClear = False
    while(SerialBufferIsClear != True):
        MarlinMessage = ser.readline().decode()
        print(MarlinMessage)
        if("ok" in MarlinMessage):
            SerialBufferIsClear = True
            print("got the ok")
    sleep(10) #buffer to prevent mistiming
    #RestPlateOFF()
    print("Finished Initialization")
def GetRawInput():
    var = input("Please enter a command:")
    print("entered: "+str(var))
    if(var=="A"):
        SwitchMUXtoA()
        return True
    if(var=="B"):
        SwitchMUXtoB()
        return True
    if(var=="C"):
        SwitchMUXtoC()
        return True
    if(var=="D"):
        SwitchMUXtoD()
        return True
    if(var=="ActON"):
        RestPlateON()
        return True
    if(var=="ActOFF"):
        print("thing worked")
        RestPlateOFF()
        return True
    else:
        command = str(var)+"\n"
        ser.write(command.encode())
        return False
#RestPlateON()
SystemInitialize()
while True:
##    data = s.recv(BUFFER_SIZE)
##    print(data.decode())
##    s.send(str.encode(MESSAGE))
##    if data.decode() == "SwitchMUXtoA":
##        SwitchMUXtoA()
##    MarlinMessage = ser.readline().decode()
##    #print(SerialBufferIsClear)
##    #print(MarlinMessage)
##    if("ok" in MarlinMessage):
##        SerialBufferIsClear = True
##        print("got the ok")
    print("pixel A")
    SwitchToPixelA()
    sleep(10)
    print("pixel B")
    SwitchToPixelB()
    sleep(10)
    print("pixel C")
    SwitchToPixelC()
    sleep(10)
    print("pixel D")
    SwitchToPixelD()
    sleep(10)
    SwapDevice()
##    if(SerialBufferIsClear):
##        SerialBufferIsClear = GetRawInput()
25.960784
55
0.60287
from gpiozero import LED from time import sleep import socket import serial led1 = LED(13) led2 = LED(6) led3 = LED(5) led4 = LED(27) led5 = LED(22) ser = serial.Serial( port="/dev/ttyACM1", baudrate=250000, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, dsrdtr=True, rtscts=True, timeout=1 ) ser.get_settings() ser.readlines() SerialBufferIsClear = True def SwitchMUXtoA(): led1.off() led2.off() led3.off() def SwitchMUXtoB(): led1.on() led2.off() led3.off() def SwitchMUXtoC(): led1.off() led2.on() led3.off() def SwitchMUXtoD(): led1.on() led2.on() led3.off() def RestPlateON(): led4.on() led5.on() def RestPlateOFF(): led4.off() led5.off() def SwitchToPixelA(): SwitchMUXtoA() ser.write('G1 X5.3 Z0.9\n'.encode()) def SwitchToPixelB(): SwitchMUXtoB() ser.write('G1 X0 Z2.9\n'.encode()) def SwitchToPixelC(): SwitchMUXtoC() ser.write('G1 X2.1 Z8.3\n'.encode()) def SwitchToPixelD(): SwitchMUXtoD() ser.write('G1 X7.4 Z6.3\n'.encode()) def SwapDevice(): ser.write('G1 E0\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('M84\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") sleep(15) RestPlateOFF() sleep(3) RestPlateON() ser.write('G1 E0\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('G1 Y0\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('G1 Y32 F777\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('G1 E175\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('M84\n'.encode()) sleep(20) RestPlateOFF() print("Finished swapping devices") def SystemInitialize(): RestPlateON() ser.write('M302 P1\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('G28 X0 Z0\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('G1 Y33 F777\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('G1 E175\n'.encode()) SerialBufferIsClear = False while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") ser.write('M84\n'.encode()) SerialBufferIsClear = False 
while(SerialBufferIsClear != True): MarlinMessage = ser.readline().decode() print(MarlinMessage) if("ok" in MarlinMessage): SerialBufferIsClear = True print("got the ok") sleep(10) print("Finished Initialization") def GetRawInput(): var = input("Please enter a command:") print("entered: "+str(var)) if(var=="A"): SwitchMUXtoA() return True if(var=="B"): SwitchMUXtoB() return True if(var=="C"): SwitchMUXtoC() return True if(var=="D"): SwitchMUXtoD() return True if(var=="ActON"): RestPlateON() return True if(var=="ActOFF"): print("thing worked") RestPlateOFF() return True else: command = str(var)+"\n" ser.write(command.encode()) return False SystemInitialize() while True: print("pixel A") SwitchToPixelA() sleep(10) print("pixel B") SwitchToPixelB() sleep(10) print("pixel C") SwitchToPixelC() sleep(10) print("pixel D") SwitchToPixelD() sleep(10) SwapDevice()
true
true
f7083ce629eebc31d6d0fe4a790c5a32b6ec69ca
1,020
py
Python
imagepy/menus/File/Import/roi_plg.py
adines/imagepy
d7cdf3273d25e06046626ef2ef9200b1846ea49a
[ "BSD-4-Clause" ]
1
2019-02-22T03:09:24.000Z
2019-02-22T03:09:24.000Z
imagepy/menus/File/Import/roi_plg.py
adines/imagepy
d7cdf3273d25e06046626ef2ef9200b1846ea49a
[ "BSD-4-Clause" ]
null
null
null
imagepy/menus/File/Import/roi_plg.py
adines/imagepy
d7cdf3273d25e06046626ef2ef9200b1846ea49a
[ "BSD-4-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on 12/21/2018
@author: BioinfoTongLI
"""
import numpy as np
import read_roi
from imagepy.core.engine import Free
from imagepy import IPy
from skimage.draw import polygon


class Plugin(Free):
    """load_ij_roi: use read_roi and th pass to shapely objects"""
    title = 'Import Rois from IJ'

    para = {'path': '', 'name': 'Undefined', 'width': 512, 'height': 512}
    view = [(str, 'name', 'name', ''),
            (int, 'width', (1, 3000), 0, 'width', 'pix'),
            (int, 'height', (1, 3000), 0, 'height', 'pix')]

    def load(self):
        filt = '|'.join(['%s files (*.%s)|*.%s' % (i.upper(), i, i) for i in ["zip"]])
        return IPy.getpath(self.title, filt, 'open', self.para)

    def run(self, para=None):
        ls = read_roi.read_roi_zip(para['path'])
        img = np.zeros((para['height'], para['width']), dtype=np.int32)
        for i in ls:
            img[polygon(ls[i]['y'], ls[i]['x'], img.shape)] = int(i)
        IPy.show_img([img], para['name'])
31.875
86
0.558824
import numpy as np
import read_roi
from imagepy.core.engine import Free
from imagepy import IPy
from skimage.draw import polygon


class Plugin(Free):
    title = 'Import Rois from IJ'

    para = {'path': '', 'name': 'Undefined', 'width': 512, 'height': 512}
    view = [(str, 'name', 'name', ''),
            (int, 'width', (1, 3000), 0, 'width', 'pix'),
            (int, 'height', (1, 3000), 0, 'height', 'pix')]

    def load(self):
        filt = '|'.join(['%s files (*.%s)|*.%s' % (i.upper(), i, i) for i in ["zip"]])
        return IPy.getpath(self.title, filt, 'open', self.para)

    def run(self, para=None):
        ls = read_roi.read_roi_zip(para['path'])
        img = np.zeros((para['height'], para['width']), dtype=np.int32)
        for i in ls:
            img[polygon(ls[i]['y'], ls[i]['x'], img.shape)] = int(i)
        IPy.show_img([img], para['name'])
true
true
f7083d021a8246eb35e98c07308c8cc6808d8bec
9,979
py
Python
ertk/stats.py
bagustris/emotion
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
[ "MIT" ]
3
2020-11-03T14:54:22.000Z
2021-04-12T12:23:10.000Z
ertk/stats.py
bagustris/emotion
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
[ "MIT" ]
null
null
null
ertk/stats.py
bagustris/emotion
5bd83d3ca8a6eb930f449b7a990fefd75d0c7d36
[ "MIT" ]
2
2020-12-03T06:21:59.000Z
2021-01-16T04:47:12.000Z
from functools import partial from typing import Callable, List, Union import numpy as np import pandas as pd from scipy.stats import friedmanchisquare, rankdata from sklearn.metrics.pairwise import pairwise_distances from statsmodels.stats.libqsturng import qsturng Matrix = List[List[float]] def friedman_nemenyi(table: pd.DataFrame, alpha: float = 0.05): """Runs Friedman test on given table and optionally graphs a critical-difference diagram. Args: ----- table: DataFrame The data table, with subjects as rows and independent variable (condition) as columns. alpha: float Significance level, must be in the range (0, 1), default is 0.05. Returns: -------- pval: float The p-value for the Friedman test. cd: float The critical difference from the Nemenyi post-hoc test. df: pd.DataFrame A table containing statistics relating to ranking and average values of the condiions. The dataframe has these columns: "mean_rank", "mean", "std", "median", "mad", "effect_size". """ _, pval = friedmanchisquare(*table.transpose().to_numpy()) names = list(table.columns) avgrank = rankdata(-table.to_numpy(), axis=1).mean(0) df = pd.DataFrame( { "mean_rank": avgrank, "mean": table.mean(), "std": table.std(), "median": table.median(), "mad": table.mad(), }, index=names, ).sort_values("mean_rank") topclf = df.index[0] n, k = table.shape # Effect size is calculated in terms of differences in MAD df["effect_size"] = (df.loc[topclf, "median"] - df["median"]) / np.sqrt( ((n - 1) * df.loc[topclf, "mad"] ** 2 + (n - 1) * df["mad"] ** 2) / (2 * n - 2) ) cd = qsturng(1 - alpha, k, np.inf) * np.sqrt((k * (k + 1)) / (12 * n)) return pval, cd, df def _get_dist_func(metric: Union[Callable, str], **kwargs): if callable(metric): return partial(metric, **kwargs) else: if metric != "minkowski" and "p" in kwargs: del kwargs["p"] if metric != "mahalanobis" and "VI" in kwargs: del kwargs["VI"] return partial(pairwise_distances, metric=metric, **kwargs) def bhattacharyya_dist(x: np.ndarray, y: np.ndarray, pinv: bool = False): """Calculate Bhattacharyya distance between multivariate Gaussian distributions. Args: ----- x: array-like Data matrix of shape (n1_samples, n_features) corresponding to the first group. y: array-like Data matrix of shape (n2_samples, n_features) corresponding to the second group. pinv: bool Use pseudoinverse instead of inverse. This is useful if the covariance matrices don't have full rank or otherwise aren't invertible. """ mu1 = np.expand_dims(np.mean(x, axis=0), 1) mu2 = np.expand_dims(np.mean(y, axis=0), 1) cov1 = np.cov(x, rowvar=False) cov2 = np.cov(y, rowvar=False) cov = (cov1 + cov2) / 2 _, ldet1 = np.linalg.slogdet(cov1) _, ldet2 = np.linalg.slogdet(cov2) _, ldet = np.linalg.slogdet(cov) if pinv: covinv = np.linalg.pinv(cov, hermitian=True, rcond=1e-8) else: covinv = np.linalg.inv(cov) db = (mu1 - mu2).T.dot(covinv).dot(mu1 - mu2) / 8 + ldet / 2 - ldet1 / 4 - ldet2 / 4 return db.item() def corr_ratio(x: np.ndarray, groups: Union[List[int], np.ndarray]): """Calculates correlation ratio for each feature using the given groups. Args: ----- data: numpy.ndarray Data matrix, with shape (n_instances, n_features). groups: list or numpy.ndarray 1D array of groups assignments of length n_instances. Groups should be labelled from 0 to G - 1 inclusive, where G is the number of groups. Returns: -------- eta: numpy.ndarray 1D array of correlation coefficients of length n_features. Each value is in [0, 1] except if a feature takes only one value, in which case eta will be nan. 
""" groups = np.array(groups) n_groups = groups.max() + 1 counts = np.bincount(groups) mean = x.mean(0) g_means = np.empty((n_groups, x.shape[1])) for g in range(n_groups): g_means[g, :] = x[groups == g].mean(0) num = np.sum(counts[:, None] * (g_means - mean) ** 2, axis=0) den = np.sum((x - mean) ** 2, axis=0) old_err = np.seterr(divide="ignore", invalid="ignore") eta2 = num / den np.seterr(**old_err) return np.sqrt(eta2) def dunn( x: np.ndarray, clusters: Union[List[int], np.ndarray], intra_method: str = "mean", inter_method: str = "cent", metric: Union[Callable, str] = "l2", p: int = 2, ): """Calculates the Dunn index for cluster "goodness". Args: ----- data: numpy.ndarray Data matrix, with shape (n_instances, n_features). clusters: list or numpy.ndarray 1D array of cluster assignments of length n_instances. Clusters should be labelled from 0 to C - 1 inclusive, where C is the number of clusters. intra_method: str Method for calculating intra-cluster distance. One of "max", "mean", "cent". inter_method: str Method for calculating inter-cluster distance. One of "cent". metric: str or callable Distance metric. If str, must be one of the sklearn or scipy distance methods. If callable, must take one positional argument and return a pairwise distance matrix. p: int Value of p for p-norm when using "lp" distance metric. Returns: -------- dunn: float The Dunn index for this data and cluster assignment. """ clusters = np.array(clusters, dtype=int) n_clusters = clusters.max() + 1 d = _get_dist_func(metric, p=p) intra = np.zeros(n_clusters) for c in range(n_clusters): clust_data = x[clusters == c] if intra_method == "max": idx = np.triu_indices(len(clust_data)) intra[c] = d(clust_data)[idx].max() elif intra_method == "mean": idx = np.triu_indices(len(clust_data)) intra[c] = d(clust_data)[idx].mean() elif intra_method == "cent": mean = clust_data.mean(0) intra[c] = d(clust_data, mean[None, :]).mean() inter = np.zeros((n_clusters, n_clusters)) for i in range(n_clusters): inter[i, i] = np.inf # To avoid min = 0 for j in range(i + 1, n_clusters): if inter_method == "cent": mean_i = x[clusters == i].mean(0) mean_j = x[clusters == j].mean(0) inter[i, j] = inter[j, i] = d(mean_i[None, :], mean_j[None, :]) return inter.min() / intra.max() def kappa(data: np.ndarray): """Calculates Fleiss' kappa for inter-rater agreement. Args: ----- data: numpy.ndarray The data matrix, in the form (raters x units). """ cats = np.unique(data) n, N = data.shape counts = np.stack([np.sum(data == c, 0) for c in cats], 1) p_j = np.sum(counts, axis=0) / (N * n) assert np.isclose(np.sum(p_j), 1) Pe = np.sum(p_j ** 2) P = (np.sum(counts ** 2, 1) - n) / (n * (n - 1)) Pbar = np.mean(P) return (Pbar - Pe) / (1 - Pe) class Deltas: @staticmethod def nominal(c: int, k: int): return float(c != k) @staticmethod def interval(c: float, k: float): return (c - k) ** 2 def alpha( data: np.ndarray, delta: Union[Callable[[int, int], float], List[List[float]], str] = "nominal", ): """Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for inter-rater agreement. [1] K. Krippendorff, Content analysis: An introduction to its methodology. Sage publications, 2004. Args: ----- data: numpy.ndarray The data matrix, shape (n_raters, n_units). Each cell (i, j) represents the value assigned to unit j by rater i, or 0 representing no response. delta: callable, 2-D array-like or str The delta metric. Default is the nominal metric, which takes the value 1 in case c != k and 0 otherwise. 
""" # The following implementation was based off the Wikipedia article: # https://en.wikipedia.org/wiki/Krippendorff%27s_alpha # Response categories go from 1 to R, 0 represents no response R = np.max(data) counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T count_sum = np.sum(counts, 0) assert len(count_sum) == R + 1 def ordinal(c: int, k: int): if k < c: c, k = k, c s = ( sum(count_sum[g] for g in range(c, k + 1)) - (count_sum[c] + count_sum[k]) / 2 ) return s ** 2 if isinstance(delta, str): delta = { "nominal": Deltas.nominal, "ordinal": ordinal, "interval": Deltas.interval, }[delta] if not callable(delta): try: delta[0][0] except IndexError: raise TypeError("delta must be either str, callable or 2D array.") def _delta(c, k): new_delta = delta return new_delta[c][k] delta = _delta m_u = np.sum(counts[:, 1:], 1) valid = m_u >= 2 counts = counts[valid] m_u = m_u[valid] data = data[:, valid] n = np.sum(m_u) n_cku = np.matmul(counts[:, :, None], counts[:, None, :]) for i in range(R + 1): n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1) D_o = 0 for c in range(1, R + 1): for k in range(1, R + 1): D_o += delta(c, k) * n_cku[:, c, k] D_o = np.sum(D_o / (n * (m_u - 1))) D_e = 0 P_ck = np.bincount(data.flat) for c in range(1, R + 1): for k in range(1, R + 1): D_e += delta(c, k) * P_ck[c] * P_ck[k] D_e /= n * (n - 1) return 1 - D_o / D_e
30.990683
88
0.585029
from functools import partial from typing import Callable, List, Union import numpy as np import pandas as pd from scipy.stats import friedmanchisquare, rankdata from sklearn.metrics.pairwise import pairwise_distances from statsmodels.stats.libqsturng import qsturng Matrix = List[List[float]] def friedman_nemenyi(table: pd.DataFrame, alpha: float = 0.05): _, pval = friedmanchisquare(*table.transpose().to_numpy()) names = list(table.columns) avgrank = rankdata(-table.to_numpy(), axis=1).mean(0) df = pd.DataFrame( { "mean_rank": avgrank, "mean": table.mean(), "std": table.std(), "median": table.median(), "mad": table.mad(), }, index=names, ).sort_values("mean_rank") topclf = df.index[0] n, k = table.shape df["effect_size"] = (df.loc[topclf, "median"] - df["median"]) / np.sqrt( ((n - 1) * df.loc[topclf, "mad"] ** 2 + (n - 1) * df["mad"] ** 2) / (2 * n - 2) ) cd = qsturng(1 - alpha, k, np.inf) * np.sqrt((k * (k + 1)) / (12 * n)) return pval, cd, df def _get_dist_func(metric: Union[Callable, str], **kwargs): if callable(metric): return partial(metric, **kwargs) else: if metric != "minkowski" and "p" in kwargs: del kwargs["p"] if metric != "mahalanobis" and "VI" in kwargs: del kwargs["VI"] return partial(pairwise_distances, metric=metric, **kwargs) def bhattacharyya_dist(x: np.ndarray, y: np.ndarray, pinv: bool = False): mu1 = np.expand_dims(np.mean(x, axis=0), 1) mu2 = np.expand_dims(np.mean(y, axis=0), 1) cov1 = np.cov(x, rowvar=False) cov2 = np.cov(y, rowvar=False) cov = (cov1 + cov2) / 2 _, ldet1 = np.linalg.slogdet(cov1) _, ldet2 = np.linalg.slogdet(cov2) _, ldet = np.linalg.slogdet(cov) if pinv: covinv = np.linalg.pinv(cov, hermitian=True, rcond=1e-8) else: covinv = np.linalg.inv(cov) db = (mu1 - mu2).T.dot(covinv).dot(mu1 - mu2) / 8 + ldet / 2 - ldet1 / 4 - ldet2 / 4 return db.item() def corr_ratio(x: np.ndarray, groups: Union[List[int], np.ndarray]): groups = np.array(groups) n_groups = groups.max() + 1 counts = np.bincount(groups) mean = x.mean(0) g_means = np.empty((n_groups, x.shape[1])) for g in range(n_groups): g_means[g, :] = x[groups == g].mean(0) num = np.sum(counts[:, None] * (g_means - mean) ** 2, axis=0) den = np.sum((x - mean) ** 2, axis=0) old_err = np.seterr(divide="ignore", invalid="ignore") eta2 = num / den np.seterr(**old_err) return np.sqrt(eta2) def dunn( x: np.ndarray, clusters: Union[List[int], np.ndarray], intra_method: str = "mean", inter_method: str = "cent", metric: Union[Callable, str] = "l2", p: int = 2, ): clusters = np.array(clusters, dtype=int) n_clusters = clusters.max() + 1 d = _get_dist_func(metric, p=p) intra = np.zeros(n_clusters) for c in range(n_clusters): clust_data = x[clusters == c] if intra_method == "max": idx = np.triu_indices(len(clust_data)) intra[c] = d(clust_data)[idx].max() elif intra_method == "mean": idx = np.triu_indices(len(clust_data)) intra[c] = d(clust_data)[idx].mean() elif intra_method == "cent": mean = clust_data.mean(0) intra[c] = d(clust_data, mean[None, :]).mean() inter = np.zeros((n_clusters, n_clusters)) for i in range(n_clusters): inter[i, i] = np.inf for j in range(i + 1, n_clusters): if inter_method == "cent": mean_i = x[clusters == i].mean(0) mean_j = x[clusters == j].mean(0) inter[i, j] = inter[j, i] = d(mean_i[None, :], mean_j[None, :]) return inter.min() / intra.max() def kappa(data: np.ndarray): cats = np.unique(data) n, N = data.shape counts = np.stack([np.sum(data == c, 0) for c in cats], 1) p_j = np.sum(counts, axis=0) / (N * n) assert np.isclose(np.sum(p_j), 1) Pe = np.sum(p_j ** 2) P = (np.sum(counts ** 2, 1) - 
n) / (n * (n - 1)) Pbar = np.mean(P) return (Pbar - Pe) / (1 - Pe) class Deltas: @staticmethod def nominal(c: int, k: int): return float(c != k) @staticmethod def interval(c: float, k: float): return (c - k) ** 2 def alpha( data: np.ndarray, delta: Union[Callable[[int, int], float], List[List[float]], str] = "nominal", ): R = np.max(data) counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T count_sum = np.sum(counts, 0) assert len(count_sum) == R + 1 def ordinal(c: int, k: int): if k < c: c, k = k, c s = ( sum(count_sum[g] for g in range(c, k + 1)) - (count_sum[c] + count_sum[k]) / 2 ) return s ** 2 if isinstance(delta, str): delta = { "nominal": Deltas.nominal, "ordinal": ordinal, "interval": Deltas.interval, }[delta] if not callable(delta): try: delta[0][0] except IndexError: raise TypeError("delta must be either str, callable or 2D array.") def _delta(c, k): new_delta = delta return new_delta[c][k] delta = _delta m_u = np.sum(counts[:, 1:], 1) valid = m_u >= 2 counts = counts[valid] m_u = m_u[valid] data = data[:, valid] n = np.sum(m_u) n_cku = np.matmul(counts[:, :, None], counts[:, None, :]) for i in range(R + 1): n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1) D_o = 0 for c in range(1, R + 1): for k in range(1, R + 1): D_o += delta(c, k) * n_cku[:, c, k] D_o = np.sum(D_o / (n * (m_u - 1))) D_e = 0 P_ck = np.bincount(data.flat) for c in range(1, R + 1): for k in range(1, R + 1): D_e += delta(c, k) * P_ck[c] * P_ck[k] D_e /= n * (n - 1) return 1 - D_o / D_e
true
true
f7083e7be8c43bd2e80217c44c86601be9add0cd
12,053
py
Python
rdkit/Chem/Pharm2D/Utils.py
docking-org/rdk
6eb710254f027b348a8e3089e6a92c3d40de0949
[ "PostgreSQL" ]
1
2019-01-23T06:02:24.000Z
2019-01-23T06:02:24.000Z
rdkit/Chem/Pharm2D/Utils.py
Mike575/rdkit
373a89021e478f878c6011a201e3fb8f4a122093
[ "PostgreSQL" ]
null
null
null
rdkit/Chem/Pharm2D/Utils.py
Mike575/rdkit
373a89021e478f878c6011a201e3fb8f4a122093
[ "PostgreSQL" ]
1
2022-03-30T03:22:10.000Z
2022-03-30T03:22:10.000Z
# # Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC # # @@ All Rights Reserved @@ # This file is part of the RDKit. # The contents are covered by the terms of the BSD license # which is included in the file license.txt, found at the root # of the RDKit source tree. # """ utility functionality for the 2D pharmacophores code See Docs/Chem/Pharm2D.triangles.jpg for an illustration of the way pharmacophores are broken into triangles and labelled. See Docs/Chem/Pharm2D.signatures.jpg for an illustration of bit numbering """ from __future__ import print_function, division import itertools # # number of points in a scaffold -> sequence of distances (p1,p2) in # the scaffold # nPointDistDict = { 2: ((0, 1), ), 3: ((0, 1), (0, 2), (1, 2)), 4: ((0, 1), (0, 2), (0, 3), (1, 2), (2, 3)), 5: ((0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (2, 3), (3, 4)), 6: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (1, 2), (2, 3), (3, 4), (4, 5)), 7: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)), 8: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)), 9: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8)), 10: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)), } # # number of distances in a scaffold -> number of points in the scaffold # nDistPointDict = { 1: 2, 3: 3, 5: 4, 7: 5, 9: 6, 11: 7, 13: 8, 15: 9, 17: 10, } _trianglesInPharmacophore = {} def GetTriangles(nPts): """ returns a tuple with the distance indices for triangles composing an nPts-pharmacophore """ global _trianglesInPharmacophore if nPts < 3: return [] res = _trianglesInPharmacophore.get(nPts, []) if not res: idx1, idx2, idx3 = (0, 1, nPts - 1) while idx1 < nPts - 2: res.append((idx1, idx2, idx3)) idx1 += 1 idx2 += 1 idx3 += 1 res = tuple(res) _trianglesInPharmacophore[nPts] = res return res def _fact(x): if x <= 1: return 1 accum = 1 for i in range(x): accum *= i + 1 return accum def BinsTriangleInequality(d1, d2, d3): """ checks the triangle inequality for combinations of distance bins. the general triangle inequality is: d1 + d2 >= d3 the conservative binned form of this is: d1(upper) + d2(upper) >= d3(lower) """ if d1[1] + d2[1] < d3[0]: return False if d2[1] + d3[1] < d1[0]: return False if d3[1] + d1[1] < d2[0]: return False return True def ScaffoldPasses(combo, bins=None): """ checks the scaffold passed in to see if all contributing triangles can satisfy the triangle inequality the scaffold itself (encoded in combo) is a list of binned distances """ # this is the number of points in the pharmacophore nPts = nDistPointDict[len(combo)] tris = GetTriangles(nPts) for tri in tris: ds = [bins[combo[x]] for x in tri] if not BinsTriangleInequality(ds[0], ds[1], ds[2]): return False return True _numCombDict = {} def NumCombinations(nItems, nSlots): """ returns the number of ways to fit nItems into nSlots We assume that (x,y) and (y,x) are equivalent, and (x,x) is allowed. General formula is, for N items and S slots: res = (N+S-1)! / ( (N-1)! * S! 
) """ global _numCombDict res = _numCombDict.get((nItems, nSlots), -1) if res == -1: res = _fact(nItems + nSlots - 1) // (_fact(nItems - 1) * _fact(nSlots)) _numCombDict[(nItems, nSlots)] = res return res _verbose = 0 _countCache = {} def CountUpTo(nItems, nSlots, vs, idx=0, startAt=0): """ Figures out where a given combination of indices would occur in the combinatorial explosion generated by _GetIndexCombinations_ **Arguments** - nItems: the number of items to distribute - nSlots: the number of slots in which to distribute them - vs: a sequence containing the values to find - idx: used in the recursion - startAt: used in the recursion **Returns** an integer """ global _countCache if _verbose: print(' ' * idx, 'CountUpTo(%d)' % idx, vs[idx], startAt) if idx == 0 and (nItems, nSlots, tuple(vs)) in _countCache: return _countCache[(nItems, nSlots, tuple(vs))] elif idx >= nSlots: accum = 0 elif idx == nSlots - 1: accum = vs[idx] - startAt else: accum = 0 # get the digit at idx correct for i in range(startAt, vs[idx]): nLevsUnder = nSlots - idx - 1 nValsOver = nItems - i if _verbose: print(' ' * idx, ' ', i, nValsOver, nLevsUnder, NumCombinations(nValsOver, nLevsUnder)) accum += NumCombinations(nValsOver, nLevsUnder) accum += CountUpTo(nItems, nSlots, vs, idx + 1, vs[idx]) if _verbose: print(' ' * idx, '>', accum) if idx == 0: _countCache[(nItems, nSlots, tuple(vs))] = accum return accum _indexCombinations = {} def GetIndexCombinations(nItems, nSlots, slot=0, lastItemVal=0): """ Generates all combinations of nItems in nSlots without including duplicates **Arguments** - nItems: the number of items to distribute - nSlots: the number of slots in which to distribute them - slot: used in recursion - lastItemVal: used in recursion **Returns** a list of lists """ global _indexCombinations if not slot and (nItems, nSlots) in _indexCombinations: res = _indexCombinations[(nItems, nSlots)] elif slot >= nSlots: res = [] elif slot == nSlots - 1: res = [[x] for x in range(lastItemVal, nItems)] else: res = [] for x in range(lastItemVal, nItems): tmp = GetIndexCombinations(nItems, nSlots, slot + 1, x) for entry in tmp: res.append([x] + entry) if not slot: _indexCombinations[(nItems, nSlots)] = res return res def GetAllCombinations(choices, noDups=1, which=0): """ Does the combinatorial explosion of the possible combinations of the elements of _choices_. **Arguments** - choices: sequence of sequences with the elements to be enumerated - noDups: (optional) if this is nonzero, results with duplicates, e.g. (1,1,0), will not be generated - which: used in recursion **Returns** a list of lists >>> GetAllCombinations([(0,),(1,),(2,)]) [[0, 1, 2]] >>> GetAllCombinations([(0,),(1,3),(2,)]) [[0, 1, 2], [0, 3, 2]] >>> GetAllCombinations([(0,1),(1,3),(2,)]) [[0, 1, 2], [0, 3, 2], [1, 3, 2]] """ if which >= len(choices): res = [] elif which == len(choices) - 1: res = [[x] for x in choices[which]] else: res = [] tmp = GetAllCombinations(choices, noDups=noDups, which=which + 1) for thing in choices[which]: for other in tmp: if not noDups or thing not in other: res.append([thing] + other) return res def GetUniqueCombinations(choices, classes, which=0): """ Does the combinatorial explosion of the possible combinations of the elements of _choices_. 
""" # print(choices, classes) assert len(choices) == len(classes) if which >= len(choices): res = [] elif which == len(choices) - 1: res = [[(classes[which], x)] for x in choices[which]] else: res = [] tmp = GetUniqueCombinations(choices, classes, which=which + 1) for thing in choices[which]: for other in tmp: idxThere = 0 for x in other: if x[1] == thing: idxThere += 1 if not idxThere: newL = [(classes[which], thing)] + other newL.sort() if newL not in res: res.append(newL) return res def GetUniqueCombinations_new(choices, classes, which=0): """ Does the combinatorial explosion of the possible combinations of the elements of _choices_. """ # print(choices, classes) assert len(choices) == len(classes) combos = set() for choice in itertools.product(*choices): # If a choice occurs in more than one of the fields, we ignore this case if len(set(choice)) != len(choice): continue combos.add(tuple(sorted((cls, ch) for cls, ch in zip(classes, choice)))) return [list(combo) for combo in sorted(combos)] def UniquifyCombinations(combos): """ uniquifies the combinations in the argument **Arguments**: - combos: a sequence of sequences **Returns** - a list of tuples containing the unique combos """ resD = {} for combo in combos: k = combo[:] k.sort() resD[tuple(k)] = tuple(combo) return list(resD.values()) def GetPossibleScaffolds(nPts, bins, useTriangleInequality=True): """ gets all realizable scaffolds (passing the triangle inequality) with the given number of points and returns them as a list of tuples """ if nPts < 2: res = 0 elif nPts == 2: res = [(x, ) for x in range(len(bins))] else: nDists = len(nPointDistDict[nPts]) combos = GetAllCombinations([range(len(bins))] * nDists, noDups=0) res = [] for combo in combos: if not useTriangleInequality or ScaffoldPasses(combo, bins): res.append(tuple(combo)) return res def OrderTriangle(featIndices, dists): """ put the distances for a triangle into canonical order It's easy if the features are all different: >>> OrderTriangle([0,2,4],[1,2,3]) ([0, 2, 4], [1, 2, 3]) It's trickiest if they are all the same: >>> OrderTriangle([0,0,0],[1,2,3]) ([0, 0, 0], [3, 2, 1]) >>> OrderTriangle([0,0,0],[2,1,3]) ([0, 0, 0], [3, 2, 1]) >>> OrderTriangle([0,0,0],[1,3,2]) ([0, 0, 0], [3, 2, 1]) >>> OrderTriangle([0,0,0],[3,1,2]) ([0, 0, 0], [3, 2, 1]) >>> OrderTriangle([0,0,0],[3,2,1]) ([0, 0, 0], [3, 2, 1]) >>> OrderTriangle([0,0,1],[3,2,1]) ([0, 0, 1], [3, 2, 1]) >>> OrderTriangle([0,0,1],[1,3,2]) ([0, 0, 1], [1, 3, 2]) >>> OrderTriangle([0,0,1],[1,2,3]) ([0, 0, 1], [1, 3, 2]) >>> OrderTriangle([0,0,1],[1,3,2]) ([0, 0, 1], [1, 3, 2]) """ if len(featIndices) != 3: raise ValueError('bad indices') if len(dists) != 3: raise ValueError('bad dists') fs = set(featIndices) if len(fs) == 3: return featIndices, dists dSums = [0] * 3 dSums[0] = dists[0] + dists[1] dSums[1] = dists[0] + dists[2] dSums[2] = dists[1] + dists[2] mD = max(dSums) if len(fs) == 1: if dSums[0] == mD: if dists[0] > dists[1]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (0, 2, 1) dreorder = (1, 0, 2) elif dSums[1] == mD: if dists[0] > dists[2]: ireorder = (1, 0, 2) dreorder = (0, 2, 1) else: ireorder = (1, 2, 0) dreorder = (2, 0, 1) else: if dists[1] > dists[2]: ireorder = (2, 0, 1) dreorder = (1, 2, 0) else: ireorder = (2, 1, 0) dreorder = (2, 1, 0) else: # two classes if featIndices[0] == featIndices[1]: if dists[1] > dists[2]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (1, 0, 2) dreorder = (0, 2, 1) elif featIndices[0] == featIndices[2]: if dists[0] > dists[2]: ireorder = (0, 1, 
2) dreorder = (0, 1, 2) else: ireorder = (2, 1, 0) dreorder = (2, 1, 0) else: # featIndices[1]==featIndices[2]: if dists[0] > dists[1]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (0, 2, 1) dreorder = (1, 0, 2) dists = [dists[x] for x in dreorder] featIndices = [featIndices[x] for x in ireorder] return featIndices, dists # ------------------------------------ # # doctest boilerplate # def _runDoctests(verbose=None): # pragma: nocover import sys import doctest failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose) sys.exit(failed) if __name__ == '__main__': # pragma: nocover _runDoctests()
25.590234
96
0.578611
from __future__ import print_function, division import itertools nPointDistDict = { 2: ((0, 1), ), 3: ((0, 1), (0, 2), (1, 2)), 4: ((0, 1), (0, 2), (0, 3), (1, 2), (2, 3)), 5: ((0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (2, 3), (3, 4)), 6: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (1, 2), (2, 3), (3, 4), (4, 5)), 7: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)), 8: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)), 9: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8)), 10: ((0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)), } nDistPointDict = { 1: 2, 3: 3, 5: 4, 7: 5, 9: 6, 11: 7, 13: 8, 15: 9, 17: 10, } _trianglesInPharmacophore = {} def GetTriangles(nPts): global _trianglesInPharmacophore if nPts < 3: return [] res = _trianglesInPharmacophore.get(nPts, []) if not res: idx1, idx2, idx3 = (0, 1, nPts - 1) while idx1 < nPts - 2: res.append((idx1, idx2, idx3)) idx1 += 1 idx2 += 1 idx3 += 1 res = tuple(res) _trianglesInPharmacophore[nPts] = res return res def _fact(x): if x <= 1: return 1 accum = 1 for i in range(x): accum *= i + 1 return accum def BinsTriangleInequality(d1, d2, d3): if d1[1] + d2[1] < d3[0]: return False if d2[1] + d3[1] < d1[0]: return False if d3[1] + d1[1] < d2[0]: return False return True def ScaffoldPasses(combo, bins=None): nPts = nDistPointDict[len(combo)] tris = GetTriangles(nPts) for tri in tris: ds = [bins[combo[x]] for x in tri] if not BinsTriangleInequality(ds[0], ds[1], ds[2]): return False return True _numCombDict = {} def NumCombinations(nItems, nSlots): global _numCombDict res = _numCombDict.get((nItems, nSlots), -1) if res == -1: res = _fact(nItems + nSlots - 1) // (_fact(nItems - 1) * _fact(nSlots)) _numCombDict[(nItems, nSlots)] = res return res _verbose = 0 _countCache = {} def CountUpTo(nItems, nSlots, vs, idx=0, startAt=0): global _countCache if _verbose: print(' ' * idx, 'CountUpTo(%d)' % idx, vs[idx], startAt) if idx == 0 and (nItems, nSlots, tuple(vs)) in _countCache: return _countCache[(nItems, nSlots, tuple(vs))] elif idx >= nSlots: accum = 0 elif idx == nSlots - 1: accum = vs[idx] - startAt else: accum = 0 for i in range(startAt, vs[idx]): nLevsUnder = nSlots - idx - 1 nValsOver = nItems - i if _verbose: print(' ' * idx, ' ', i, nValsOver, nLevsUnder, NumCombinations(nValsOver, nLevsUnder)) accum += NumCombinations(nValsOver, nLevsUnder) accum += CountUpTo(nItems, nSlots, vs, idx + 1, vs[idx]) if _verbose: print(' ' * idx, '>', accum) if idx == 0: _countCache[(nItems, nSlots, tuple(vs))] = accum return accum _indexCombinations = {} def GetIndexCombinations(nItems, nSlots, slot=0, lastItemVal=0): global _indexCombinations if not slot and (nItems, nSlots) in _indexCombinations: res = _indexCombinations[(nItems, nSlots)] elif slot >= nSlots: res = [] elif slot == nSlots - 1: res = [[x] for x in range(lastItemVal, nItems)] else: res = [] for x in range(lastItemVal, nItems): tmp = GetIndexCombinations(nItems, nSlots, slot + 1, x) for entry in tmp: res.append([x] + entry) if not slot: _indexCombinations[(nItems, nSlots)] = res return res def GetAllCombinations(choices, noDups=1, which=0): if which >= len(choices): res = [] elif which == len(choices) - 1: res = [[x] for x in choices[which]] else: res = [] tmp = GetAllCombinations(choices, noDups=noDups, which=which + 1) for thing in choices[which]: for other 
in tmp: if not noDups or thing not in other: res.append([thing] + other) return res def GetUniqueCombinations(choices, classes, which=0): assert len(choices) == len(classes) if which >= len(choices): res = [] elif which == len(choices) - 1: res = [[(classes[which], x)] for x in choices[which]] else: res = [] tmp = GetUniqueCombinations(choices, classes, which=which + 1) for thing in choices[which]: for other in tmp: idxThere = 0 for x in other: if x[1] == thing: idxThere += 1 if not idxThere: newL = [(classes[which], thing)] + other newL.sort() if newL not in res: res.append(newL) return res def GetUniqueCombinations_new(choices, classes, which=0): assert len(choices) == len(classes) combos = set() for choice in itertools.product(*choices): if len(set(choice)) != len(choice): continue combos.add(tuple(sorted((cls, ch) for cls, ch in zip(classes, choice)))) return [list(combo) for combo in sorted(combos)] def UniquifyCombinations(combos): resD = {} for combo in combos: k = combo[:] k.sort() resD[tuple(k)] = tuple(combo) return list(resD.values()) def GetPossibleScaffolds(nPts, bins, useTriangleInequality=True): if nPts < 2: res = 0 elif nPts == 2: res = [(x, ) for x in range(len(bins))] else: nDists = len(nPointDistDict[nPts]) combos = GetAllCombinations([range(len(bins))] * nDists, noDups=0) res = [] for combo in combos: if not useTriangleInequality or ScaffoldPasses(combo, bins): res.append(tuple(combo)) return res def OrderTriangle(featIndices, dists): if len(featIndices) != 3: raise ValueError('bad indices') if len(dists) != 3: raise ValueError('bad dists') fs = set(featIndices) if len(fs) == 3: return featIndices, dists dSums = [0] * 3 dSums[0] = dists[0] + dists[1] dSums[1] = dists[0] + dists[2] dSums[2] = dists[1] + dists[2] mD = max(dSums) if len(fs) == 1: if dSums[0] == mD: if dists[0] > dists[1]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (0, 2, 1) dreorder = (1, 0, 2) elif dSums[1] == mD: if dists[0] > dists[2]: ireorder = (1, 0, 2) dreorder = (0, 2, 1) else: ireorder = (1, 2, 0) dreorder = (2, 0, 1) else: if dists[1] > dists[2]: ireorder = (2, 0, 1) dreorder = (1, 2, 0) else: ireorder = (2, 1, 0) dreorder = (2, 1, 0) else: if featIndices[0] == featIndices[1]: if dists[1] > dists[2]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (1, 0, 2) dreorder = (0, 2, 1) elif featIndices[0] == featIndices[2]: if dists[0] > dists[2]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (2, 1, 0) dreorder = (2, 1, 0) else: if dists[0] > dists[1]: ireorder = (0, 1, 2) dreorder = (0, 1, 2) else: ireorder = (0, 2, 1) dreorder = (1, 0, 2) dists = [dists[x] for x in dreorder] featIndices = [featIndices[x] for x in ireorder] return featIndices, dists def _runDoctests(verbose=None): import sys import doctest failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose) sys.exit(failed) if __name__ == '__main__': _runDoctests()
true
true
f7083e93a60d9d63b08a9b5da38bc8c394ece1f9
3,342
py
Python
tickvaultpythonapi/parsing/predicate.py
TickSmith/tickvault-python-api
712a8a27bbc1815414009a60c92344471f36b81f
[ "MIT" ]
4
2018-02-12T14:05:46.000Z
2019-08-14T15:48:16.000Z
tickvaultpythonapi/parsing/predicate.py
TickSmith/tickvault-python-api
712a8a27bbc1815414009a60c92344471f36b81f
[ "MIT" ]
null
null
null
tickvaultpythonapi/parsing/predicate.py
TickSmith/tickvault-python-api
712a8a27bbc1815414009a60c92344471f36b81f
[ "MIT" ]
1
2020-01-14T11:12:02.000Z
2020-01-14T11:12:02.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2017 TickSmith Corp.
#
# Licensed under the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

'''
Provides reusable query structure
'''

import sys

from tickvaultpythonapi.parsing.operation import Operation, BaseOperation


class Predicate(object):

    key = ""
    operation = ""
    value = ""
    opClass = Operation()  # Defaults to operation, which allows no operations

    def __init__(self, key, op, val):
        """
        Assign key, operation and value
        """
        self.key = key
        self.operation = self.get_valid_op(op)
        self.value = val

    def get_valid_op(self, op):
        """
        Uses opClass (subtypes of Operation) to determine whether the
        given operation is allowed. If it is, it returns the string that
        will be appended to the key name (ex. '>' results in 'Gte',
        so that the query will be 'keyGte')
        """
        try:
            return self.opClass.get_str(op)
        except Exception as e:
            sys.exit(e)

    def get_as_kv_pair(self):
        """
        Get as key-value pair (ex. key = 'price', operation = '!=',
        value = '50', result= {"priceNeq" : "50"})
        """
        return {self.key + self.operation : str(self.value)}

    def get_as_tuple(self):
        """
        Get as tuple (ex. key = 'price', operation = '!=', value = '50',
        result= ("priceNeq","50")
        """
        return (self.key + self.operation, str(self.value))

    def __str__(self):
        """
        @Overrride of __str__()
        """
        return self.key + self.operation + "=" + str(self.value)


class BasePredicate(Predicate):

    # Replace opClass with BaseOperation
    opClass = BaseOperation()

    # Getter for opClass
    @classmethod
    def get_op_class(self):
        return self.opClass


if __name__ == '__main__':
    params = {"param1":"value1"}
    bp = BasePredicate("line_type", "=", "T,E")
    print(bp.opClass.op_to_str)
    p = bp.get_as_kv_pair()
    params = {**params, **p}
    print(params)
    print(BasePredicate("price", ">", 7).get_as_kv_pair())
    print(BasePredicate("price", ">=", "a"))
    print(BasePredicate("price", "<=", "7").get_as_kv_pair())
    print(BasePredicate("price", "!=", "7"))
31.528302
80
0.640036
import sys

from tickvaultpythonapi.parsing.operation import Operation, BaseOperation


class Predicate(object):

    key = ""
    operation = ""
    value = ""
    opClass = Operation()

    def __init__(self, key, op, val):
        self.key = key
        self.operation = self.get_valid_op(op)
        self.value = val

    def get_valid_op(self, op):
        try:
            return self.opClass.get_str(op)
        except Exception as e:
            sys.exit(e)

    def get_as_kv_pair(self):
        return {self.key + self.operation : str(self.value)}

    def get_as_tuple(self):
        return (self.key + self.operation, str(self.value))

    def __str__(self):
        return self.key + self.operation + "=" + str(self.value)


class BasePredicate(Predicate):

    opClass = BaseOperation()

    @classmethod
    def get_op_class(self):
        return self.opClass


if __name__ == '__main__':
    params = {"param1":"value1"}
    bp = BasePredicate("line_type", "=", "T,E")
    print(bp.opClass.op_to_str)
    p = bp.get_as_kv_pair()
    params = {**params, **p}
    print(params)
    print(BasePredicate("price", ">", 7).get_as_kv_pair())
    print(BasePredicate("price", ">=", "a"))
    print(BasePredicate("price", "<=", "7").get_as_kv_pair())
    print(BasePredicate("price", "!=", "7"))
true
true
f7083ffe9154fa0150d4edfa7cf36e104d7b7b0f
117,025
py
Python
FINE/expansionModules/robustPipelineSizing.py
sdickler/FINE
3114fd009e80a7eadacffe26bf5ff8e6a126ac61
[ "MIT" ]
34
2018-07-02T16:20:39.000Z
2022-03-30T09:46:44.000Z
FINE/expansionModules/robustPipelineSizing.py
sdickler/FINE
3114fd009e80a7eadacffe26bf5ff8e6a126ac61
[ "MIT" ]
19
2018-11-09T07:56:20.000Z
2022-02-15T10:54:21.000Z
FINE/expansionModules/robustPipelineSizing.py
sdickler/FINE
3114fd009e80a7eadacffe26bf5ff8e6a126ac61
[ "MIT" ]
42
2018-09-24T15:07:20.000Z
2022-02-25T18:41:52.000Z
""" Last edited: January 20 2020 |br| @author: FINE Developer Team (FZJ IEK-3) \n\n The approaches used are described in Robinius et. al. (2019) "Robust Optimal Discrete Arc Sizing for Tree-Shaped Potential Networks" and they are further developed with the help of Theorem 10 of Labbé et. al. (2019) "Bookings in the European gas market: characterisation of feasibility and computational complexity results" and Lemma 3.4 and 3.5 of Schewe et. al. (preprint 2020) "Computing Technical Capacities in the European Entry-Exit Gas Market is NP-Hard" """ import pandas as pd from FINE import utils import networkx as nx import math import pyomo.environ as py import warnings from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition import numpy as np import copy from scipy.optimize import fsolve import matplotlib.pyplot as plt import matplotlib as mpl import shapely as shp import time from multiprocessing import Pool import sys from functools import partial try: import geopandas as gpd except ImportError: warnings.warn('The GeoPandas python package could not be imported.') # local type und value checker def isPandasDataFrameNumber(dataframe): # check if dataframe is a pandas dataframe and if each value is float or int if not isinstance(dataframe, pd.DataFrame): raise TypeError("The input argument has to be a pandas DataFrame") else: if not dataframe.select_dtypes(exclude=["float", "int"]).empty: raise ValueError("The input pandas DataFrame has to contain only floats or ints") def isPandasSeriesPositiveNumber(pandasSeries): # Check if the input argument is a pandas series and it contains only positive numbers if not isinstance(pandasSeries, pd.Series): raise TypeError("The input argument has to be a pandas series") else: for index in pandasSeries.index: utils.isPositiveNumber(pandasSeries[index]) def isNetworkxGraph(graph): # Check if the input argument is a networkx graph if not isinstance(graph, nx.Graph): raise TypeError("The input argument has to be a networkx graph") def isDictionaryPositiveNumber(dictionary): # Check if the input argument is a dictionary with positive numbers as values if not isinstance(dictionary, dict): raise TypeError("The input argument has to be a dictionary") else: for key in dictionary.keys(): utils.isPositiveNumber(dictionary[key]) def checkLowerUpperBoundsOfDicts(lowerDict, upperDict): # check if lowerDict and upperDict have the same keys and if lowerDict[key] <= upperDict[key] holds if not (lowerDict.keys() == upperDict.keys()): raise ValueError("The input arguments have to have the same keys") else: for key in lowerDict.keys(): if lowerDict[key] > upperDict[key]: raise ValueError("The lower bound has to be the smaller than the upper bound") def isListOfStrings(strings): # check if strings is list of strings if not isinstance(strings, list): raise TypeError("The input argument has to be a list") else: for string in strings: utils.isString(string) def isBool(boolean): # check if boolean is a bool if not isinstance(boolean, bool): raise TypeError("The input argument has to be a bool") # End utils checks def getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None): """ Determines the injection and withdrawal rates into a network from a component in an EnergySystemModel object or based on the fluid flow data. 
    :param componentName: name of the network component in the EnergySystemModel class
        (only required if the fluid flows are to be obtained from the EnergySystemModel class)
        |br| * the default value is ''
    :type componentName: string

    :param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be
        specified if the operationVariablesOptimumData are to be obtained from the
        EnergySystemModel object)
        |br| * the default value is None
    :type esM: FINE EnergySystemModel

    :param operationVariablesOptimumData: the injection and withdrawal rates into and out of the
        network can either be obtained from a DataFrame with the original fluid flows or an
        EnergySystemModel with an optimized Pyomo instance.
        In the former case, the argument is a pandas DataFrame with two index columns (specifying
        the names of the start and end node of a pipeline) and one column index (for the time
        steps). The data in the DataFrame denotes the flow coming from the start node and going
        to the end node [e.g. in kWh or Nm^3]. Example:

                          0    1   ...  8759
            node1 node2  0.1  0.0  ...  0.9
            node2 node3  0.0  0.3  ...  0.4
            node2 node1  0.9  0.9  ...  0.2
            node3 node2  1.1  0.2  ...  0.9

        |br| * the default value is None
    :type operationVariablesOptimumData: pandas DataFrame with non-negative floats

    :return: injection and withdrawal rates (withdrawals from the network are positive while
        injections are negative)
    :rtype: pandas DataFrame
    """
    # TODO check type and value correctness

    # Get the original optimal operation variables
    if operationVariablesOptimumData is not None:
        op = operationVariablesOptimumData
    else:
        op = esM.componentModelingDict[esM.componentNames[componentName]]. \
            getOptimalValues('operationVariablesOptimum')['values'].loc[componentName]

    # Get a map of the component's network
    if esM is None:
        mapN = {}
        for conn in operationVariablesOptimumData.index:
            loc, loc_ = conn
            mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_})
            mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc})
    else:
        mapN = esM.getComponent(componentName)._mapL

    # Initialize list for nodal injection and withdrawal time series data
    injectionWithdrawalRates, nodeIx = [], []

    # Reset connections set (not all indices might be in the operationVariablesOptimumData data)
    connections = set()

    # For each node loc, compute the injection and withdrawal rates
    for loc, locConn in mapN.items():
        # As in a few cases zero columns/ rows are dropped from data frames, two lists
        # of eligible connection indices are created.
        ixIn, ixOut = [], []
        for loc_, conn in locConn.items():
            if (loc, loc_) in op.index:
                ixOut.append((loc, loc_)), connections.add((loc, loc_))
            if (loc_, loc) in op.index:
                ixIn.append((loc_, loc)), connections.add((loc_, loc))
        # If either list has at least one entry, the incoming and outgoing flows are selected
        # from the original optimal flow variables and aggregated. The resulting commodity
        # withdrawals from the network are positive while injections are negative.
        if (len(ixIn) != 0) | (len(ixOut) != 0):
            injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum())
            nodeIx.append(loc)

    # Concat data to a pandas dataframe
    injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1)

    return injectionWithdrawalRates


def getNetworkLengthsFromESM(componentName, esM):
    """
    Obtains the pipeline lengths of a transmission component in an EnergySystemModel class.
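    A minimal, purely illustrative call (the component name 'Pipelines' is an assumption and has
    to match a transmission component that actually exists in the esM instance)::

        distances = getNetworkLengthsFromESM('Pipelines', esM)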
:param componentName: name of the network component in the EnergySystemModel class (only required if the fluid flows are to be obtained from the EnergySystemModel class) |br| * the default value is '' :type componentName: string :param esM: EnergySystemModel object with an optimized Pyomo instance (only needs to be specified if the operationVariablesOptimumData are to be obtained from the EnergySystemModel object) |br| * the default value is None :type esM: FINE EnergySystemModel :return: pipeline distances in the length unit specified in the esM object :rtype: pandas series """ utils.isString(componentName) utils.isEnergySystemModelInstance(esM) distances = esM.getComponent(componentName).distances.copy() indexMap = esM.getComponent(componentName)._mapC distances.index = [indexMap[ix] for ix in distances.index] return distances def getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength): """ If a pipe is longer than maxPipeLength than it will be split into several pipes with equidistant length, i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1 :param shapeFilePath: path to a shape file which connects the gas injection/ withdrawal nodes with each other. The rows of the file describe connections between the injection/ withdrawal nodes. The required geometry of these connections is a shapely LineString. Additionally, the file has two columns holding the names of the two injection/ withdrawal nodes (start and end point of the LineString). :type shapeFilePath: string :param regColumn1: name of the column which holds the name of the injection/ withdrawal node at the beginning of the line :type regColumn1: string :param regColumn2: name of the column which holds the name of the injection/ withdrawal node at the end of the line :type regColumn2: string :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar]. It holds: dic_node_minPress[index] <= dic_node_maxPress[index]. :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float :param minPipeLength: desired minimum length of a pipe in [m], note: not always possible to achieve. :type minPipeLength: positive number :param maxPipeLength: determines the maximal length of a pipe in [m]. 
:type maxPipeLength: positive number :return: distances_new - pipeline distances in m :rtype: pandas series :return: dic_node_minPress_new - dictionary that contains for every node of the network its lower pressure bound in [bar] :rtype: dictionary key: node of the network, value: non-negative float :return: dic_node_maxPress_new - dictionary that contains for every node of the network its upper pressure bound in [bar] :rtype: dictionary key: node of the network, value: non-negative float :return: gdfNodes - GeoDataFrame with the nodes of the network and their names :rtype: geopandas GeoDataFrame :return: gdfEdges - GeoDataFrame with the edges of the network and the names of their start and end nodes :rtype: geopandas GeoDataFrame """ # type and value check isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) utils.isString(regColumn1), utils.isString(regColumn2) utils.isStrictlyPositiveNumber(maxPipeLength) utils.isStrictlyPositiveNumber(minPipeLength) # Read shape file with linestrings connecting the entry/ exit nodes of the gas gdf=gpd.read_file(shapeFilePath) if not (gdf.geometry.type == 'LineString').all(): raise ValueError("Geometries of the shape file have to be LineStrings") print('Number of edges before segmentation:', len(gdf)) originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2]) print('Number of nodes before segmentation:', len(originalNodesSet)) # Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates coordNames, coords = [], [] pMin, pMax = [], [] lines = [] # Break linestrings into linear pieces for i, row in gdf.iterrows(): # Simplify linestring (to increase the minimum length of pipeline connections wherever possible) line = row.geometry.simplify(minPipeLength) lines.append(line) row.geometry = line # Get new nodes coords_ = [i for i in line.coords] coords.extend(coords_) coordNames_ = [row[regColumn1]] coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j) for j in range(len(coords_)-2)]) coordNames_.append(row[regColumn2]) coordNames.extend(coordNames_) # Get averaged lower and upper pressure levels pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) + dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))]) pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) + dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))]) gdf['geometry'] = lines # Create DataFrame of old and new nodes and drop duplicates dfNodes = pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T dfNodes = dfNodes.drop_duplicates(subset='lon_lat') dfNodes = dfNodes.drop_duplicates(subset='nodeName') # Obtain edges from shape file, assign names to them, delete duplicates nodesIn_nodesOut = [] nodesIn = [] nodesOut = [] lineStrings = [] for i, row in gdf.iterrows(): coords_ = [i for i in row.geometry.coords] for j in range(len(coords_)-1): nodeIn = dfNodes.loc[dfNodes['lon_lat'] == coords_[j],'nodeName'].iloc[0] nodeOut = dfNodes.loc[dfNodes['lon_lat'] == coords_[j+1],'nodeName'].iloc[0] nodesIn.append(nodeIn), nodesOut.append(nodeOut) nodes = [nodeIn,nodeOut] nodes.sort() nodesIn_nodesOut.append('edge_' + nodes[0] + '_' + nodes[1]) lineStrings.append(shp.geometry.LineString([coords_[j],coords_[j+1]])) dfEdges = pd.DataFrame([nodesIn, nodesOut, nodesIn_nodesOut, lineStrings], index=['nodeIn', 
'nodeOut','edgeName','geometry']).T dfEdges = dfEdges.drop_duplicates(subset='edgeName') gdfEdges = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'}) print('Number of edges after 1. segmentation:', len(gdfEdges)) print('Number of nodes after 1. segmentation:', len(dfNodes)) # Add nodes when line distances are too long newNodes, newLines, newNodesName, newLinesName = [], [], [], [] nodesIn, nodesOut, coords = [], [], [] pMin, pMax = [], [] for i, row in gdfEdges.iterrows(): # If lines are two long, segment them if np.round(row['geometry'].length,2) > maxPipeLength: nbNewNodes = int(np.floor(row['geometry'].length/maxPipeLength)) line = row.geometry newNodes_, newLines_, newNodesName_, newLinesName_ = [], [], [], [] nodesIn_, nodesOut_, coords_ = [], [], [] pMin_, pMax_ = [], [] nodeStart, nodeEnd = line.interpolate(0), line.interpolate(line.length) nodeStartName = row['nodeIn'] pMinIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMin'].iloc[0] pMinOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMin'].iloc[0] pMaxIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMax'].iloc[0] pMaxOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMax'].iloc[0] spacing = row['geometry'].length/(nbNewNodes+1) for j in range(1,nbNewNodes+1): newNode = line.interpolate(j*spacing) newNodes_.append(newNode) coords_.append((newNode.x, newNode.y)) newNodeName = row['nodeIn'] + '_' + row['nodeOut'] + '_a_' + str(j) newNodesName_.append(newNodeName) newLine = shp.geometry.LineString([nodeStart,newNode]) newLines_.append(newLine) newLinesName_.append('temp'), nodesIn_.append(nodeStartName), nodesOut_.append(newNodeName) pMin_.append((pMinIn*(nbNewNodes-j+1) + pMinOut*j)/(nbNewNodes+1)) pMax_.append((pMaxIn*(nbNewNodes-j+1) + pMaxOut*j)/(nbNewNodes+1)) nodeStart, nodeStartName = newNode, newNodeName newLines_.append(shp.geometry.LineString([newNode,nodeEnd])) newLinesName_.append('temp') nodesIn_.append(newNodeName), nodesOut_.append(row['nodeOut']) newNodes.extend(newNodes_), newLines.extend(newLines_), newNodesName.extend(newNodesName_) newLinesName.extend(newLinesName_), pMin.extend(pMin_), pMax.extend(pMax_) nodesIn.extend(nodesIn_), nodesOut.extend(nodesOut_), coords.extend(coords_) if len(newNodes) > 0: dfNodes = dfNodes.append(pd.DataFrame([newNodesName, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T) dfEdges = pd.DataFrame([nodesIn, nodesOut, newLinesName, newLines], index=['nodeIn', 'nodeOut','edgeName','geometry']).T gdfEdgesNew = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'}) gdfEdges = gdfEdges.append(gdfEdgesNew) gdfEdges = gdfEdges[gdfEdges.geometry.length.round(2) <= maxPipeLength] del gdfEdges['edgeName'] renameDict = {name: 'auxNode' + str(i) for i, name in enumerate(dfNodes.nodeName.values) if name not in originalNodesSet} for node in originalNodesSet: renameDict.update({node:node}) gdfEdges['nodeIn'] = gdfEdges.apply(lambda x: renameDict[x['nodeIn']], axis=1) gdfEdges['nodeOut'] = gdfEdges.apply(lambda x: renameDict[x['nodeOut']], axis=1) gdfEdges['distances'] = gdfEdges['geometry'].length print('Number of edges after 2. segmentation:', len(gdfEdges)) dfNodes['nodeName'] = dfNodes.apply(lambda x: renameDict[x['nodeName']], axis=1) dfNodes['geometry'] = dfNodes.apply(lambda x: shp.geometry.Point(x['lon_lat']), axis=1) del dfNodes['lon_lat'] gdfNodes = gpd.GeoDataFrame(dfNodes,crs=gdf.crs).to_crs({'init': 'epsg:3035'}) print('Number of nodes after 2. 
segmentation:', len(gdfNodes))
    print('Minimum length [m]:', gdfEdges.distances.min(), 'Maximum length [m]:', gdfEdges.distances.max())

    distances_new = pd.Series(gdfEdges['distances'].values,
                              index=[(n1, n2) for n1, n2 in zip(gdfEdges['nodeIn'], gdfEdges['nodeOut'])])

    dic_node_minPress_new = {n: pMin for n, pMin in zip(gdfNodes['nodeName'], gdfNodes['pMin'])}
    dic_node_maxPress_new = {n: pMax for n, pMax in zip(gdfNodes['nodeName'], gdfNodes['pMax'])}

    return distances_new, dic_node_minPress_new, dic_node_maxPress_new, gdfNodes, gdfEdges


def createNetwork(distances):
    """
    Creates undirected network/graph from given distances; updates distances such that
    either (u,v) or (v,u) are contained

    :param distances: pipeline distances in the length unit specified in the esM object
    :type distances: pandas series

    :return: graph of the network corresponding to the distances
    :rtype: graph object of networkx

    :return: pipeline distances in the length unit specified in the esM object
    :rtype: pandas series
    """
    # type and value check
    isPandasSeriesPositiveNumber(distances)
    for index in distances.index:
        if not isinstance(index, tuple):
            raise TypeError("Index of pandas series has to be a tuple")

    # first check if distances are consistent, i.e. if (u,v) and (v,u) are in distances
    # they have to have the same length and we will delete one of them
    # tmp list for reversed edges that we will delete
    tmp_edges = []
    for edge in distances.index:
        if (edge[1], edge[0]) in distances.index and (edge[1], edge[0]) not in tmp_edges:
            assert (distances[edge] == distances[(edge[1], edge[0])])
            tmp_edges.append(edge)
    # delete tmp_edges because reversed edges are already contained and we consider an undirected graph
    distances = distances.drop(tmp_edges)

    # get edges for graph
    edges = distances.index
    # create empty graph
    G = nx.Graph()
    # create graph from given edges and add length as edge attribute
    for edge in edges:
        G.add_edge(edge[0], edge[1], length=distances[edge])

    return G, distances


def createSteinerTree(graph, distances, inner_nodes):
    """
    Computes a steiner tree with minimal sum of pipeline lengths;
    updates distances such that only arcs of the spanning tree are contained with corresponding length

    :param graph: an undirected networkx graph: Its edges have the attribute length which is
        the pipeline length in [m]
    :type graph: networkx graph object

    :param distances: pipeline distances in the length unit specified in the esM object
    :type distances: pandas series

    :return: spanning tree whose sum of pipeline lengths is minimal
    :rtype: graph object of networkx
    """
    from networkx.algorithms import approximation

    # type and value check
    isNetworkxGraph(graph)
    isPandasSeriesPositiveNumber(distances)

    # compute spanning tree with minimal sum of pipeline lengths
    S = approximation.steiner_tree(graph, terminal_nodes=inner_nodes, weight='length')
    # TODO check why function fails when MST function is not called here
    S = nx.minimum_spanning_tree(S, weight='length')

    # delete edges that are in graph but not in the tree from the distance matrix
    edgesToDelete = []
    for edge in distances.index:
        # check if edge or its reversed edge are contained in the tree
        # you have to check both directions because we have an undirected graph
        if edge not in S.edges and (edge[1], edge[0]) not in S.edges:
            edgesToDelete.append(edge)
    distances = distances.drop(edgesToDelete)

    return S, distances


def _generateRobustScenarios(startNode_endNode, **kwargs):
    startNode = startNode_endNode[0]
    endNode = startNode_endNode[1]
    return startNode_endNode, \
computeSingleSpecialScenario(startNode=startNode, endNode=endNode, **kwargs) def generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress, solver='glpk', threads=1, verbose=0): """ Compute for every node combination a special robust scenario according to Robinius et. al. (2019) and Labbé et. al. (2019) :param injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while injections are negative) for every time step and node; unit [kg/s] :type: pandas dataframe :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m] :type graph: networkx graph object :param distances: pipeline distances in the length unit specified in the esM object :type distances: pandas series :param threads: number of threads used for parallelization :type threads: positive integer :param verbose: if > 0, parallelization progress is displayed :type verbose: int :return dictionary that contains for every node pair a dictionary containing all arc flows of the corresponding special scenario :rtype: dictionary key: (node1,node2), value: dictionary: key: arc, value: arc flow in [kg/s] :return list of entry node :rtype: list of strings :return list of exit node :rtype: list of strings """ # Type and value checks isPandasDataFrameNumber(injectionWithdrawalRates) isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) # get for every entry/exit node the minimal and maximal injection rate and save it in a # dictionary: key: node, value: min Rate; respectively max Rate in [kg/s] # we note that inner nodes a handled separately in the computation of the special scenario dic_nodes_MinCapacity = {} dic_nodes_MaxCapacity = {} # list of entry nodes and exit nodes; note node can be in both for example storages entries = [] exits = [] inners = [] for node in list(injectionWithdrawalRates.columns.values): minRate = injectionWithdrawalRates[node].min() maxRate = injectionWithdrawalRates[node].max() assert (minRate <= maxRate) dic_nodes_MinCapacity[node] = minRate dic_nodes_MaxCapacity[node] = maxRate # if minRate is negative, then node is an entry; if maxRate is positive, then node is an exit if minRate < 0.0: entries.append(node) if maxRate > 0.0: exits.append(node) elif maxRate > 0: exits.append(node) else: inners.append(node) maxPressuresAreEqual = True if len(set(dic_node_maxPress.values())) == 1 else False p_exits = [dic_node_minPress[exit] for exit in exits] p_entries_inners = [dic_node_minPress[node] for node in entries] p_inners = [dic_node_minPress[node] for node in inners] p_entries_inners.extend(p_inners) minPressureExitsIsLarger = True if min(p_exits) >= max(p_entries_inners) else False # compute special scenario for each node combination; see Paper Robinius et. al.(2019); Labbé et. al. 
(2019) # save arc flows of special scenarios for each node combination; # dictionary: key: node pair, value: dictionary: key: arc, value: arc flow dic_nodePair_flows = {} if maxPressuresAreEqual and minPressureExitsIsLarger: if verbose == 0: print('Reduced robust scenario set can be generated' + ' (pMax is equal at all nodes & pMin at exits is >= at inner and entry nodes).') nodes = [(startNode, endNode) for startNode in entries for endNode in exits if startNode != endNode] else: nodes = [(startNode, endNode) for startNode in graph.nodes for endNode in graph.nodes if startNode != endNode] pool = Pool(threads) for i, values in enumerate(pool.imap(partial(_generateRobustScenarios, graph=graph, distances=distances, entries=entries, exits=exits, dic_nodes_MinCapacity=dic_nodes_MinCapacity, dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, solver=solver), nodes), 1): if verbose == 0: sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(nodes) * 100))) dic_nodePair_flows[values[0]] = values[1] pool.close() pool.join() return dic_nodePair_flows, entries, exits def computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity, dic_nodes_MaxCapacity, specialScenario=True, solver='glpk'): """ Compute special robust scenario for given node combination according to Robinius et. al. (2019) and Labbé et. al. (2019) :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m] :type graph: networkx graph object :param distances: pipeline distances in the length unit specified in the esM object :type distances: pandas series :param entries: list of entry nodes of the network :type entries: list of strings :param exits: list of exit nodes of the network :type exits: list of strings :param startNode: node of the network (starting node of the special scenario) :type startNode: string :param endNode: node of the network (end node of special scenario) :type endNode: string :param dic_nodes_MinCapacity: dictionary containing minimal capacity for each node :type dic_nodes_MinCapacity: dictionary: key: node of the network, value: float :param dic_nodes_MaxCapacity: dictionary containing maximal capacity for each node :type dic_nodes_MaxCapacity: dictionary: key: node of the network, value: float :param specialScenario: bool: True if we compute special robust scenario; False if we compute scenario for fixed demand vector, e.g., for scenario of a time step :type specialScenario: bool :param solver: name of the optimization solver to use :type solver: string, default 'glpk' :return dictionary that contains for every arc the corresponding arc flows of the (special) scenario :rtype: dictionary key: arc, value: arc flow """ # Type and value check isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) isListOfStrings(entries) isListOfStrings(exits) utils.isString(startNode) utils.isString(endNode) if isinstance(dic_nodes_MinCapacity, dict) and isinstance(dic_nodes_MaxCapacity, dict): if not (dic_nodes_MinCapacity.keys() == dic_nodes_MaxCapacity.keys()): raise TypeError("Dictionaries for min and max capacity need same keys") for node in dic_nodes_MinCapacity.keys(): if not (isinstance(dic_nodes_MinCapacity[node], float) or isinstance(dic_nodes_MinCapacity[node], int)): raise TypeError("The input argument has to be an number") if not (isinstance(dic_nodes_MaxCapacity[node], float) or isinstance(dic_nodes_MaxCapacity[node], int)): raise TypeError("The input argument has to be an number") if 
dic_nodes_MaxCapacity[node] < dic_nodes_MinCapacity[node]: raise ValueError("minimal node capacity has to be equal or smaller than maximal node capacity") else: raise TypeError("dic_nodes_MinCapacity and dic_nodes_MinCapacity have to be dictionaries") isBool(specialScenario) # we build concrete Pyomo Model model = py.ConcreteModel() # Description model: we have a simple directed graph. We allow negative flows because a pipe can be used in both # directions by the flows model.Nodes = py.Set(initialize=graph.nodes) # important to use distances.keys() instead of graph.edges such that we do not have key errors later on because # the edges in graph are undirected and in distances.keys() directed model.Arcs = py.Set(initialize=distances.keys(), dimen=2) # create demand variables for every node; # if specialScenario is true, then we compute special scenario, i.e. entry/exit demand variables are bounded by # min(0,minimal_capacity) <= demandVariable <= max(0, maximal_capacity) # demand variables for inner nodes are set to zero # if specialScenario is false, the demand variable is just bounded by the minimal and maximal capacity if specialScenario: def demandCapacities(model, node): if node in entries or node in exits: return min(0, dic_nodes_MinCapacity[node]), max(0, dic_nodes_MaxCapacity[node]) else: return 0, 0 model.Demand = py.Var(model.Nodes, bounds=demandCapacities) else: # we do not compute special scenarios; we just compute flows for given, possibly fixed, demands def demandCapacities(model, node): return dic_nodes_MinCapacity[node], dic_nodes_MaxCapacity[node] model.Demand = py.Var(model.Nodes, bounds=demandCapacities) # create arc flow variables for every arc of the network model.Flow = py.Var(model.Arcs) # compute NodesOut, i.e., set of nodes that are connected to considered node by outgoing arc def nodes_out_init(model, node): retval = [] for (i, j) in model.Arcs: if i == node: retval.append(j) return retval model.NodesOut = py.Set(model.Nodes, initialize=nodes_out_init) # compute NodesIn, i.e., set of nodes connected to considered node by ingoing arc def nodes_in_init(model, node): retval = [] for (i, j) in model.Arcs: if j == node: retval.append(i) return retval model.NodesIn = py.Set(model.Nodes, initialize=nodes_in_init) # add flow balance constraints corresponding to the node demands def flow_balance_rule(model, node): return sum(model.Flow[i, node] for i in model.NodesIn[node]) \ - sum(model.Flow[node, j] for j in model.NodesOut[node]) \ == model.Demand[node] model.FlowBalance_cons = py.Constraint(model.Nodes, rule=flow_balance_rule) # compute unique flow-path P(startNode,endNode) from entry to exit; given by list of nodes of the path pathNodes = nx.shortest_path(graph, source=startNode, target=endNode) # non zero coefficients of objective function dic_arc_coef = {} # determine coefficients for objective function # if for an arc (u,v), u, respectively v, are not in pathNodes, then the coefficient is 0 # if arc (u,v) of pathNodes satisfies P(startNode, u) subset P(startNode,v), then coefficient is 1, otherwise -1 for index in range(0, len(pathNodes) - 1): # check which direction of the arc is contained in the graph if (pathNodes[index], pathNodes[index + 1]) in model.Arcs: dic_arc_coef[(pathNodes[index], pathNodes[index + 1])] = 1 else: dic_arc_coef[(pathNodes[index + 1], pathNodes[index])] = -1 # we set objective def obj_rule(model): return sum(dic_arc_coef[arc] * model.Flow[arc] for arc in dic_arc_coef.keys()) model.Obj = py.Objective(rule=obj_rule, sense=py.maximize) # Create 
a solver opt = SolverFactory(solver) # Solve optimization model results = opt.solve(model) # status of solver status = results.solver.status # termination condition termCondition = results.solver.termination_condition # save the solution of the flows in a dictionary key: arcs, values: flow dic_scenario_flow = {} if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown: utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) + '. No output is generated.', 0, 0) elif termCondition == TerminationCondition.infeasibleOrUnbounded or \ termCondition == TerminationCondition.infeasible or \ termCondition == TerminationCondition.unbounded: utils.output('Optimization problem is ' + str(termCondition) + '. No output is generated.', 0, 0) else: # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown # status), show a warning message. if not termCondition == TerminationCondition.optimal: warnings.warn('Output is generated for a non-optimal solution.') # dic_arcScenario has key (v,w,scenario) and value flow will be needed for MIP for arc in model.Arcs: dic_scenario_flow[arc] = model.Flow[arc].value return dic_scenario_flow def computeLargeMergedDiameters(dic_subSetDiam_costs, nDigits=6): """ Compute merged diameters, i.e. compute equivalent single diameter for two looped pipes. :param dic_subSetDiam_costs: dictionary containing diameters in [m] and costs in [Euro/m] :type: dictionary: key: diameter, value: costs :param nDigits: number of digits used in the round function |br| * the default value is 6 :type nDigits: positive int :return dic_newDiam_costs: dictionary containing merged diameters in [m] and costs in [Euro/m] :rtype: dictionary: key: diameter, value: costs :return dic_newDiam_oldDiam: dictionary matching new diameters to old diameters :rtype: dictionary: key: new diameter, value: corresponding old diameter, which will be used in the looped pipe """ # Type and value check if isinstance(dic_subSetDiam_costs, dict): for diam in dic_subSetDiam_costs.keys(): utils.isStrictlyPositiveNumber(diam) utils.isStrictlyPositiveNumber(dic_subSetDiam_costs[diam]) else: raise TypeError("The input has to be a dictionary") utils.isStrictlyPositiveInt(nDigits) dic_newDiam_costs = {} dic_newDiam_oldDiam = {} for diam in dic_subSetDiam_costs.keys(): # compute new diameter in [m] and its costs in [Euro/m] # for Formula see (1) in Paper Reuß et. al. 
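# Hedged note (not from the original source): for two parallel pipes with diameters d_1 and d_2,
# the equivalent single diameter is commonly written as d_eq = (d_1**(5/2) + d_2**(5/2))**(2/5);
# with d_1 = d_2 = diam this reduces to 2**(2/5) * diam, which matches the computation below.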
# since at current state we consider the diameter for a looped pipe the above is # equivalent to 2^(2/5) * diam and thus, we do not have to transform diam from [m] to [mm] newDiam = ((diam ** (5 / 2) + diam ** (5 / 2)) ** (2 / 5)).__round__(nDigits) # costs are two times costs of diam because newDiam represents two looped pipe with diameter diam newCosts = 2 * dic_subSetDiam_costs[diam] dic_newDiam_costs[newDiam] = newCosts dic_newDiam_oldDiam[newDiam] = diam return dic_newDiam_costs, dic_newDiam_oldDiam def determinePressureDropCoef(dic_scenario_flows, distances, dic_node_minPress, dic_node_maxPress, diameters, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, nDigits=6): """ Compute for each scenario, diameter, and each arc the corresponding pressure drop :param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all arc flows in [kg/s] of the corresponding (special) scenario :type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar] :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float It holds dic_node_minPress[index] <= dic_node_maxPress[index] :param diameters: list of diameters in [m] :type: list of strictly positive numbers :param ir: integral roughness of pipe in [mm] |br| * the default value is 0.2 (hydrogen, this value can also be used for methane) :type ir: positive float; optional :param rho_n: density at standard state in [kg/m^3] |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane) :type rho_n: positive float; optional :param T_m: constant temperature in [kelvin] |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane) :type T_m: float; optional :param T_n: temperature in standard state in [kelvin] |br| * the default value is 273.15 (hydrogen, this value can also be used for methane) :type T_n: float; optional :param p_n: pressure at standard state in [bar] |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane) :type p_n: non-negative float; optional :param Z_n: realgasfactor of hydrogen at standard state |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane) :type Z_n: non-negative float; optional :param nDigits: number of digits used in the round function |br| * the default value is 6 :type nDigits: positive int; optional :return dictionary that contains for every scenario and diameter the corresponding pressure drops :rtype: dictionary key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop """ # check type and value if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") isPandasSeriesPositiveNumber(distances) isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) if isinstance(diameters, list): for diam in diameters: utils.isPositiveNumber(diam) else: raise TypeError("Diameters has to 
be a list") utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) utils.isStrictlyPositiveInt(nDigits) # compute for each diameter, scenario, and arc its pressure drop # save results in dic: key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop dic_pressureDropCoef = {} for diameter in diameters: for nodePair in dic_scenario_flows.keys(): # initialize dictionary dic_pressureDropCoef[(diameter, nodePair)] = {} # compute cross section of considered pipe and diameter tmpvalue_A = 0.25 * np.pi * diameter ** 2 for arc in dic_scenario_flows[nodePair].keys(): # check if flow is unequal to zero if dic_scenario_flows[nodePair][arc] != 0.0: # Compute approximation of average pressure flow in pipe (u,v) by # if flow((u,v)) is positive then set p_min to lower pressure bound of v and p_max to # upper pressure bound u # if flow((u,v)) is negative then set p_min to lower pressure bound of u and p_max to # upper pressure bound v if dic_scenario_flows[nodePair][arc] > 0: p_min = dic_node_minPress[arc[1]] p_max = dic_node_maxPress[arc[0]] else: p_min = dic_node_minPress[arc[0]] p_max = dic_node_maxPress[arc[1]] # compute approximation of average pressure p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min)) # approximation for density rho = 0.11922 * p_m ** 0.91192 - 0.17264 # approximation of the realgasfactor Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050 K_m = Z_m / Z_n # approximation of the dynamic viscosity eta = 1.04298 * 10 ** (-10) * p_m ** 1.53560 + 8.79987 * 10 ** (-6) nue = eta / rho # compute velocity tmpvalue_w = (abs(dic_scenario_flows[nodePair][arc]) / rho) / tmpvalue_A # compute reynolds number tmpvalue_Re = tmpvalue_w * (diameter / nue) tmpvalue_alpha = np.exp(-np.exp(6.75 - 0.0025 * tmpvalue_Re)) tmpvalue_Lambda = (64 / tmpvalue_Re) * (1 - tmpvalue_alpha) + tmpvalue_alpha * ( -2 * np.log10(2.7 * (np.log10(tmpvalue_Re) ** 1.2 / tmpvalue_Re) + ir / (3.71 * 1000 * diameter))) ** (-2) # note p_n is in [bar] instead of [PA], thus we divide tmpvalue_C by 10**5 # explanation: we have p_i^2-p_j^2=C. If p_i is in [PA] and we want p_i in [bar] then this leads to # (p_i/10^5)^2-(p_j/10^5)^2=C/10^10 # but we changed p_n in computation C from [PA] to [bar] hence we only divide C by 10^5 tmpvalue_C_bar = tmpvalue_Lambda * 16 * rho_n * T_m * p_n * K_m / (np.pi ** 2 * T_n * 10 ** 5) # compute final pressure drop coefficient depending on the flow tmp_value_C_coef = (distances[arc] / rho_n ** 2) * \ (tmpvalue_C_bar * dic_scenario_flows[nodePair][arc] * abs(dic_scenario_flows[nodePair][arc]) / diameter ** 5) # save pressure drop for considered diameter, scenario, and arc dic_pressureDropCoef[(diameter, nodePair)][arc] = tmp_value_C_coef else: dic_pressureDropCoef[(diameter, nodePair)][arc] = 0 return dic_pressureDropCoef def determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureDropCoef, specialScenarioNames, dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True, solver='glpk', threads=4, verbose=0): """ Model of optimal pipeline sizing (diameter selection) w.r.t. 
to the given scenarios :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m] :type graph: networkx graph object :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param dic_pressureDropCoef: dictionary that contains for every scenario and diameter the corresponding pressure drops in [bar] :type dic_pressureDropCoef: dictionary: keys: scenarioName; value: dict: key: arc, value: pressure drop in [bar] :param specialScenarioNames: list of names of scenarios. In robust case tuples (startNode, endNode). :type specialScenarioNames: list of tuples in the robust case, otherwise list of time Steps :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar] :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float It holds dic_node_minPress[index] <= dic_node_maxPress[index] :param dic_diam_costs: dictionary that contains for every diameter in [m] its costs [Euro/m] :type dic_diam_costs: dictionary key: diameter, value: non-negative float :param robust: Bool that is true, if we optimize w.r.t. robust scenarios, otherwise False. :type robust: bool :return dictionary that contains for every arc the optimal diameter in [m] :rtype dictionary: key: arc, value: optimal diameter :param solver: name of the optimization solver to use :type solver: string, default 'glpk' :param threads: number of threads used for optimization (if gurobi is used) :type threads: positive integer :param verbose: if > 0, parallelization progress is displayed :type verbose: int :return dictionary that contains for every scenario the corresponding pressure levels :rtype dictionary: key: scenarioName, value: dict: key: node, value: pressure level of node """ # type and value checks isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) if not isinstance(dic_pressureDropCoef, dict): raise TypeError("The input has to be a dictionary") if isinstance(specialScenarioNames, list): if robust: for scenario in specialScenarioNames: isinstance(scenario, tuple) else: raise TypeError("The input argument has to be a list") isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) if isinstance(dic_diam_costs, dict): for diam in dic_diam_costs.keys(): utils.isStrictlyPositiveNumber(diam) utils.isStrictlyPositiveNumber(dic_diam_costs[diam]) else: raise TypeError("The input has to be a dictionary") if not isinstance(robust, bool): raise TypeError("The input has to be a bool") utils.isString(solver) utils.isPositiveNumber(verbose) # set list of available diameters diameters = dic_diam_costs.keys() # build concrete pyomo model model = py.ConcreteModel() # sets for nodes, arcs, diameters, scenarios model.nodes = py.Set(initialize=graph.nodes) model.arcs = py.Set(initialize=list(distances.keys()), dimen=2) # diameters assuming that each pipe has the same diameter options model.diameters = py.Set(initialize=diameters) # if we have special scenarios, scenario names are tuples, otherwise not if robust: # set indices for each scenario by its nodePair = (startnode, endnode) model.scenarios = py.Set(initialize=specialScenarioNames, 
dimen=2) else: # set indices for each timeStep number model.scenarios = py.Set(initialize=specialScenarioNames, dimen=1) # create variables binaries x are the same for each scenario # pressure variables are different for each scenario model.x = py.Var(model.arcs, model.diameters, domain=py.Binary) if robust: def pressureBounds(model, node, startnode, endnode): return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2 model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds) else: def pressureBounds(model, node, timeStep): return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2 model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds) # objective: minimize the costs def obj_rule(model): return sum( sum(dic_diam_costs[diam] * distances[arc] * model.x[arc, diam] for diam in model.diameters) for arc in model.arcs) model.Obj = py.Objective(rule=obj_rule) # pressure drop for each cons and each scenario if robust: def pressure_drop(model, arc0, arc1, scenarioStart, scenarioEnd): return model.pi[arc1, (scenarioStart, scenarioEnd)] - model.pi[arc0, (scenarioStart, scenarioEnd)] == \ -sum(dic_pressureDropCoef[(diam, (scenarioStart, scenarioEnd))][(arc0, arc1)] * model.x[arc0, arc1, diam] for diam in model.diameters) model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_drop) else: def pressure_dropNotRobust(model, arc0, arc1, timeStep): return model.pi[arc1, timeStep] - model.pi[arc0, timeStep] == \ -sum(dic_pressureDropCoef[(diam, timeStep)][(arc0, arc1)] * model.x[arc0, arc1, diam] for diam in model.diameters) model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_dropNotRobust) # ensure that a single diameter per arc is chosen def selection_diameter(model, arc0, arc1): return sum(model.x[arc0, arc1, diam] for diam in model.diameters) == 1 model.SelectionDiameter_cons = py.Constraint(model.arcs, rule=selection_diameter) # Create a solver opt = SolverFactory(solver) # Set the specified solver options # Solve optimization problem. The optimization solve time is stored and the solver information is printed. if (verbose == 2) & (solver == 'gurobi'): optimizationSpecs = ' LogToConsole=0' opt.set_options('Threads=' + str(threads) + optimizationSpecs) results = opt.solve(model, tee=True, keepfiles=False) else: results = opt.solve(model, tee=True, report_timing=True, keepfiles=False) # status of solver status = results.solver.status # termination condition termCondition = results.solver.termination_condition # write diameter solution to dictionary: key: arc, value: optimal diameter # write pressure solutions to dictionary; key: scenarioName, value: dict: key: node, value: pressure level in [bar] dic_arc_diam = {} dic_scen_node_press = {} if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown: utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) + '. No output is generated.', 0, 0) elif termCondition == TerminationCondition.infeasibleOrUnbounded or \ termCondition == TerminationCondition.infeasible or \ termCondition == TerminationCondition.unbounded: utils.output('Optimization problem is ' + str(termCondition) + '. No output is generated.', 0, 0) else: # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown # status), show a warning message. 
if not termCondition == TerminationCondition.optimal: warnings.warn('Output is generated for a non-optimal solution.') # initialize dict with empty dict for scenario in specialScenarioNames: dic_scen_node_press[scenario] = {} for v in model.component_objects(py.Var, active=True): varobject = getattr(model, str(v)) for index in varobject: # round because sometimes we are nearly one if str(varobject) == 'x' and round(varobject[index].value) == 1: dic_arc_diam.update({(index[0], index[1]): index[2]}) elif str(varobject) == 'pi': if robust: # need sqrt() because in model pressure is quadratic because of the transformation dic_scen_node_press[(index[1], index[2])].update({index[0]: np.sqrt(varobject[index].value)}) else: # need sqrt() because in model pressure is quadratic because of the transformation dic_scen_node_press[(index[1])].update({index[0]: np.sqrt(varobject[index].value)}) return dic_arc_diam, dic_scen_node_press def _postprocessing(scenario, dic_scenario_flows, graph, **kwargs): dic_scen_PressLevel = {} dic_scen_MaxViolPress = math.inf # copy a list of nodes tmp_nodes = copy.deepcopy(list(graph.nodes)) # we now set iteratively the pressure level of a single node to its upper pressure bound and then compute the # unique pressure levels until we find valid pressure levels or have tested all nodes while tmp_nodes: # we have not found valid pressure levels for this scenario # temporary pressure levels dic_tmp_pressure = {} for node in list(graph.nodes): dic_tmp_pressure[node] = None # choose the node which pressure level is fixed to the upper pressure bound current_node = tmp_nodes[0] validation, tmp_viol = computePressureAtNode(graph=graph, node=current_node, nodeUpperBound=current_node, dic_scenario_flows=dic_scenario_flows[scenario], dic_node_pressure=dic_tmp_pressure, **kwargs) # if validation true, then we have feasible pressure levels; empty list of nodes that have to be # considered if validation: tmp_nodes = [] # we have feasible pressure level and save them dic_scen_PressLevel = dic_tmp_pressure dic_scen_MaxViolPress = tmp_viol else: # remove considered entry from list of nodes that will be considered for fixing the pressure level tmp_nodes.remove(tmp_nodes[0]) # we update the maximal pressure level violation if tmp_viol < dic_scen_MaxViolPress: # save currently best pressure levels dic_scen_PressLevel = copy.deepcopy(dic_tmp_pressure) dic_scen_MaxViolPress = tmp_viol return scenario, dic_scen_PressLevel, dic_scen_MaxViolPress def postprocessing(graph, distances, dic_arc_diam, dic_scenario_flows, dic_node_minPress, dic_node_maxPress, threads=1, verbose=0): """" Compute "more" accurate pressure levels for the considered scenarios in the network with optimal diameters Apply postprocessing of Master's thesis with adaption that we possibly consider every node for fixing its pressure level to the upper pressure bound. 
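    A minimal sketch of a call (all inputs are assumed to come from the preceding sizing steps,
    i.e. the graph, distances, optimal diameters, scenario flows and pressure bounds)::

        dic_scen_PressLevel, dic_scen_MaxViolPress = postprocessing(
            graph, distances, dic_arc_diam, dic_scenario_flows,
            dic_node_minPress, dic_node_maxPress, threads=2)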
:param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m] :type graph: networkx graph object :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m] :type: dictionary: key: arc, value: optimal diameter :param dic_scenario_flows: dictionary that contains for every node pair a dictionary containing all arc flows in [kg/s] of the corresponding (special) scenario :type dic_scenario_flows: dictionary key: scenarioName (node1,node2), value: dictionary: key: arc, value: arc flow :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar] :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float :param threads: number of threads used for parallelization :type threads: positive integer :param verbose: if > 0, parallelization progress is displayed :type verbose: int It holds dic_node_minPress[index] <= dic_node_maxPress[index] :return: dictionary that contains for every scenario the corresponding pressure levels in [bar] :rtype: dictionary key: scenarioName, value: dic: key: arc, value pressure level :return: dictionary that contains for every scenario the maximal pressure bound violation in [bar] :rtype: dictionary key: scenarioName, value: float = maximal pressure bound violation """ # Type and value check isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) else: raise TypeError("The input has to be a dictionary") isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) # best found pressure levels for scenarios; dic key: scenario, value: dic: key: node, value: pressure level in [bar] dic_scen_PressLevel = {} # maximal violation of pressure bounds; zero if no violation exists; dic: key: scenario, value: pressure violation dic_scen_MaxViolPress = {} # we compute "precise" pressure levels for every scenarios pool = Pool(threads) scenarios = [scenario for scenario in dic_scenario_flows.keys()] for i, values in enumerate(pool.imap(partial(_postprocessing, validation=True, graph=graph, dic_arc_diam=dic_arc_diam, distances=distances, dic_node_minPress=dic_node_minPress, dic_node_maxPress=dic_node_maxPress, tmp_violation=0, dic_scenario_flows=dic_scenario_flows), scenarios), 1): if verbose == 0: sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(scenarios) * 100))) dic_scen_PressLevel[values[0]] = values[1] dic_scen_MaxViolPress[values[0]] = values[2] pool.close() pool.join() return dic_scen_PressLevel, dic_scen_MaxViolPress def computePressureAtNode(validation, node, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows, dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, nDigits=6): """" Compute pressure levels recursive for given scenario and node that is 
fixed to its upper pressure level :param validation: boolean that is False, if the computed pressure levels are infeasible :rtype validation: bool :param node: node of the network for which we currently consider for computing the pressure levels :type node: str :param nodeUpperBound: node which pressure level is fixed to the upper bound :type node: str :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m] :type graph: networkx graph object :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m] :type: dictionary: key: arc, value: optimal diameter :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s] :type: dictionary: key: arc, value: arc flow :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar] :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float It holds dic_node_minPress[index] <= dic_node_maxPress[index] :param tmp_violation: violation of the current pressure bounds in [bar] :type tmp_violation: float :param dic_node_pressure: dictionary that contains node pressure levels in [bar] :type dic_node_pressure: dictionary key: node of the network, value: non-negative float :param ir: integral roughness of pipe in [mm] |br| * the default value is 0.2 (hydrogen, this value can also be used for methane) :type ir: positive float :param rho_n: density at standard state in [kg/m^3] |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane) :type rho_n: positive float :param T_m: constant temperature in [kelvin] |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane) :type T_m: float :param T_n: temperature in standard state in [kelvin] |br| * the default value is 273.15 (hydrogen, this value can also be used for methane) :type T_n: float :param p_n: pressure at standard state in [bar] |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane) :type p_n: non-negative float :param Z_n: realgasfactor of hydrogen at standard state |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane) :type Z_n: non-negative float :param nDigits: number of digits used in the pandas round function. Is applied to the specified or determined injection and withdrawal rates. |br| * the default value is 6 :type nDigits: positive int :return validation: boolean that is true, if the computed pressure levels are feasible :rtype: bool :return maximal violation of the pressure bounds w.r.t. 
the computed pressure levels in [bar] :rtype: float """ # Type and value check isBool(validation) utils.isString(node) utils.isString(nodeUpperBound) isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) else: raise TypeError("The input has to be a dictionary") isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) utils.isPositiveNumber(tmp_violation) if not isinstance(dic_node_pressure, dict): raise TypeError("The Input has to a dictionary") utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) utils.isStrictlyPositiveInt(nDigits) # if node is equal to nodeUpperBound, we fix its pressure level to the upper bound; base case in recursion if node == nodeUpperBound: dic_node_pressure[node] = dic_node_maxPress[node] # list of arcs arcs = list(distances.keys()) # we now compute the neighbors of the considered node neighbors = graph.neighbors(node) # compute pressure levels for neighbor nodes for neighbor in neighbors: # check if pressure is already computed if dic_node_pressure[neighbor] is None: # check if (node,neighbor) or (neighbor,node) is in graph if (node, neighbor) in arcs: # check flow direction for arc (node,neighbor) if dic_scenario_flows[(node, neighbor)] >= 0.0: # we know pressure level of beginning node of arc; compute pressure level for end node of arc dic_node_pressure[neighbor] = computePressureEndnodeArc((node, neighbor), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n) else: # we know pressure level of endnode dic_node_pressure[neighbor] = computePressureStartnodeArc((node, neighbor), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n, tol=10 ** (- nDigits)) else: # we know that arc (neighbor,node) is contained in the graph # check flow direction if dic_scenario_flows[(neighbor, node)] <= 0.0: # we know pressure of start node dic_node_pressure[neighbor] = computePressureEndnodeArc((neighbor, node), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n) else: # we know pressure level of end node dic_node_pressure[neighbor] = computePressureStartnodeArc((neighbor, node), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n, tol=10 ** (- nDigits)) # check if new computed pressure level is feasible if dic_node_pressure[neighbor] == - math.inf: # pressure violation is really high tmp_violation = math.inf return False, tmp_violation # check if we violate pressure bounds for neighbor node if dic_node_pressure[neighbor] < dic_node_minPress[neighbor] \ or dic_node_pressure[neighbor] > dic_node_maxPress[neighbor]: # pressure level is not valid validation = False # update pressure bound violation if dic_node_pressure[neighbor] < dic_node_minPress[neighbor]: # update violation and violation node if it is bigger if tmp_violation is None or \ abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) > tmp_violation: 
tmp_violation = abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) else: if tmp_violation is None or \ abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) > tmp_violation: tmp_violation = abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) # compute value for neighbor of tmp validation, tmp_violation = computePressureAtNode(validation, neighbor, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows, dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure) return validation, tmp_violation def computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, tol=10 ** (-4)): """" For given arc and pressure level of endNode compute the pressure of the startNode by solving the corresponding equation system :param arc: arc of the network for which we know the pressure at the endNode, i.e. the node which receives gas :type arc: tuple :param pressureEndNode: pressure level of endNode :type pressureEndNode: non-negative float :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s]; note arc flow of arc has to be positive :type: dictionary: key: arc, value: arc flow :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m] :type: dictionary: key: arc, value: optimal diameter :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param ir: integral roughness of pipe in [mm] |br| * the default value is 0.2 (hydrogen, this value can also be used for methane) :type ir: positive float :param rho_n: density at standard state in [kg/m^3] |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane) :type rho_n: positive float :param T_m: constant temperature in [kelvin] |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane) :type T_m: float :param T_n: temperature in standard state in [kelvin] |br| * the default value is 273.15 (hydrogen, this value can also be used for methane) :type T_n: float :param p_n: pressure at standard state in [bar] |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane) :type p_n: non-negative float :param Z_n: realgasfactor of hydrogen at standard state |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane) :type Z_n: non-negative float :param tol: tolerance to which accuracy we solve the equation system |br| * the default value is 10^-4 :type tol: non-negative float :return: pressure level of startNode in [bar] :rtype: float """ # Type and Value check if not isinstance(arc, tuple): raise TypeError("The input has to be a tuple") utils.isStrictlyPositiveNumber(pressureEndNode) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) isPandasSeriesPositiveNumber(distances) utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) utils.isStrictlyPositiveNumber(tol) if dic_scenario_flows[arc] == 0.0: return pressureEndNode # define function of nonlinear equation 
system f(x) = pressure_start^2-pressure_end^2-C # because then root is our valid pressure level solution, because we know pressure_end def f(pressure_start): d = dic_arc_diam[arc] A = 0.25 * math.pi * d ** 2 rho_in = 0.11922 * pressure_start ** 0.91192 - 0.17264 V_in = abs(dic_scenario_flows[arc]) / rho_in w_in = V_in / A eta_in = 1.04298 * 10 ** (-10) * pressure_start ** 1.53560 + 8.79987 * 10 ** (-6) nue_in = eta_in / rho_in Re_in = w_in * (d / nue_in) alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in)) Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10( (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in + ir / (3.71 * 1000 * d))) ** (-2) C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d) # note pressure_start is in bar p_m = pressure_start - C_tilde / 10 ** 5 if p_m < 0.0: # pressure drop too large no valid pressure assignment possible return -math.inf Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050 K_m = Z_m / Z_n # note flow direction is given by startnode endnode so we square the arcflow C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / ( math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * dic_scenario_flows[arc] ** 2 return pressure_start ** 2 - pressureEndNode ** 2 - C # find root of f, start value pressure_end + 0.5(bar) # x = fsolve(f, pressureEndNode + 0.5) # pressureEndnode + guess for solution depending on flow; you can replace this guess by the approximation of the # pressure drop of the MIP to probably achieve better results x = fsolve(f, pressureEndNode + 0.5 * (dic_scenario_flows[arc] ** 2) / (dic_arc_diam[arc] ** 5)) # check if tolerance is ok assert isinstance(tol, float) # check tolerance of first solution if f(x[0]) <= tol: # value is ok # because x is an array return first entry, we only have one solution for the nonlinear equation system return x[0] else: print('nonlinear equation system failed') # this warning means we could not solve the system, this could be the case if the pressure drop is too large # or when the start value for the nonlinear equation solver is too far away from the solution print("Nonlinear equation system in Postprocessing failed. Try another node which pressure level is" " set to the upper bound") return -math.inf def computePressureEndnodeArc(arc, pressureStartNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965): """" For given arc and pressure level of startNode compute the pressure of the endNode :param arc: arc of the network for which we know the pressure at the endNode, i.e. 
the node which receives gas :type arc: tuple :param pressureStartNode: pressure level of endNode :type pressureStartNode: non-negative float :param dic_scenario_flows: dictionary scenario and corresponding flows in [kg/s] :type: dictionary: key: arc, value: arc flow :param dic_arc_diam: dictionary containing for each arc the optimal diameter in [m] :type: dictionary: key: arc, value: optimal diameter :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param ir: integral roughness of pipe in [mm] |br| * the default value is 0.2 (hydrogen, this value can also be used for methane) :type ir: positive float :param rho_n: density at standard state in [kg/m^3] |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane) :type rho_n: positive float :param T_m: constant temperature in [kelvin] |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane) :type T_m: float :param T_n: temperature in standard state in [kelvin] |br| * the default value is 273.15 (hydrogen, this value can also be used for methane) :type T_n: float :param p_n: pressure at standard state in [bar] |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane) :type p_n: non-negative float :param Z_n: realgasfactor of hydrogen at standard state |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane) :type Z_n: non-negative float :return: pressure level of endNode in [bar] :rtype: float """ # Type and Value check if not isinstance(arc, tuple): raise TypeError("The input has to be a tuple") utils.isStrictlyPositiveNumber(pressureStartNode) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) isPandasSeriesPositiveNumber(distances) utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) arcFlow = dic_scenario_flows[arc] if arcFlow != 0: d = dic_arc_diam[arc] A = 0.25 * math.pi * d ** 2 rho_in = 0.11922 * pressureStartNode ** 0.91192 - 0.17264 V_in = abs(arcFlow) / rho_in w_in = V_in / A eta_in = 1.04298 * 10 ** (-10) * pressureStartNode ** 1.53560 + 8.79987 * 10 ** (-6) nue_in = eta_in / rho_in Re_in = w_in * (d / nue_in) alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in)) Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10( (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in + ir / (3.71 * 1000 * d))) ** (-2) C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d) # note pressure_start is in bar p_m = pressureStartNode - C_tilde / 10 ** 5 if p_m < 0.0: # pressure drop too large no valid pressure assignment possible return -math.inf Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050 K_m = Z_m / Z_n # note flow direction is given by startnode endnode so we square the arcflow C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * arcFlow ** 2 else: # flow is zero therefore pressure drop is zero C = 0 if pressureStartNode ** 2 - C >= 0: return math.sqrt(pressureStartNode ** 2 - C) else: # pressure drop is too big return negative value, which is a invalid pressure 
value return -math.inf def _computeTimeStepFlows(index, injectionWithdrawalRates, graph, **kwargs): # compute flows corresponding to demand by fixing demand for every node to given value and then compute # flows by LP dic_nodes_MinCapacity = {} dic_nodes_MaxCapacity = {} activeNodes = injectionWithdrawalRates.columns for node in graph.nodes: if node in activeNodes: dic_nodes_MinCapacity[node] = injectionWithdrawalRates.at[index, node] dic_nodes_MaxCapacity[node] = injectionWithdrawalRates.at[index, node] else: dic_nodes_MinCapacity[node] = 0 dic_nodes_MaxCapacity[node] = 0 # compute flows return index, computeSingleSpecialScenario(dic_nodes_MinCapacity=dic_nodes_MinCapacity, dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, graph=graph, **kwargs) def computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, threads=1, verbose=0, solver='glpk'): """" Compute for each timeStep and demands given by injectionWithdrawalRates the corresponding flow values :param: injectionWithdrawalRates: injection and withdrawal rates (withdrawals from the network are positive while injections are negative) in [kg^3/s] :type injectionWithdrawalRates: pandas DataFrame :param distances: pipeline distances in the length unit specified in the esM object ([m]) :type distances: pandas series :param graph: an undirected networkx graph: Its edges have the attribute length which is the pipeline length in [m] :type graph: networkx graph object :param entries: list of entry nodes of the network :type entries: list of str :param exits: list of exit nodes of the network :type exits: list of str :param threads: number of threads used for parallelization :type threads: positive integer :param verbose: if > 0, parallelization progress is displayed :type verbose: int :param solver: name of the optimization solver to use :type solver: string, default 'glpk' :return: dictionary that contains for every time step the corresponding flows in [kg/s] :rtype: dictionary key: timeStep, value: dict: key: arc, value: arc flow """ # Type and value check isPandasDataFrameNumber(injectionWithdrawalRates) isPandasSeriesPositiveNumber(distances) isNetworkxGraph(graph) isListOfStrings(entries) isListOfStrings(exits) # compute for every time step the corresponding flows; dict: key: timeStep, value: dict: key: arc, value: flow dic_timeStep_flows = {} # nodes with nonzero demand are given by columns of dataframe activeNodes = injectionWithdrawalRates.columns pool = Pool(threads) indexList = list(injectionWithdrawalRates.index) for i, values in enumerate(pool.imap(partial(_computeTimeStepFlows, graph=graph, distances=distances, entries=entries, exits=exits, startNode=activeNodes[0], endNode=activeNodes[1], specialScenario=False, injectionWithdrawalRates=injectionWithdrawalRates, solver=solver), indexList), 1): if verbose == 0: sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(indexList) * 100))) dic_timeStep_flows[values[0]] = values[1] pool.close() pool.join() return dic_timeStep_flows def networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress): """ If a pipe is longer than maxPipeLength than it will be split into several pipes with equidistant length, i.e., replace arc (u,v) by (u,v_1), (v_1,v_2),..., (v_n,v) with n = ceil(lengthOfPipe/maxPipeLength) -1 # TODO this function is only used for testing :param distances: pipeline distances in the length unit specified in the esM object :type distances: pandas series :param maxPipeLength: determines the maximal length of a pipe in [m]. 
:type maxPipeLength: positive number :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar] :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float It holds dic_node_minPress[index] <= dic_node_maxPress[index] :return: graph of the network corresponding to the distances :rtype: graph object of networkx :return: pipeline distances in the length unit specified in the esM object :rtype: pandas series :return: dic_node_minPress dictionary that contains for every node of the network its lower pressure bound in [bar] :rtype: dictionary key: node of the network, value: non-negative float :return dic_node_maxPress dictionary that contains for every node of the network its upper pressure bound in [bar] :rtype: dictionary key: node of the network, value: non-negative float """ # type and value check isPandasSeriesPositiveNumber(distances) isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) if maxPipeLength is not None: utils.isStrictlyPositiveNumber(maxPipeLength) # if maximal pipeline length is a positive number we apply the refinement if maxPipeLength is not None: # we have to check if pipes satisfy maximal pipeline length # list of new arcs that will be added newPipes = [] # list of lengths of new added pipes newPipesLengths = [] # list of split original pipes splitEdges = [] for edge in distances.index: # get length of pipeline pipeLength = distances[edge] if pipeLength > maxPipeLength: # compute number of necessary artificial nodes nArtificialNodes = math.ceil(pipeLength / maxPipeLength) - 1 # compute length of new pipelines newPipeLength = float(pipeLength / (math.ceil(pipeLength / maxPipeLength))) # lower and upper pressure bound for new nodes computed by average of nodes of original edge lowPress = (dic_node_minPress[edge[0]] + dic_node_minPress[edge[1]]) / 2 maxPress = (dic_node_maxPress[edge[0]] + dic_node_maxPress[edge[1]]) / 2 # add first new pipe and its length newPipes.append((edge[0], "v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1]))) # add length of first new pipe newPipesLengths.append(newPipeLength) # add lower and upper bound for new artificial node dic_node_minPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress dic_node_maxPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress # add intermediate artificial pipes, its length, and lower/upper pressure bounds for index in range(1, nArtificialNodes): newPipes.append(("v" + str(index) + "_" + str(edge[0]) + "_" + str(edge[1]), "v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1]))) newPipesLengths.append(newPipeLength) dic_node_minPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress dic_node_maxPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress # add last new pipe and its length newPipes.append(("v" + str(nArtificialNodes) + "_" + str(edge[0]) + "_" + str(edge[1]), edge[1])) newPipesLengths.append(newPipeLength) # add edge to split edges splitEdges.append(edge) # Now delete edges that have been split distances = distances.drop(splitEdges) # Add new edges distances = distances.append(pd.Series(newPipesLengths, index=newPipes)) # 
get edges for graph edges = distances.index # create empty graph G = nx.Graph() # create graph from given edges and add length as edge attribute for edge in edges: G.add_edge(edge[0], edge[1], length=distances[edge]) return G, distances, dic_node_minPress, dic_node_maxPress def determineDiscretePipelineDesign(robust, injectionWithdrawalRates, distances, dic_node_minPress, dic_node_maxPress, dic_diameter_costs=None, dic_candidateMergedDiam_costs=None, gdfEdges=None, regColumn1='nodeIn', regColumn2='nodeOut', solver='glpk', opexForDiameters=None, economicLifetime=30, interestRate=0.08, costUnit='€', ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, originalFluidFlows=None, nDigits=6, verbose=0, threads=1): """ We compute a robust (depending on parameter robust) optimal pipeline design, i.e. for a given network, we compute a minimal spanning tree w.r.t. its total length. Afterward, we compute our robust (special) scenarios, see Robinius et. al.. Also we compute for every timeStep of injectionWithdrawalRates the corresponding flows. We compute merged diameters according to list candidatesMergedDiameter, i.e. we compute a equivalent single diameter for two parallel pipes with the same diameter If robust is True, then we compute the corresponding pressure drops for every diameter and robust scenario. If robust is False, then we compute for every timeStep the corresponding pressure drops for every diameter and timeStep. If robust is True, then we compute optimal diameters by a MIP for the robust scenarios. If robust is False, then we compute optimal diameters by a MIP for the timeStep scenarios. Not Robust Version! In a postprocessing step, we compute "precise" pressure levels for the robust scenarios and the timeStep scenarios. Note that if robust is False, then the network may be infeasible for robust scenarios which can occur in the network! :param robust: Bool that is true, we build a robust pipeline network, otherwise not :type robust: bool :param injectionWithdrawalRates: the argument is a pandas DataFrame with the index column denoting the timesteps and the index row denoting the name of the network's nodes. Injection are denoted with negative floats and withdrawal with positive floats in [kg/s]. Example: node1 node2 node3 0 -4 2 2 1 3 -1.5 -1.5 ... ... ... ... 8759 0 -1 1. :type injectionWithdrawalRates: pandas DataFrame with floats :param distances: the parameter is a pandas Series with the indices being tuples of the network's nodes and the values being the lengths of the pipelines in [m]. Example: (node1, node2) 1000 (node2, node3) 50000 (node2, node1) 1000 (node3, node2) 50000 :type distances: pandas Series :param dic_node_minPress: dictionary that contains for every node of the network its lower pressure bound in [bar] :type dic_node_minPress: dictionary: key: node of the network, value: non-negative float :param dic_node_maxPress: dictionary that contains for every node of the network its upper pressure bound in [bar] :type dic_node_maxPress: dictionary key: node of the network, value: non-negative float It holds dic_node_minPress[index] <= dic_node_maxPress[index] :param dic_diameter_costs: dictionary that contains all diameters in [m] as keys and the values are the corresponding costs in [Euro/m]. Default Value is a preselection of diameters and its costs. 
if None, then we chose the following preselection of diameters and costs dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69, 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61, 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28, 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67, 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78} :type dic_diameter_costs: dict with keys: diameters, values: cost for pipeline; optional :param dic_candidateMergedDiam_costs: dictionary that contains a set of diameters in [m] as keys and the values are the corresponding costs in [Euro/m]. This diameters are then used to compute a single equivalent diameter for two looped (parallel) pipes with the considered diameter. |br| * the default value is empty dictionary {} :type dic_candidateMergedDiam_costs: dict with keys: diameters, values: cost for pipeline; optional :param gdfEdges: GeoDataFrame with the edges of the network and the names of their start and end nodes. Required for geo-referenced result visualization. Should be obtained from the getRefinedShapeFile function. :type gdfEdges: GeoDataFrame or None: optional, default is None :param regColumn1: name of the column in gdfEdges which holds the name of the injection/ withdrawal node at the beginning of the line. Required if gdfEdges is specified. :type regColumn1: string, optional, default is 'nodeIn' :param regColumn2: name of the column in gdfEdges which holds the name of the injection/ withdrawal node at the end of the line. Required if gdfEdges is specified. :type regColumn2: string, optional, default is 'nodeOut' :param solver: name of the optimization solver to use :type solver: string, default 'glpk' :param ir: integral roughness of pipe in [mm] |br| * the default value is 0.2 (hydrogen, this value can also be used for methane) :type ir: positive float :param rho_n: density at standard state in [kg/m^3] |br| * the default value is 0.089882 (hydrogen, you can use 0.71745877 for methane) :type rho_n: positive float :param T_m: constant temperature in [kelvin] |br| * the default value is 20 + 273.15 (hydrogen, you can use 281.15 for methane) :type T_m: float :param T_n: temperature in standard state in [kelvin] |br| * the default value is 273.15 (hydrogen, this value can also be used for methane) :type T_n: float :param p_n: pressure at standard state in [bar] |br| * the default value is 1.01325 (hydrogen, this value can also be used for methane) :type p_n: non-negative float :param Z_n: realgasfactor of hydrogen at standard state |br| * the default value is 1.00062387922965 (hydrogen, you can use 0.997612687740414 for methane) :type Z_n: non-negative float # TODO @Juelich where to use param originalFluidFlows: string that specifies the considered fluid |br| * the default value is None :type originalFluidFlows: str; optional :param nDigits: number of digits used in the round function |br| * the default value is 6 :type nDigits: positive int :param verbose: defines how verbose the console logging is:\n - 0: general model logging, warnings and optimization solver logging are displayed. - 1: warnings are displayed. - 2: no general model logging or warnings are displayed, the optimization solver logging is set to a minimum.\n Note: if required, the optimization solver logging can be separately enabled in the optimizationSpecs of the optimize function. 
|br| * the default value is 0 :type verbose: integer (0, 1 or 2) :return: tuple (dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, dic_timeStep_MaxViolPress, gdfEdges), with: - dic_arc_optimalDiameters dictionary - pressure levels of postprocessing of robust scenarios dic_scen_PressLevels - violation of pressure bounds of robust scenarios in optimized network determined by postprocessing - dic_scen_MaxViolPress: maximum pressure violation in robust scenarios - pressure levels of postprocessing of timeSteps dic_timeStep_PressLevels - violation of pressure bounds of timeStep scenarios in optimized network determined by postprocessing - dic_timeStep_MaxViolPress: maximum pressure violation in timestep scenarios - geopandas GeoDataFrame (information about diameters in 'diam' column and number of pipelines in 'nbPipes'); None if kwarg gdfEdges was specified as being Node :rtype: return types: - dic_arc_optimalDiameters: dictionary, key: arcs, values: (numberOfPipes, diameter) note usually numberOfPipes is 1, but if we have chosen a merged diameter, then we have two parallel pipes with the same diameter, i.e. numberOfPipes is 2. - dic_scen_PressLevels: dictionary, key: nodePair, value: dict: key: arc, value: pressure level in [bar] - dic_scen_MaxViolPress: dictionary, key: nodePair, value: dict: key: arc, value: non-negative number (zero means no pressure violation) - dic_timeStep_PressLevels: dictionary, key: timeStep, value: dict: key: arc, value: pressure level in [bar] - dic_timeStep_MaxViolPress: dictionary, key: nodePair, value: dict: key: arc, value: non-negative number (zero means no pressure violation) - gdfEdges: geopandas geodataframe; None if kwarg gdfEdges was specified as being Node """ # Do type and value check of input data: isBool(robust) isPandasDataFrameNumber(injectionWithdrawalRates) isPandasSeriesPositiveNumber(distances) isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) # extract diameters for the optimization if dic_diameter_costs is not None: if isinstance(dic_diameter_costs, dict): diameters = list(dic_diameter_costs.keys()) if isinstance(diameters, list): for diam in diameters: utils.isStrictlyPositiveNumber(diam) else: raise TypeError("The input argument has to be a list") isDictionaryPositiveNumber(dic_diameter_costs) if dic_candidateMergedDiam_costs is not None: if isinstance(dic_candidateMergedDiam_costs, dict): for diam in dic_candidateMergedDiam_costs.keys(): utils.isStrictlyPositiveNumber(diam) utils.isPositiveNumber(dic_candidateMergedDiam_costs[diam]) else: raise TypeError("The input argument has to be a list") utils.isString(regColumn1), utils.isString(regColumn2) if gdfEdges is not None: if isinstance(gdfEdges, gpd.GeoDataFrame): if (not regColumn1 in gdfEdges.columns) | (not regColumn2 in gdfEdges.columns): raise ValueError("regColumn1 or regColumn2 not in columns of gdfEdges") else: gdfEdges['nodes'] = gdfEdges.apply(lambda x: (x['nodeIn'], x['nodeOut']), axis=1) else: raise TypeError("gdfEdges has to be a geopandas GeoDataFrame.") if opexForDiameters is not None: if isinstance(opexForDiameters, list): for opex in opexForDiameters: utils.isPositiveNumber(opex) else: raise TypeError("The input argument has to be a list") utils.isPositiveNumber(interestRate) utils.isStrictlyPositiveNumber(economicLifetime) utils.isString(costUnit) utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if 
not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) if originalFluidFlows is not None: utils.isString(originalFluidFlows) utils.isStrictlyPositiveInt(nDigits) if dic_diameter_costs is None: print("There are no diameters to choose in the optimization. Thus, we consider the diameters and costs:") dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69, 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61, 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28, 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67, 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78} print(dic_diameter_costs) # create graph with respect to distances utils.output('Creating graph with respect to given distances', verbose, 0) graph, distances = createNetwork(distances) # plot graph if verbose < 1: if gdfEdges is not None: gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)] fig, ax = plt.subplots(figsize=(4,4)) gdfEdges.plot(ax=ax, color='k'), ax.axis('off') else: utils.output("Original Network Graph:", verbose, 0) nx.draw(graph, with_labels=True) plt.show() # Create a minimum spanning tree of the network with a reasonable logic utils.output('Creating a Steiner treee', verbose, 0) inner_nodes = list(injectionWithdrawalRates.columns) graph, distances = createSteinerTree(graph, distances, inner_nodes) utils.output("Steiner tree:", verbose, 0) if verbose < 1: if gdfEdges is not None: gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)] fig, ax = plt.subplots(figsize=(4,4)) gdfEdges.plot(ax=ax, color='k'), ax.axis('off') else: nx.draw(graph, with_labels=True) plt.show() # Compute robust scenarios for spanning tree network utils.output("Compute robust scenario set for tree network (based on " + str(len(graph.nodes)*len(graph.nodes)-len(graph.nodes)) + ' node combinations). Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_nodePair_flows, entries, exits = generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress, solver=solver, threads=threads, verbose=verbose) utils.output("Number of robust scenarios: " + str(len(dic_nodePair_flows.keys())) , verbose, 0) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) # Compute scenarios for timeSteps utils.output("Compute scenarios for each timestep. Number of timestep scenarios: " + str(injectionWithdrawalRates.shape[0]) + '. 
Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_timeStep_flows = computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, solver=solver, threads=threads, verbose=verbose) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) # Compute equivalent single diameters for looped (parallel) pipes utils.output("Compute equivalent single diameters for looped (parallel) pipes", verbose, 0) # dic_LoopedDiam_costs contains the new computed diameters and its costs dic_LoopedDiam_costs = None # dic_newDiam_oldDiam merges new and old diameters dic_newDiam_oldDiam = None if dic_candidateMergedDiam_costs is not None: dic_LoopedDiam_costs, dic_newDiam_oldDiam = computeLargeMergedDiameters(dic_candidateMergedDiam_costs) # merge all diameters to one dictionary for the optimization model dic_diameter_costs.update(dic_LoopedDiam_costs) # Compute pressure drops for each scenario and diameter and the compute optimal diameters # depending on robust, we do this w.r.t. robust scenarios or every timeStep # dictionary for the pressure coefficients dic_pressureCoef = {} # dictionary for the optimal diameters dic_arc_diam = {} if robust: # we compute the pressure drops for the robust scenarios utils.output("Pressure drop coefficients for diameters with respect to robust scenarios", verbose, 0) dic_pressureCoef = determinePressureDropCoef(dic_nodePair_flows, distances, dic_node_minPress, dic_node_maxPress, list(dic_diameter_costs.keys())) specialScenarionames = list(dic_nodePair_flows.keys()) # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the robust scenarios utils.output('Determining optimal robust pipeline design under the consideration of pressure ' + 'losses and robust scenarios', verbose, 0) # returns dict: key: arc, value: optimal diameter # returns dict: key: nodePair, value: dic: key: node, value: pressure level dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef, specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, robust, verbose=verbose, solver=solver, threads=threads) else: # we compute pressure drops for every timeStep scenario. Not robust version! # we compute the pressure drops for the robust scenarios and optimize utils.output("Pressure drop coefficients for diameters with respect to robust scenarios", verbose, 0) dic_pressureCoef = determinePressureDropCoef(dic_timeStep_flows, distances, dic_node_minPress, dic_node_maxPress, list(dic_diameter_costs.keys())) timeSteps = list(dic_timeStep_flows.keys()) # Determine optimal discrete pipeline selection by solving a MIP w.r.t. 
the timeStep scenarios utils.output('Determining optimal pipeline design under the consideration of pressure losses and every time step', verbose, 0) utils.output('This network design is necessarily robust!', verbose, 0) # returns dict: key: arc, value: optimal diameter # returns dict: key: timeStep, value: dic: key: node, value: pressure level dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef, timeSteps, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, False, verbose=verbose, solver=solver, threads=threads) if not dic_arc_diam: utils.output("No feasible diameter selections exits", verbose, 0) return None # Do postprocessing: Use a "more" accurate pressure model and apply Postprocessing of master's thesis: # first do postprocessing for special scenarios utils.output("Do postprocessing for robust (special) scenarios. Number of scenarios: " + str(len(dic_nodePair_flows)) + '. Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_scen_PressLevels, dic_scen_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_nodePair_flows, dic_node_minPress, dic_node_maxPress, threads=threads, verbose=verbose) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) # print if some of these scenarios are not feasible for the "more" precise pressure model for scenario in dic_scen_MaxViolPress.keys(): if dic_scen_MaxViolPress[scenario] > 0: utils.output("Robust Scenario " + str(scenario) + " violates pressure bounds by " + str(dic_scen_MaxViolPress[scenario]), verbose, 0) # compute pressure levels for each time step utils.output("Do postprocessing for each timestep scenarios. Number of scenarios: " + str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_timeStep_PressLevels, dic_timeStep_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_timeStep_flows, dic_node_minPress, dic_node_maxPress, threads=threads, verbose=verbose) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) for timeStep in dic_timeStep_MaxViolPress.keys(): if dic_timeStep_MaxViolPress[timeStep] > 0: utils.output("Time Step " + str(timeStep) + " violates pressure bounds by " + str(dic_timeStep_MaxViolPress[timeStep]), verbose, 0) # now determine final output, i.e. dictionary: key: arcs, values: (numberOfPipes, diameter) # note usually numberOfPipes is 1, but if we have chosen a merged diameter, then we have two parallel pipes with # the same diameter, i.e. numberOfPipes is 2. 
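    # Illustrative sketch (added for clarity; not part of the original algorithm). The arc names and diameters
    # below are made-up assumptions that only show the shape of the dictionary built next: an arc with a plain
    # diameter is stored as (1, diameter), while an arc that received a merged (looped) diameter is stored as
    # (2, original single-pipe diameter).
    _exampleArcOptimalDiameters = {('nodeA', 'nodeB'): (1, 0.527),   # one pipe with a 0.527 m diameter
                                   ('nodeB', 'nodeC'): (2, 0.722)}   # two looped pipes, each with 0.722 m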
dic_arc_optimalDiameters = {} for arc in dic_arc_diam.keys(): if dic_LoopedDiam_costs is not None: if dic_arc_diam[arc] in dic_LoopedDiam_costs.keys(): dic_arc_optimalDiameters[arc] = (2, dic_newDiam_oldDiam[dic_arc_diam[arc]]) else: dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc]) else: dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc]) if verbose < 1: if gdfEdges is not None: gdfEdges = gdfEdges[gdfEdges.nodes.isin(dic_arc_optimalDiameters)] gdfEdges['diam'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][1], axis=1) gdfEdges['nbPipes'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][0], axis=1) plotOptimizedNetwork(gdfEdges) else: # plot network with new diameters utils.output("Network with optimized diameters, looped pipes are indicated by two colored edges, " + "Thicker edge means larger diameter", verbose, 0) finalG = nx.MultiGraph() for arc in dic_arc_optimalDiameters.keys(): if dic_arc_optimalDiameters[arc][0] == 1: # we have a single not looped pipe finalG.add_edge(arc[0], arc[1], color='black', weight=5 * dic_arc_optimalDiameters[arc][1]) else: # we have a looped pipe finalG.add_edge(arc[0], arc[1], color='r', weight=10 * dic_arc_optimalDiameters[arc][1]) finalG.add_edge(arc[0], arc[1], color='b', weight=5 * dic_arc_optimalDiameters[arc][1]) # pos = nx.circular_layout(finalG) edges = finalG.edges() colors = [] weight = [] for (u, v, attrib_dict) in list(finalG.edges.data()): colors.append(attrib_dict['color']) weight.append(attrib_dict['weight']) nx.draw(finalG, edges=edges, edge_color=colors, width=weight, with_labels=True) plt.show() # Add some output which somehow quantifies the difference between the original and the new # pipeline design (for this additional input argument are required) # TODO @ Juelich just compare original solution to solution dic_arc_optimalDiameters return dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, \ dic_timeStep_MaxViolPress, gdfEdges def plotOptimizedNetwork(gdf_pipes, figsize=(4,4), nodesColumn='nodes', diamColumn='diam', nbPipesColumn='nbPipes', line_scaling=1, gdf_regions=None, pressureLevels=None, pMin=50, pMax=100, cmap='Spectral_r', cbxShift=0.32, cbyShift=0.08, cbWidth=0.4, fontsize=10, cbTitle='Pressure [bar]'): """Plot optimized network, visualizing chosen pipe diameters and, if selected, pressure levels of a scenario. 
:param gdf_pipes: GeoDataFrame, containing information about the diameters, number of pipes and routes of the pipeline network :type gdf_pipes: geopandas GeoDataFrame :param figsize: figure size, defaults to (4,4) :type figsize: tuple, optional :param nodesColumn: name of the column in gdf_pipes containing a tuple (startNode, endNode) with the name of the nodes being strings, defaults to 'nodes' :type nodesColumn: str, optional :param diamColumn: name of the column in gdf_pipes containing the diameters of the pipelines in m, defaults to 'diam' :type diamColumn: str, optional :param nbPipesColumn: name of the column in gdf_pipes containing the number of parallel pipes along a connection (maximum parallel pipes: 2), defaults to 'nbPipes' :type nbPipesColumn: str, optional :param line_scaling: scaling factor for line width, defaults to 1 :type line_scaling: int, optional :param gdf_regions: GeoDataFrame for background plotting, defaults to None :type gdf_regions: geopandas GeoDataFrame, optional :param pressureLevels: pressure levels at each node for one scenario/ timestep, defaults to None :type pressureLevels: dictionary or series with keys/ indices being the nodes of the network, optional :param pMin: minimum pressure of colorbar, defaults to 50 :type pMin: int, optional :param pMax: maximum pressure of colorbar, defaults to 100 :type pMax: int, optional :param cmap: colormap name, defaults to 'Spectral_r' :type cmap: str, optional :param cbxShift: colorbar x shift, defaults to 0.32 :type cbxShift: float, optional :param cbyShift: colorbar y shift, defaults to 0.08 :type cbyShift: float, optional :param cbWidth: colorbar width, defaults to 0.4 :type cbWidth: float, optional :param fontsize: fontsize of legend and colorbar, defaults to 10 :type fontsize: int, optional :param cbTitle: colorbar title, defaults to 'Pressure [bar]' :type cbTitle: str, optional :return: tuple (fig, ax) :rtype: - fig: matplotlib figure - ax: matplotlib axis """ fig, ax = plt.subplots(figsize=figsize) cmap = mpl.cm.get_cmap(cmap) if gdf_regions is not None: gdf_regions.plot(ax=ax, facecolor='lightgrey', edgecolor='lightgrey') diamMin = gdf_pipes[gdf_pipes[diamColumn] > 0][diamColumn].min() for i, row in gdf_pipes.iterrows(): lw = row[diamColumn]/diamMin*line_scaling if pressureLevels is not None: p = (pressureLevels[row[nodesColumn][0]] + pressureLevels[row[nodesColumn][1]])/2 color = cmap((p-pMin)/(pMax-pMin)) else: color='k' if (row[nbPipesColumn] == 1): gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw, capstyle='round') else: gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw*3, capstyle='round') gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color='white', linewidth=lw) ax.axis('off') lines = [] for diam in sorted(gdf_pipes[diamColumn].unique()): line = plt.Line2D(range(1), range(1), linewidth=diam/diamMin*line_scaling, color='k', marker='_', label="{:>1.5}".format(str(diam)) + ' m') lines.append(line) leg = ax.legend(handles=lines, prop={'size': fontsize}, loc=6, bbox_to_anchor=(1,0.5), title='Diameters') leg.get_frame().set_edgecolor('white') if pressureLevels is not None: sm1 = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=pMin, vmax=pMax)) sm1._A = [] cax = fig.add_axes([cbxShift, cbyShift, cbWidth, 0.03]) cb1 = fig.colorbar(sm1, cax=cax, pad=0.05, aspect=7, fraction=0.07, orientation='horizontal') cax.tick_params(labelsize=fontsize) cax.set_xlabel(cbTitle, size=fontsize) cb1.ax.xaxis.set_label_position('top') plt.show() return fig, ax
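# ----------------------------------------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original module). The node names, rates, distances
# and pressure bounds below are made-up toy values, and running the sketch additionally requires a Pyomo-supported
# LP/MILP solver (GLPK by default) to be installed. It only demonstrates how the main entry point
# determineDiscretePipelineDesign is called and what its first return value, dic_arc_optimalDiameters, looks like.
if __name__ == '__main__':
    # two-node toy network: nodeA injects up to 4 kg/s (negative rates), nodeB withdraws the same amount
    toyRates = pd.DataFrame({'nodeA': [-4.0, -2.0], 'nodeB': [4.0, 2.0]}, index=[0, 1])  # [kg/s]
    toyDistances = pd.Series([5000.0], index=[('nodeA', 'nodeB')])                        # [m]
    toyMinPress = {'nodeA': 60.0, 'nodeB': 50.0}                                          # [bar]
    toyMaxPress = {'nodeA': 100.0, 'nodeB': 100.0}                                        # [bar]

    toyResult = determineDiscretePipelineDesign(robust=True,
                                                injectionWithdrawalRates=toyRates,
                                                distances=toyDistances,
                                                dic_node_minPress=toyMinPress,
                                                dic_node_maxPress=toyMaxPress,
                                                verbose=1)  # verbose=1 suppresses the plots of the toy network
    # dictionary with key: arc, value: (number of parallel pipes, diameter in m)
    print(toyResult[0])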
49.356811
131
0.666678
import pandas as pd from FINE import utils import networkx as nx import math import pyomo.environ as py import warnings from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition import numpy as np import copy from scipy.optimize import fsolve import matplotlib.pyplot as plt import matplotlib as mpl import shapely as shp import time from multiprocessing import Pool import sys from functools import partial try: import geopandas as gpd except ImportError: warnings.warn('The GeoPandas python package could not be imported.') def isPandasDataFrameNumber(dataframe): if not isinstance(dataframe, pd.DataFrame): raise TypeError("The input argument has to be a pandas DataFrame") else: if not dataframe.select_dtypes(exclude=["float", "int"]).empty: raise ValueError("The input pandas DataFrame has to contain only floats or ints") def isPandasSeriesPositiveNumber(pandasSeries): if not isinstance(pandasSeries, pd.Series): raise TypeError("The input argument has to be a pandas series") else: for index in pandasSeries.index: utils.isPositiveNumber(pandasSeries[index]) def isNetworkxGraph(graph): if not isinstance(graph, nx.Graph): raise TypeError("The input argument has to be a networkx graph") def isDictionaryPositiveNumber(dictionary): if not isinstance(dictionary, dict): raise TypeError("The input argument has to be a dictionary") else: for key in dictionary.keys(): utils.isPositiveNumber(dictionary[key]) def checkLowerUpperBoundsOfDicts(lowerDict, upperDict): if not (lowerDict.keys() == upperDict.keys()): raise ValueError("The input arguments have to have the same keys") else: for key in lowerDict.keys(): if lowerDict[key] > upperDict[key]: raise ValueError("The lower bound has to be the smaller than the upper bound") def isListOfStrings(strings): if not isinstance(strings, list): raise TypeError("The input argument has to be a list") else: for string in strings: utils.isString(string) def isBool(boolean): if not isinstance(boolean, bool): raise TypeError("The input argument has to be a bool") def getInjectionWithdrawalRates(componentName='', esM=None, operationVariablesOptimumData=None): if operationVariablesOptimumData is not None: op = operationVariablesOptimumData else: op = esM.componentModelingDict[esM.componentNames[componentName]]. \ getOptimalValues('operationVariablesOptimum')['values'].loc[componentName] if esM is None: mapN = {} for conn in operationVariablesOptimumData.index: loc, loc_ = conn mapN.setdefault(loc, {}).update({loc_: loc + '_' + loc_}) mapN.setdefault(loc_, {}).update({loc: loc_ + '_' + loc}) else: mapN = esM.getComponent(componentName)._mapL # Initialize list for nodal injection and withdrawal time series data injectionWithdrawalRates, nodeIx = [], [] # Reset connections set (not all indices might be in the operationVariablesOptimumData data) connections = set() # For each node loc, compute the injection and withdrawal rates for loc, locConn in mapN.items(): # As in a few cases zero columns/ rows are dropped from data frames, two lists # of eligible connection indices are created. ixIn, ixOut = [], [] for loc_, conn in locConn.items(): if (loc, loc_) in op.index: ixOut.append((loc, loc_)), connections.add((loc, loc_)) if (loc_, loc) in op.index: ixIn.append((loc_, loc)), connections.add((loc_, loc)) # If either list has at least one entry, the incoming and outgoing flows are selected # from the original optimal flow variables and aggregated. The resulting commodity # withdrawals from the network are positive while injections are negative. 
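        # Illustrative sketch (added for clarity; the numbers are made-up assumptions): if 3 kg/s flow into node
        # loc over its incoming arcs and 5 kg/s leave it over its outgoing arcs, the value appended below is
        # 3 - 5 = -2, i.e. a net injection of 2 kg/s at loc; a positive value would be a net withdrawal.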
if (len(ixIn) != 0) | (len(ixOut) != 0): injectionWithdrawalRates.append(op.loc[ixIn].sum() - op.loc[ixOut].sum()) nodeIx.append(loc) # Concat data to a pandas dataframe injectionWithdrawalRates = pd.concat(injectionWithdrawalRates, keys=nodeIx, axis=1) return injectionWithdrawalRates def getNetworkLengthsFromESM(componentName, esM): utils.isString(componentName) utils.isEnergySystemModelInstance(esM) distances = esM.getComponent(componentName).distances.copy() indexMap = esM.getComponent(componentName)._mapC distances.index = [indexMap[ix] for ix in distances.index] return distances def getRefinedShapeFile(shapeFilePath, regColumn1, regColumn2, dic_node_minPress, dic_node_maxPress, minPipeLength, maxPipeLength): # type and value check isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) utils.isString(regColumn1), utils.isString(regColumn2) utils.isStrictlyPositiveNumber(maxPipeLength) utils.isStrictlyPositiveNumber(minPipeLength) # Read shape file with linestrings connecting the entry/ exit nodes of the gas gdf=gpd.read_file(shapeFilePath) if not (gdf.geometry.type == 'LineString').all(): raise ValueError("Geometries of the shape file have to be LineStrings") print('Number of edges before segmentation:', len(gdf)) originalNodesSet = set(gdf[regColumn1]) | set(gdf[regColumn2]) print('Number of nodes before segmentation:', len(originalNodesSet)) # Obtain nodes from shape file, assign names and minimum/ maximum pressure levels to them, delete duplicates coordNames, coords = [], [] pMin, pMax = [], [] lines = [] # Break linestrings into linear pieces for i, row in gdf.iterrows(): # Simplify linestring (to increase the minimum length of pipeline connections wherever possible) line = row.geometry.simplify(minPipeLength) lines.append(line) row.geometry = line # Get new nodes coords_ = [i for i in line.coords] coords.extend(coords_) coordNames_ = [row[regColumn1]] coordNames_.extend([row[regColumn1] + '_' + row[regColumn2] + '_' + str(j) for j in range(len(coords_)-2)]) coordNames_.append(row[regColumn2]) coordNames.extend(coordNames_) # Get averaged lower and upper pressure levels pMin.extend([(dic_node_minPress[row[regColumn1]]*(len(coords_)-j-1) + dic_node_minPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))]) pMax.extend([(dic_node_maxPress[row[regColumn1]]*(len(coords_)-j-1) + dic_node_maxPress[row[regColumn2]]*j)/(len(coords_)-1) for j in range(len(coords_))]) gdf['geometry'] = lines # Create DataFrame of old and new nodes and drop duplicates dfNodes = pd.DataFrame([coordNames, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T dfNodes = dfNodes.drop_duplicates(subset='lon_lat') dfNodes = dfNodes.drop_duplicates(subset='nodeName') # Obtain edges from shape file, assign names to them, delete duplicates nodesIn_nodesOut = [] nodesIn = [] nodesOut = [] lineStrings = [] for i, row in gdf.iterrows(): coords_ = [i for i in row.geometry.coords] for j in range(len(coords_)-1): nodeIn = dfNodes.loc[dfNodes['lon_lat'] == coords_[j],'nodeName'].iloc[0] nodeOut = dfNodes.loc[dfNodes['lon_lat'] == coords_[j+1],'nodeName'].iloc[0] nodesIn.append(nodeIn), nodesOut.append(nodeOut) nodes = [nodeIn,nodeOut] nodes.sort() nodesIn_nodesOut.append('edge_' + nodes[0] + '_' + nodes[1]) lineStrings.append(shp.geometry.LineString([coords_[j],coords_[j+1]])) dfEdges = pd.DataFrame([nodesIn, nodesOut, nodesIn_nodesOut, lineStrings], index=['nodeIn', 
'nodeOut','edgeName','geometry']).T dfEdges = dfEdges.drop_duplicates(subset='edgeName') gdfEdges = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'}) print('Number of edges after 1. segmentation:', len(gdfEdges)) print('Number of nodes after 1. segmentation:', len(dfNodes)) # Add nodes when line distances are too long newNodes, newLines, newNodesName, newLinesName = [], [], [], [] nodesIn, nodesOut, coords = [], [], [] pMin, pMax = [], [] for i, row in gdfEdges.iterrows(): # If lines are two long, segment them if np.round(row['geometry'].length,2) > maxPipeLength: nbNewNodes = int(np.floor(row['geometry'].length/maxPipeLength)) line = row.geometry newNodes_, newLines_, newNodesName_, newLinesName_ = [], [], [], [] nodesIn_, nodesOut_, coords_ = [], [], [] pMin_, pMax_ = [], [] nodeStart, nodeEnd = line.interpolate(0), line.interpolate(line.length) nodeStartName = row['nodeIn'] pMinIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMin'].iloc[0] pMinOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMin'].iloc[0] pMaxIn = dfNodes[dfNodes['nodeName'] == row['nodeIn'] ]['pMax'].iloc[0] pMaxOut = dfNodes[dfNodes['nodeName'] == row['nodeOut']]['pMax'].iloc[0] spacing = row['geometry'].length/(nbNewNodes+1) for j in range(1,nbNewNodes+1): newNode = line.interpolate(j*spacing) newNodes_.append(newNode) coords_.append((newNode.x, newNode.y)) newNodeName = row['nodeIn'] + '_' + row['nodeOut'] + '_a_' + str(j) newNodesName_.append(newNodeName) newLine = shp.geometry.LineString([nodeStart,newNode]) newLines_.append(newLine) newLinesName_.append('temp'), nodesIn_.append(nodeStartName), nodesOut_.append(newNodeName) pMin_.append((pMinIn*(nbNewNodes-j+1) + pMinOut*j)/(nbNewNodes+1)) pMax_.append((pMaxIn*(nbNewNodes-j+1) + pMaxOut*j)/(nbNewNodes+1)) nodeStart, nodeStartName = newNode, newNodeName newLines_.append(shp.geometry.LineString([newNode,nodeEnd])) newLinesName_.append('temp') nodesIn_.append(newNodeName), nodesOut_.append(row['nodeOut']) newNodes.extend(newNodes_), newLines.extend(newLines_), newNodesName.extend(newNodesName_) newLinesName.extend(newLinesName_), pMin.extend(pMin_), pMax.extend(pMax_) nodesIn.extend(nodesIn_), nodesOut.extend(nodesOut_), coords.extend(coords_) if len(newNodes) > 0: dfNodes = dfNodes.append(pd.DataFrame([newNodesName, pMin, pMax, coords], index=['nodeName','pMin','pMax','lon_lat']).T) dfEdges = pd.DataFrame([nodesIn, nodesOut, newLinesName, newLines], index=['nodeIn', 'nodeOut','edgeName','geometry']).T gdfEdgesNew = gpd.GeoDataFrame(dfEdges,crs=gdf.crs).to_crs({'init': 'epsg:3035'}) gdfEdges = gdfEdges.append(gdfEdgesNew) gdfEdges = gdfEdges[gdfEdges.geometry.length.round(2) <= maxPipeLength] del gdfEdges['edgeName'] renameDict = {name: 'auxNode' + str(i) for i, name in enumerate(dfNodes.nodeName.values) if name not in originalNodesSet} for node in originalNodesSet: renameDict.update({node:node}) gdfEdges['nodeIn'] = gdfEdges.apply(lambda x: renameDict[x['nodeIn']], axis=1) gdfEdges['nodeOut'] = gdfEdges.apply(lambda x: renameDict[x['nodeOut']], axis=1) gdfEdges['distances'] = gdfEdges['geometry'].length print('Number of edges after 2. segmentation:', len(gdfEdges)) dfNodes['nodeName'] = dfNodes.apply(lambda x: renameDict[x['nodeName']], axis=1) dfNodes['geometry'] = dfNodes.apply(lambda x: shp.geometry.Point(x['lon_lat']), axis=1) del dfNodes['lon_lat'] gdfNodes = gpd.GeoDataFrame(dfNodes,crs=gdf.crs).to_crs({'init': 'epsg:3035'}) print('Number of nodes after 2. 
segmentation:', len(gdfNodes)) print('Minimum length [m]:', gdfEdges.distances.min(), 'Maximum length [m]:', gdfEdges.distances.max()) distances_new = pd.Series(gdfEdges['distances'].values, index = [(n1, n2) for n1, n2 in zip(gdfEdges['nodeIn'],gdfEdges['nodeOut'])]) dic_node_minPress_new = {n:pMin for n, pMin in zip(gdfNodes['nodeName'], gdfNodes['pMin'])} dic_node_maxPress_new = {n:pMax for n, pMax in zip(gdfNodes['nodeName'], gdfNodes['pMax'])} return distances_new, dic_node_minPress_new, dic_node_maxPress_new, gdfNodes, gdfEdges def createNetwork(distances): # type and value check isPandasSeriesPositiveNumber(distances) for index in distances.index: if not isinstance(index, tuple): raise TypeError("Index of pandas series has to be a tuple") # first check if distances are consistent, i.e. if (u,v) and (v,u) are in distances they have to have the same # length and we will delete one of them # tmp list for reversed edges that we will be delete tmp_edges = [] for edge in distances.index: if (edge[1], edge[0]) in distances.index and (edge[1], edge[0]) not in tmp_edges: assert (distances[edge] == distances[(edge[1], edge[0])]) tmp_edges.append(edge) # delete tmp_edges because reversed edges are already contained and we consider an undirected graph distances = distances.drop(tmp_edges) # get edges for graph edges = distances.index # create empty graph G = nx.Graph() # create graph from given edges and add length as edge attribute for edge in edges: G.add_edge(edge[0], edge[1], length=distances[edge]) return G, distances def createSteinerTree(graph, distances, inner_nodes): from networkx.algorithms import approximation # type and value check isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) # compute spanning tree with minimal sum of pipeline lengths S = approximation.steiner_tree(graph, terminal_nodes=inner_nodes, weight='length') # TODO check why function fails when MST function is not called here S = nx.minimum_spanning_tree(S, weight='length') # delete edges that are in graph but not in the tree from the distance matrix edgesToDelete = [] for edge in distances.index: # check if edge or its reversed edge are contained in the tree # you have to check both directions because we have an undirected graph if edge not in S.edges and (edge[1], edge[0]) not in S.edges: edgesToDelete.append(edge) distances = distances.drop(edgesToDelete) return S, distances def _generateRobustScenarios(startNode_endNode, **kwargs): startNode = startNode_endNode[0] endNode = startNode_endNode[1] return startNode_endNode, computeSingleSpecialScenario(startNode=startNode, endNode=endNode, **kwargs) def generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress, solver='glpk', threads=1, verbose=0): # Type and value checks isPandasDataFrameNumber(injectionWithdrawalRates) isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) # get for every entry/exit node the minimal and maximal injection rate and save it in a # dictionary: key: node, value: min Rate; respectively max Rate in [kg/s] # we note that inner nodes a handled separately in the computation of the special scenario dic_nodes_MinCapacity = {} dic_nodes_MaxCapacity = {} # list of entry nodes and exit nodes; note node can be in both for example storages entries = [] exits = [] inners = [] for node in list(injectionWithdrawalRates.columns.values): minRate = injectionWithdrawalRates[node].min() maxRate = injectionWithdrawalRates[node].max() assert (minRate <= maxRate) dic_nodes_MinCapacity[node] = 
minRate dic_nodes_MaxCapacity[node] = maxRate # if minRate is negative, then node is an entry; if maxRate is positive, then node is an exit if minRate < 0.0: entries.append(node) if maxRate > 0.0: exits.append(node) elif maxRate > 0: exits.append(node) else: inners.append(node) maxPressuresAreEqual = True if len(set(dic_node_maxPress.values())) == 1 else False p_exits = [dic_node_minPress[exit] for exit in exits] p_entries_inners = [dic_node_minPress[node] for node in entries] p_inners = [dic_node_minPress[node] for node in inners] p_entries_inners.extend(p_inners) minPressureExitsIsLarger = True if min(p_exits) >= max(p_entries_inners) else False # compute special scenario for each node combination; see Paper Robinius et. al.(2019); Labbé et. al. (2019) # save arc flows of special scenarios for each node combination; # dictionary: key: node pair, value: dictionary: key: arc, value: arc flow dic_nodePair_flows = {} if maxPressuresAreEqual and minPressureExitsIsLarger: if verbose == 0: print('Reduced robust scenario set can be generated' + ' (pMax is equal at all nodes & pMin at exits is >= at inner and entry nodes).') nodes = [(startNode, endNode) for startNode in entries for endNode in exits if startNode != endNode] else: nodes = [(startNode, endNode) for startNode in graph.nodes for endNode in graph.nodes if startNode != endNode] pool = Pool(threads) for i, values in enumerate(pool.imap(partial(_generateRobustScenarios, graph=graph, distances=distances, entries=entries, exits=exits, dic_nodes_MinCapacity=dic_nodes_MinCapacity, dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, solver=solver), nodes), 1): if verbose == 0: sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(nodes) * 100))) dic_nodePair_flows[values[0]] = values[1] pool.close() pool.join() return dic_nodePair_flows, entries, exits def computeSingleSpecialScenario(graph, distances, entries, exits, startNode, endNode, dic_nodes_MinCapacity, dic_nodes_MaxCapacity, specialScenario=True, solver='glpk'): # Type and value check isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) isListOfStrings(entries) isListOfStrings(exits) utils.isString(startNode) utils.isString(endNode) if isinstance(dic_nodes_MinCapacity, dict) and isinstance(dic_nodes_MaxCapacity, dict): if not (dic_nodes_MinCapacity.keys() == dic_nodes_MaxCapacity.keys()): raise TypeError("Dictionaries for min and max capacity need same keys") for node in dic_nodes_MinCapacity.keys(): if not (isinstance(dic_nodes_MinCapacity[node], float) or isinstance(dic_nodes_MinCapacity[node], int)): raise TypeError("The input argument has to be an number") if not (isinstance(dic_nodes_MaxCapacity[node], float) or isinstance(dic_nodes_MaxCapacity[node], int)): raise TypeError("The input argument has to be an number") if dic_nodes_MaxCapacity[node] < dic_nodes_MinCapacity[node]: raise ValueError("minimal node capacity has to be equal or smaller than maximal node capacity") else: raise TypeError("dic_nodes_MinCapacity and dic_nodes_MinCapacity have to be dictionaries") isBool(specialScenario) # we build concrete Pyomo Model model = py.ConcreteModel() # Description model: we have a simple directed graph. 
We allow negative flows because a pipe can be used in both # directions by the flows model.Nodes = py.Set(initialize=graph.nodes) # important to use distances.keys() instead of graph.edges such that we do not have key errors later on because # the edges in graph are undirected and in distances.keys() directed model.Arcs = py.Set(initialize=distances.keys(), dimen=2) # create demand variables for every node; # if specialScenario is true, then we compute special scenario, i.e. entry/exit demand variables are bounded by # min(0,minimal_capacity) <= demandVariable <= max(0, maximal_capacity) # demand variables for inner nodes are set to zero # if specialScenario is false, the demand variable is just bounded by the minimal and maximal capacity if specialScenario: def demandCapacities(model, node): if node in entries or node in exits: return min(0, dic_nodes_MinCapacity[node]), max(0, dic_nodes_MaxCapacity[node]) else: return 0, 0 model.Demand = py.Var(model.Nodes, bounds=demandCapacities) else: # we do not compute special scenarios; we just compute flows for given, possibly fixed, demands def demandCapacities(model, node): return dic_nodes_MinCapacity[node], dic_nodes_MaxCapacity[node] model.Demand = py.Var(model.Nodes, bounds=demandCapacities) # create arc flow variables for every arc of the network model.Flow = py.Var(model.Arcs) # compute NodesOut, i.e., set of nodes that are connected to considered node by outgoing arc def nodes_out_init(model, node): retval = [] for (i, j) in model.Arcs: if i == node: retval.append(j) return retval model.NodesOut = py.Set(model.Nodes, initialize=nodes_out_init) # compute NodesIn, i.e., set of nodes connected to considered node by ingoing arc def nodes_in_init(model, node): retval = [] for (i, j) in model.Arcs: if j == node: retval.append(i) return retval model.NodesIn = py.Set(model.Nodes, initialize=nodes_in_init) # add flow balance constraints corresponding to the node demands def flow_balance_rule(model, node): return sum(model.Flow[i, node] for i in model.NodesIn[node]) \ - sum(model.Flow[node, j] for j in model.NodesOut[node]) \ == model.Demand[node] model.FlowBalance_cons = py.Constraint(model.Nodes, rule=flow_balance_rule) # compute unique flow-path P(startNode,endNode) from entry to exit; given by list of nodes of the path pathNodes = nx.shortest_path(graph, source=startNode, target=endNode) # non zero coefficients of objective function dic_arc_coef = {} # determine coefficients for objective function # if for an arc (u,v), u, respectively v, are not in pathNodes, then the coefficient is 0 # if arc (u,v) of pathNodes satisfies P(startNode, u) subset P(startNode,v), then coefficient is 1, otherwise -1 for index in range(0, len(pathNodes) - 1): # check which direction of the arc is contained in the graph if (pathNodes[index], pathNodes[index + 1]) in model.Arcs: dic_arc_coef[(pathNodes[index], pathNodes[index + 1])] = 1 else: dic_arc_coef[(pathNodes[index + 1], pathNodes[index])] = -1 # we set objective def obj_rule(model): return sum(dic_arc_coef[arc] * model.Flow[arc] for arc in dic_arc_coef.keys()) model.Obj = py.Objective(rule=obj_rule, sense=py.maximize) # Create a solver opt = SolverFactory(solver) # Solve optimization model results = opt.solve(model) # status of solver status = results.solver.status # termination condition termCondition = results.solver.termination_condition # save the solution of the flows in a dictionary key: arcs, values: flow dic_scenario_flow = {} if status == SolverStatus.error or status == SolverStatus.aborted or status 
== SolverStatus.unknown: utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) + '. No output is generated.', 0, 0) elif termCondition == TerminationCondition.infeasibleOrUnbounded or \ termCondition == TerminationCondition.infeasible or \ termCondition == TerminationCondition.unbounded: utils.output('Optimization problem is ' + str(termCondition) + '. No output is generated.', 0, 0) else: # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown # status), show a warning message. if not termCondition == TerminationCondition.optimal: warnings.warn('Output is generated for a non-optimal solution.') # dic_arcScenario has key (v,w,scenario) and value flow will be needed for MIP for arc in model.Arcs: dic_scenario_flow[arc] = model.Flow[arc].value return dic_scenario_flow def computeLargeMergedDiameters(dic_subSetDiam_costs, nDigits=6): # Type and value check if isinstance(dic_subSetDiam_costs, dict): for diam in dic_subSetDiam_costs.keys(): utils.isStrictlyPositiveNumber(diam) utils.isStrictlyPositiveNumber(dic_subSetDiam_costs[diam]) else: raise TypeError("The input has to be a dictionary") utils.isStrictlyPositiveInt(nDigits) dic_newDiam_costs = {} dic_newDiam_oldDiam = {} for diam in dic_subSetDiam_costs.keys(): # compute new diameter in [m] and its costs in [Euro/m] # for Formula see (1) in Paper Reuß et. al. # since at current state we consider the diameter for a looped pipe the above is # equivalent to 2^(2/5) * diam and thus, we do not have to transform diam from [m] to [mm] newDiam = ((diam ** (5 / 2) + diam ** (5 / 2)) ** (2 / 5)).__round__(nDigits) # costs are two times costs of diam because newDiam represents two looped pipe with diameter diam newCosts = 2 * dic_subSetDiam_costs[diam] dic_newDiam_costs[newDiam] = newCosts dic_newDiam_oldDiam[newDiam] = diam return dic_newDiam_costs, dic_newDiam_oldDiam def determinePressureDropCoef(dic_scenario_flows, distances, dic_node_minPress, dic_node_maxPress, diameters, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, nDigits=6): # check type and value if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") isPandasSeriesPositiveNumber(distances) isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) if isinstance(diameters, list): for diam in diameters: utils.isPositiveNumber(diam) else: raise TypeError("Diameters has to be a list") utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) utils.isStrictlyPositiveInt(nDigits) # compute for each diameter, scenario, and arc its pressure drop # save results in dic: key: (diameter, scenario Name), value: dic: key: arc, value: pressure drop dic_pressureDropCoef = {} for diameter in diameters: for nodePair in dic_scenario_flows.keys(): # initialize dictionary dic_pressureDropCoef[(diameter, nodePair)] = {} # compute cross section of considered pipe and diameter tmpvalue_A = 0.25 * np.pi * diameter ** 2 for arc in dic_scenario_flows[nodePair].keys(): # check if flow is unequal to zero if dic_scenario_flows[nodePair][arc] != 0.0: # Compute approximation of average pressure flow in pipe 
(u,v) by # if flow((u,v)) is positive then set p_min to lower pressure bound of v and p_max to # upper pressure bound u # if flow((u,v)) is negative then set p_min to lower pressure bound of u and p_max to # upper pressure bound v if dic_scenario_flows[nodePair][arc] > 0: p_min = dic_node_minPress[arc[1]] p_max = dic_node_maxPress[arc[0]] else: p_min = dic_node_minPress[arc[0]] p_max = dic_node_maxPress[arc[1]] # compute approximation of average pressure p_m = (2 / 3) * (p_max + p_min - (p_max * p_min) / (p_max + p_min)) # approximation for density rho = 0.11922 * p_m ** 0.91192 - 0.17264 # approximation of the realgasfactor Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050 K_m = Z_m / Z_n # approximation of the dynamic viscosity eta = 1.04298 * 10 ** (-10) * p_m ** 1.53560 + 8.79987 * 10 ** (-6) nue = eta / rho # compute velocity tmpvalue_w = (abs(dic_scenario_flows[nodePair][arc]) / rho) / tmpvalue_A # compute reynolds number tmpvalue_Re = tmpvalue_w * (diameter / nue) tmpvalue_alpha = np.exp(-np.exp(6.75 - 0.0025 * tmpvalue_Re)) tmpvalue_Lambda = (64 / tmpvalue_Re) * (1 - tmpvalue_alpha) + tmpvalue_alpha * ( -2 * np.log10(2.7 * (np.log10(tmpvalue_Re) ** 1.2 / tmpvalue_Re) + ir / (3.71 * 1000 * diameter))) ** (-2) # note p_n is in [bar] instead of [PA], thus we divide tmpvalue_C by 10**5 # explanation: we have p_i^2-p_j^2=C. If p_i is in [PA] and we want p_i in [bar] then this leads to # (p_i/10^5)^2-(p_j/10^5)^2=C/10^10 # but we changed p_n in computation C from [PA] to [bar] hence we only divide C by 10^5 tmpvalue_C_bar = tmpvalue_Lambda * 16 * rho_n * T_m * p_n * K_m / (np.pi ** 2 * T_n * 10 ** 5) # compute final pressure drop coefficient depending on the flow tmp_value_C_coef = (distances[arc] / rho_n ** 2) * \ (tmpvalue_C_bar * dic_scenario_flows[nodePair][arc] * abs(dic_scenario_flows[nodePair][arc]) / diameter ** 5) # save pressure drop for considered diameter, scenario, and arc dic_pressureDropCoef[(diameter, nodePair)][arc] = tmp_value_C_coef else: dic_pressureDropCoef[(diameter, nodePair)][arc] = 0 return dic_pressureDropCoef def determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureDropCoef, specialScenarioNames, dic_node_minPress, dic_node_maxPress, dic_diam_costs, robust=True, solver='glpk', threads=4, verbose=0): # type and value checks isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) if not isinstance(dic_pressureDropCoef, dict): raise TypeError("The input has to be a dictionary") if isinstance(specialScenarioNames, list): if robust: for scenario in specialScenarioNames: isinstance(scenario, tuple) else: raise TypeError("The input argument has to be a list") isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) if isinstance(dic_diam_costs, dict): for diam in dic_diam_costs.keys(): utils.isStrictlyPositiveNumber(diam) utils.isStrictlyPositiveNumber(dic_diam_costs[diam]) else: raise TypeError("The input has to be a dictionary") if not isinstance(robust, bool): raise TypeError("The input has to be a bool") utils.isString(solver) utils.isPositiveNumber(verbose) # set list of available diameters diameters = dic_diam_costs.keys() # build concrete pyomo model model = py.ConcreteModel() # sets for nodes, arcs, diameters, scenarios model.nodes = py.Set(initialize=graph.nodes) model.arcs = py.Set(initialize=list(distances.keys()), dimen=2) # diameters assuming that each pipe has the same diameter options model.diameters = 
py.Set(initialize=diameters) # if we have special scenarios, scenario names are tuples, otherwise not if robust: # set indices for each scenario by its nodePair = (startnode, endnode) model.scenarios = py.Set(initialize=specialScenarioNames, dimen=2) else: # set indices for each timeStep number model.scenarios = py.Set(initialize=specialScenarioNames, dimen=1) # create variables binaries x are the same for each scenario # pressure variables are different for each scenario model.x = py.Var(model.arcs, model.diameters, domain=py.Binary) if robust: def pressureBounds(model, node, startnode, endnode): return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2 model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds) else: def pressureBounds(model, node, timeStep): return dic_node_minPress[node] ** 2, dic_node_maxPress[node] ** 2 model.pi = py.Var(model.nodes, model.scenarios, bounds=pressureBounds) # objective: minimize the costs def obj_rule(model): return sum( sum(dic_diam_costs[diam] * distances[arc] * model.x[arc, diam] for diam in model.diameters) for arc in model.arcs) model.Obj = py.Objective(rule=obj_rule) # pressure drop for each cons and each scenario if robust: def pressure_drop(model, arc0, arc1, scenarioStart, scenarioEnd): return model.pi[arc1, (scenarioStart, scenarioEnd)] - model.pi[arc0, (scenarioStart, scenarioEnd)] == \ -sum(dic_pressureDropCoef[(diam, (scenarioStart, scenarioEnd))][(arc0, arc1)] * model.x[arc0, arc1, diam] for diam in model.diameters) model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_drop) else: def pressure_dropNotRobust(model, arc0, arc1, timeStep): return model.pi[arc1, timeStep] - model.pi[arc0, timeStep] == \ -sum(dic_pressureDropCoef[(diam, timeStep)][(arc0, arc1)] * model.x[arc0, arc1, diam] for diam in model.diameters) model.PressureDrop_cons = py.Constraint(model.arcs, model.scenarios, rule=pressure_dropNotRobust) # ensure that a single diameter per arc is chosen def selection_diameter(model, arc0, arc1): return sum(model.x[arc0, arc1, diam] for diam in model.diameters) == 1 model.SelectionDiameter_cons = py.Constraint(model.arcs, rule=selection_diameter) # Create a solver opt = SolverFactory(solver) # Set the specified solver options # Solve optimization problem. The optimization solve time is stored and the solver information is printed. if (verbose == 2) & (solver == 'gurobi'): optimizationSpecs = ' LogToConsole=0' opt.set_options('Threads=' + str(threads) + optimizationSpecs) results = opt.solve(model, tee=True, keepfiles=False) else: results = opt.solve(model, tee=True, report_timing=True, keepfiles=False) # status of solver status = results.solver.status # termination condition termCondition = results.solver.termination_condition # write diameter solution to dictionary: key: arc, value: optimal diameter # write pressure solutions to dictionary; key: scenarioName, value: dict: key: node, value: pressure level in [bar] dic_arc_diam = {} dic_scen_node_press = {} if status == SolverStatus.error or status == SolverStatus.aborted or status == SolverStatus.unknown: utils.output('Solver status: ' + str(status) + ', termination condition: ' + str(termCondition) + '. No output is generated.', 0, 0) elif termCondition == TerminationCondition.infeasibleOrUnbounded or \ termCondition == TerminationCondition.infeasible or \ termCondition == TerminationCondition.unbounded: utils.output('Optimization problem is ' + str(termCondition) + '. 
No output is generated.', 0, 0) else: # If the solver status is not okay (hence either has a warning, an error, was aborted or has an unknown # status), show a warning message. if not termCondition == TerminationCondition.optimal: warnings.warn('Output is generated for a non-optimal solution.') # initialize dict with empty dict for scenario in specialScenarioNames: dic_scen_node_press[scenario] = {} for v in model.component_objects(py.Var, active=True): varobject = getattr(model, str(v)) for index in varobject: # round because sometimes we are nearly one if str(varobject) == 'x' and round(varobject[index].value) == 1: dic_arc_diam.update({(index[0], index[1]): index[2]}) elif str(varobject) == 'pi': if robust: # need sqrt() because in model pressure is quadratic because of the transformation dic_scen_node_press[(index[1], index[2])].update({index[0]: np.sqrt(varobject[index].value)}) else: # need sqrt() because in model pressure is quadratic because of the transformation dic_scen_node_press[(index[1])].update({index[0]: np.sqrt(varobject[index].value)}) return dic_arc_diam, dic_scen_node_press def _postprocessing(scenario, dic_scenario_flows, graph, **kwargs): dic_scen_PressLevel = {} dic_scen_MaxViolPress = math.inf # copy a list of nodes tmp_nodes = copy.deepcopy(list(graph.nodes)) # we now set iteratively the pressure level of a single node to its upper pressure bound and then compute the # unique pressure levels until we find valid pressure levels or have tested all nodes while tmp_nodes: # we have not found valid pressure levels for this scenario # temporary pressure levels dic_tmp_pressure = {} for node in list(graph.nodes): dic_tmp_pressure[node] = None # choose the node which pressure level is fixed to the upper pressure bound current_node = tmp_nodes[0] validation, tmp_viol = computePressureAtNode(graph=graph, node=current_node, nodeUpperBound=current_node, dic_scenario_flows=dic_scenario_flows[scenario], dic_node_pressure=dic_tmp_pressure, **kwargs) # if validation true, then we have feasible pressure levels; empty list of nodes that have to be # considered if validation: tmp_nodes = [] # we have feasible pressure level and save them dic_scen_PressLevel = dic_tmp_pressure dic_scen_MaxViolPress = tmp_viol else: # remove considered entry from list of nodes that will be considered for fixing the pressure level tmp_nodes.remove(tmp_nodes[0]) # we update the maximal pressure level violation if tmp_viol < dic_scen_MaxViolPress: # save currently best pressure levels dic_scen_PressLevel = copy.deepcopy(dic_tmp_pressure) dic_scen_MaxViolPress = tmp_viol return scenario, dic_scen_PressLevel, dic_scen_MaxViolPress def postprocessing(graph, distances, dic_arc_diam, dic_scenario_flows, dic_node_minPress, dic_node_maxPress, threads=1, verbose=0): # Type and value check isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) else: raise TypeError("The input has to be a dictionary") isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) # best found pressure levels for scenarios; dic key: scenario, value: dic: key: node, value: pressure level in [bar] dic_scen_PressLevel = {} # maximal violation of pressure bounds; zero if no violation exists; dic: key: scenario, value: 
pressure violation dic_scen_MaxViolPress = {} # we compute "precise" pressure levels for every scenarios pool = Pool(threads) scenarios = [scenario for scenario in dic_scenario_flows.keys()] for i, values in enumerate(pool.imap(partial(_postprocessing, validation=True, graph=graph, dic_arc_diam=dic_arc_diam, distances=distances, dic_node_minPress=dic_node_minPress, dic_node_maxPress=dic_node_maxPress, tmp_violation=0, dic_scenario_flows=dic_scenario_flows), scenarios), 1): if verbose == 0: sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(scenarios) * 100))) dic_scen_PressLevel[values[0]] = values[1] dic_scen_MaxViolPress[values[0]] = values[2] pool.close() pool.join() return dic_scen_PressLevel, dic_scen_MaxViolPress def computePressureAtNode(validation, node, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows, dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, nDigits=6): # Type and value check isBool(validation) utils.isString(node) utils.isString(nodeUpperBound) isNetworkxGraph(graph) isPandasSeriesPositiveNumber(distances) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) else: raise TypeError("The input has to be a dictionary") isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) utils.isPositiveNumber(tmp_violation) if not isinstance(dic_node_pressure, dict): raise TypeError("The Input has to a dictionary") utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) utils.isStrictlyPositiveInt(nDigits) # if node is equal to nodeUpperBound, we fix its pressure level to the upper bound; base case in recursion if node == nodeUpperBound: dic_node_pressure[node] = dic_node_maxPress[node] # list of arcs arcs = list(distances.keys()) # we now compute the neighbors of the considered node neighbors = graph.neighbors(node) # compute pressure levels for neighbor nodes for neighbor in neighbors: # check if pressure is already computed if dic_node_pressure[neighbor] is None: # check if (node,neighbor) or (neighbor,node) is in graph if (node, neighbor) in arcs: # check flow direction for arc (node,neighbor) if dic_scenario_flows[(node, neighbor)] >= 0.0: # we know pressure level of beginning node of arc; compute pressure level for end node of arc dic_node_pressure[neighbor] = computePressureEndnodeArc((node, neighbor), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n) else: # we know pressure level of endnode dic_node_pressure[neighbor] = computePressureStartnodeArc((node, neighbor), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n, tol=10 ** (- nDigits)) else: # we know that arc (neighbor,node) is contained in the graph # check flow direction if dic_scenario_flows[(neighbor, node)] <= 0.0: # we know pressure of start node dic_node_pressure[neighbor] = computePressureEndnodeArc((neighbor, node), dic_node_pressure[node], dic_scenario_flows, 
dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n) else: # we know pressure level of end node dic_node_pressure[neighbor] = computePressureStartnodeArc((neighbor, node), dic_node_pressure[node], dic_scenario_flows, dic_arc_diam, distances, ir, rho_n, T_m, T_n, p_n, Z_n, tol=10 ** (- nDigits)) # check if new computed pressure level is feasible if dic_node_pressure[neighbor] == - math.inf: # pressure violation is really high tmp_violation = math.inf return False, tmp_violation # check if we violate pressure bounds for neighbor node if dic_node_pressure[neighbor] < dic_node_minPress[neighbor] \ or dic_node_pressure[neighbor] > dic_node_maxPress[neighbor]: # pressure level is not valid validation = False # update pressure bound violation if dic_node_pressure[neighbor] < dic_node_minPress[neighbor]: # update violation and violation node if it is bigger if tmp_violation is None or \ abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) > tmp_violation: tmp_violation = abs(dic_node_minPress[neighbor] - dic_node_pressure[neighbor]) else: if tmp_violation is None or \ abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) > tmp_violation: tmp_violation = abs(dic_node_pressure[neighbor] - dic_node_maxPress[neighbor]) # compute value for neighbor of tmp validation, tmp_violation = computePressureAtNode(validation, neighbor, nodeUpperBound, graph, dic_arc_diam, distances, dic_scenario_flows, dic_node_minPress, dic_node_maxPress, tmp_violation, dic_node_pressure) return validation, tmp_violation def computePressureStartnodeArc(arc, pressureEndNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, tol=10 ** (-4)): # Type and Value check if not isinstance(arc, tuple): raise TypeError("The input has to be a tuple") utils.isStrictlyPositiveNumber(pressureEndNode) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) isPandasSeriesPositiveNumber(distances) utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) utils.isStrictlyPositiveNumber(tol) if dic_scenario_flows[arc] == 0.0: return pressureEndNode # define function of nonlinear equation system f(x) = pressure_start^2-pressure_end^2-C # because then root is our valid pressure level solution, because we know pressure_end def f(pressure_start): d = dic_arc_diam[arc] A = 0.25 * math.pi * d ** 2 rho_in = 0.11922 * pressure_start ** 0.91192 - 0.17264 V_in = abs(dic_scenario_flows[arc]) / rho_in w_in = V_in / A eta_in = 1.04298 * 10 ** (-10) * pressure_start ** 1.53560 + 8.79987 * 10 ** (-6) nue_in = eta_in / rho_in Re_in = w_in * (d / nue_in) alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in)) Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10( (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in + ir / (3.71 * 1000 * d))) ** (-2) C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d) # note pressure_start is in bar p_m = pressure_start - C_tilde / 10 ** 5 if p_m < 0.0: # pressure drop too large no valid pressure assignment possible return -math.inf Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050 K_m = Z_m / Z_n # note flow 
direction is given by startnode endnode so we square the arcflow C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / ( math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * dic_scenario_flows[arc] ** 2 return pressure_start ** 2 - pressureEndNode ** 2 - C # find root of f, start value pressure_end + 0.5(bar) # x = fsolve(f, pressureEndNode + 0.5) # pressureEndnode + guess for solution depending on flow; you can replace this guess by the approximation of the # pressure drop of the MIP to probably achieve better results x = fsolve(f, pressureEndNode + 0.5 * (dic_scenario_flows[arc] ** 2) / (dic_arc_diam[arc] ** 5)) # check if tolerance is ok assert isinstance(tol, float) # check tolerance of first solution if f(x[0]) <= tol: # value is ok # because x is an array return first entry, we only have one solution for the nonlinear equation system return x[0] else: print('nonlinear equation system failed') # this warning means we could not solve the system, this could be the case if the pressure drop is too large # or when the start value for the nonlinear equation solver is too far away from the solution print("Nonlinear equation system in Postprocessing failed. Try another node which pressure level is" " set to the upper bound") return -math.inf def computePressureEndnodeArc(arc, pressureStartNode, dic_scenario_flows, dic_arc_diam, distances, ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965): # Type and Value check if not isinstance(arc, tuple): raise TypeError("The input has to be a tuple") utils.isStrictlyPositiveNumber(pressureStartNode) if not isinstance(dic_scenario_flows, dict): raise TypeError("The input has to be a dictionary") if isinstance(dic_arc_diam, dict): for diam in dic_arc_diam.keys(): utils.isStrictlyPositiveNumber(dic_arc_diam[diam]) isPandasSeriesPositiveNumber(distances) utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) arcFlow = dic_scenario_flows[arc] if arcFlow != 0: d = dic_arc_diam[arc] A = 0.25 * math.pi * d ** 2 rho_in = 0.11922 * pressureStartNode ** 0.91192 - 0.17264 V_in = abs(arcFlow) / rho_in w_in = V_in / A eta_in = 1.04298 * 10 ** (-10) * pressureStartNode ** 1.53560 + 8.79987 * 10 ** (-6) nue_in = eta_in / rho_in Re_in = w_in * (d / nue_in) alpha = math.exp(-math.exp(6.75 - 0.0025 * Re_in)) Lambda = (64 / Re_in) * (1 - alpha) + alpha * (-2 * math.log10( (2.7 * (math.log10(Re_in)) ** 1.2) / Re_in + ir / (3.71 * 1000 * d))) ** (-2) C_tilde = (Lambda * distances[arc] * rho_in * w_in ** 2) / (2 * d) # note pressure_start is in bar p_m = pressureStartNode - C_tilde / 10 ** 5 if p_m < 0.0: # pressure drop too large no valid pressure assignment possible return -math.inf Z_m = 5.04421 * 10 ** (-4) * p_m ** 1.03905 + 1.00050 K_m = Z_m / Z_n # note flow direction is given by startnode endnode so we square the arcflow C = (Lambda * 16 * distances[arc] * T_m * p_n * K_m) / (math.pi ** 2 * T_n * rho_n * 10 ** 5 * dic_arc_diam[arc] ** 5) * arcFlow ** 2 else: # flow is zero therefore pressure drop is zero C = 0 if pressureStartNode ** 2 - C >= 0: return math.sqrt(pressureStartNode ** 2 - C) else: # pressure drop is too big return negative value, which is a invalid pressure value return -math.inf def _computeTimeStepFlows(index, injectionWithdrawalRates, graph, **kwargs): # 
compute flows corresponding to demand by fixing demand for every node to given value and then compute # flows by LP dic_nodes_MinCapacity = {} dic_nodes_MaxCapacity = {} activeNodes = injectionWithdrawalRates.columns for node in graph.nodes: if node in activeNodes: dic_nodes_MinCapacity[node] = injectionWithdrawalRates.at[index, node] dic_nodes_MaxCapacity[node] = injectionWithdrawalRates.at[index, node] else: dic_nodes_MinCapacity[node] = 0 dic_nodes_MaxCapacity[node] = 0 # compute flows return index, computeSingleSpecialScenario(dic_nodes_MinCapacity=dic_nodes_MinCapacity, dic_nodes_MaxCapacity=dic_nodes_MaxCapacity, graph=graph, **kwargs) def computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, threads=1, verbose=0, solver='glpk'): # Type and value check isPandasDataFrameNumber(injectionWithdrawalRates) isPandasSeriesPositiveNumber(distances) isNetworkxGraph(graph) isListOfStrings(entries) isListOfStrings(exits) # compute for every time step the corresponding flows; dict: key: timeStep, value: dict: key: arc, value: flow dic_timeStep_flows = {} # nodes with nonzero demand are given by columns of dataframe activeNodes = injectionWithdrawalRates.columns pool = Pool(threads) indexList = list(injectionWithdrawalRates.index) for i, values in enumerate(pool.imap(partial(_computeTimeStepFlows, graph=graph, distances=distances, entries=entries, exits=exits, startNode=activeNodes[0], endNode=activeNodes[1], specialScenario=False, injectionWithdrawalRates=injectionWithdrawalRates, solver=solver), indexList), 1): if verbose == 0: sys.stderr.write('\rPercentage simulated: {:d}%'.format(int(i / len(indexList) * 100))) dic_timeStep_flows[values[0]] = values[1] pool.close() pool.join() return dic_timeStep_flows def networkRefinement(distances, maxPipeLength, dic_node_minPress, dic_node_maxPress): # type and value check isPandasSeriesPositiveNumber(distances) isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) if maxPipeLength is not None: utils.isStrictlyPositiveNumber(maxPipeLength) # if maximal pipeline length is a positive number we apply the refinement if maxPipeLength is not None: # we have to check if pipes satisfy maximal pipeline length # list of new arcs that will be added newPipes = [] # list of lengths of new added pipes newPipesLengths = [] # list of split original pipes splitEdges = [] for edge in distances.index: # get length of pipeline pipeLength = distances[edge] if pipeLength > maxPipeLength: # compute number of necessary artificial nodes nArtificialNodes = math.ceil(pipeLength / maxPipeLength) - 1 # compute length of new pipelines newPipeLength = float(pipeLength / (math.ceil(pipeLength / maxPipeLength))) # lower and upper pressure bound for new nodes computed by average of nodes of original edge lowPress = (dic_node_minPress[edge[0]] + dic_node_minPress[edge[1]]) / 2 maxPress = (dic_node_maxPress[edge[0]] + dic_node_maxPress[edge[1]]) / 2 # add first new pipe and its length newPipes.append((edge[0], "v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1]))) # add length of first new pipe newPipesLengths.append(newPipeLength) # add lower and upper bound for new artificial node dic_node_minPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress dic_node_maxPress["v" + str(1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress # add intermediate artificial pipes, its length, and lower/upper pressure bounds for index in range(1, 
nArtificialNodes): newPipes.append(("v" + str(index) + "_" + str(edge[0]) + "_" + str(edge[1]), "v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1]))) newPipesLengths.append(newPipeLength) dic_node_minPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = lowPress dic_node_maxPress["v" + str(index + 1) + "_" + str(edge[0]) + "_" + str(edge[1])] = maxPress # add last new pipe and its length newPipes.append(("v" + str(nArtificialNodes) + "_" + str(edge[0]) + "_" + str(edge[1]), edge[1])) newPipesLengths.append(newPipeLength) # add edge to split edges splitEdges.append(edge) # Now delete edges that have been split distances = distances.drop(splitEdges) # Add new edges distances = distances.append(pd.Series(newPipesLengths, index=newPipes)) # get edges for graph edges = distances.index # create empty graph G = nx.Graph() # create graph from given edges and add length as edge attribute for edge in edges: G.add_edge(edge[0], edge[1], length=distances[edge]) return G, distances, dic_node_minPress, dic_node_maxPress def determineDiscretePipelineDesign(robust, injectionWithdrawalRates, distances, dic_node_minPress, dic_node_maxPress, dic_diameter_costs=None, dic_candidateMergedDiam_costs=None, gdfEdges=None, regColumn1='nodeIn', regColumn2='nodeOut', solver='glpk', opexForDiameters=None, economicLifetime=30, interestRate=0.08, costUnit='€', ir=0.2, rho_n=0.089882, T_m=20 + 273.15, T_n=273.15, p_n=1.01325, Z_n=1.00062387922965, originalFluidFlows=None, nDigits=6, verbose=0, threads=1): # Do type and value check of input data: isBool(robust) isPandasDataFrameNumber(injectionWithdrawalRates) isPandasSeriesPositiveNumber(distances) isDictionaryPositiveNumber(dic_node_minPress) isDictionaryPositiveNumber(dic_node_maxPress) checkLowerUpperBoundsOfDicts(dic_node_minPress, dic_node_maxPress) # extract diameters for the optimization if dic_diameter_costs is not None: if isinstance(dic_diameter_costs, dict): diameters = list(dic_diameter_costs.keys()) if isinstance(diameters, list): for diam in diameters: utils.isStrictlyPositiveNumber(diam) else: raise TypeError("The input argument has to be a list") isDictionaryPositiveNumber(dic_diameter_costs) if dic_candidateMergedDiam_costs is not None: if isinstance(dic_candidateMergedDiam_costs, dict): for diam in dic_candidateMergedDiam_costs.keys(): utils.isStrictlyPositiveNumber(diam) utils.isPositiveNumber(dic_candidateMergedDiam_costs[diam]) else: raise TypeError("The input argument has to be a list") utils.isString(regColumn1), utils.isString(regColumn2) if gdfEdges is not None: if isinstance(gdfEdges, gpd.GeoDataFrame): if (not regColumn1 in gdfEdges.columns) | (not regColumn2 in gdfEdges.columns): raise ValueError("regColumn1 or regColumn2 not in columns of gdfEdges") else: gdfEdges['nodes'] = gdfEdges.apply(lambda x: (x['nodeIn'], x['nodeOut']), axis=1) else: raise TypeError("gdfEdges has to be a geopandas GeoDataFrame.") if opexForDiameters is not None: if isinstance(opexForDiameters, list): for opex in opexForDiameters: utils.isPositiveNumber(opex) else: raise TypeError("The input argument has to be a list") utils.isPositiveNumber(interestRate) utils.isStrictlyPositiveNumber(economicLifetime) utils.isString(costUnit) utils.isStrictlyPositiveNumber(ir) utils.isStrictlyPositiveNumber(rho_n) if not isinstance(T_m, float): raise TypeError("The input argument has to be an number") if not isinstance(T_n, float): raise TypeError("The input argument has to be an number") utils.isPositiveNumber(p_n) utils.isPositiveNumber(Z_n) if 
originalFluidFlows is not None: utils.isString(originalFluidFlows) utils.isStrictlyPositiveInt(nDigits) if dic_diameter_costs is None: print("There are no diameters to choose in the optimization. Thus, we consider the diameters and costs:") dic_diameter_costs = {0.1063: 37.51, 0.1307: 38.45, 0.1593: 39.64, 0.2065: 42.12, 0.2588: 45.26, 0.3063: 48.69, 0.3356: 51.07, 0.3844: 55.24, 0.432: 59.86, 0.4796: 64.98, 0.527: 70.56, 0.578: 76.61, 0.625: 82.99, 0.671: 89.95, 0.722: 97.38, 0.7686: 105.28, 0.814: 113.63, 0.864: 122.28, 0.915: 131.56, 0.96: 141.3, 1.011: 151.5, 1.058: 162.17, 1.104: 173.08, 1.155: 184.67, 1.249: 209.24, 1.342: 235.4, 1.444: 263.66, 1.536: 293.78} print(dic_diameter_costs) # create graph with respect to distances utils.output('Creating graph with respect to given distances', verbose, 0) graph, distances = createNetwork(distances) # plot graph if verbose < 1: if gdfEdges is not None: gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)] fig, ax = plt.subplots(figsize=(4,4)) gdfEdges.plot(ax=ax, color='k'), ax.axis('off') else: utils.output("Original Network Graph:", verbose, 0) nx.draw(graph, with_labels=True) plt.show() # Create a minimum spanning tree of the network with a reasonable logic utils.output('Creating a Steiner treee', verbose, 0) inner_nodes = list(injectionWithdrawalRates.columns) graph, distances = createSteinerTree(graph, distances, inner_nodes) utils.output("Steiner tree:", verbose, 0) if verbose < 1: if gdfEdges is not None: gdfEdges = gdfEdges[gdfEdges.nodes.isin(distances.index)] fig, ax = plt.subplots(figsize=(4,4)) gdfEdges.plot(ax=ax, color='k'), ax.axis('off') else: nx.draw(graph, with_labels=True) plt.show() # Compute robust scenarios for spanning tree network utils.output("Compute robust scenario set for tree network (based on " + str(len(graph.nodes)*len(graph.nodes)-len(graph.nodes)) + ' node combinations). Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_nodePair_flows, entries, exits = generateRobustScenarios(injectionWithdrawalRates, graph, distances, dic_node_minPress, dic_node_maxPress, solver=solver, threads=threads, verbose=verbose) utils.output("Number of robust scenarios: " + str(len(dic_nodePair_flows.keys())) , verbose, 0) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) # Compute scenarios for timeSteps utils.output("Compute scenarios for each timestep. Number of timestep scenarios: " + str(injectionWithdrawalRates.shape[0]) + '. Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_timeStep_flows = computeTimeStepFlows(injectionWithdrawalRates, distances, graph, entries, exits, solver=solver, threads=threads, verbose=verbose) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) # Compute equivalent single diameters for looped (parallel) pipes utils.output("Compute equivalent single diameters for looped (parallel) pipes", verbose, 0) # dic_LoopedDiam_costs contains the new computed diameters and its costs dic_LoopedDiam_costs = None # dic_newDiam_oldDiam merges new and old diameters dic_newDiam_oldDiam = None if dic_candidateMergedDiam_costs is not None: dic_LoopedDiam_costs, dic_newDiam_oldDiam = computeLargeMergedDiameters(dic_candidateMergedDiam_costs) # merge all diameters to one dictionary for the optimization model dic_diameter_costs.update(dic_LoopedDiam_costs) # Compute pressure drops for each scenario and diameter and the compute optimal diameters # depending on robust, we do this w.r.t. 
robust scenarios or every timeStep # dictionary for the pressure coefficients dic_pressureCoef = {} # dictionary for the optimal diameters dic_arc_diam = {} if robust: # we compute the pressure drops for the robust scenarios utils.output("Pressure drop coefficients for diameters with respect to robust scenarios", verbose, 0) dic_pressureCoef = determinePressureDropCoef(dic_nodePair_flows, distances, dic_node_minPress, dic_node_maxPress, list(dic_diameter_costs.keys())) specialScenarionames = list(dic_nodePair_flows.keys()) # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the robust scenarios utils.output('Determining optimal robust pipeline design under the consideration of pressure ' + 'losses and robust scenarios', verbose, 0) # returns dict: key: arc, value: optimal diameter # returns dict: key: nodePair, value: dic: key: node, value: pressure level dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef, specialScenarionames, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, robust, verbose=verbose, solver=solver, threads=threads) else: # we compute pressure drops for every timeStep scenario. Not robust version! # we compute the pressure drops for the robust scenarios and optimize utils.output("Pressure drop coefficients for diameters with respect to robust scenarios", verbose, 0) dic_pressureCoef = determinePressureDropCoef(dic_timeStep_flows, distances, dic_node_minPress, dic_node_maxPress, list(dic_diameter_costs.keys())) timeSteps = list(dic_timeStep_flows.keys()) # Determine optimal discrete pipeline selection by solving a MIP w.r.t. the timeStep scenarios utils.output('Determining optimal pipeline design under the consideration of pressure losses and every time step', verbose, 0) utils.output('This network design is necessarily robust!', verbose, 0) # returns dict: key: arc, value: optimal diameter # returns dict: key: timeStep, value: dic: key: node, value: pressure level dic_arc_diam, dic_scen_node_press = determineOptimalDiscretePipelineSelection(graph, distances, dic_pressureCoef, timeSteps, dic_node_minPress, dic_node_maxPress, dic_diameter_costs, False, verbose=verbose, solver=solver, threads=threads) if not dic_arc_diam: utils.output("No feasible diameter selections exits", verbose, 0) return None # Do postprocessing: Use a "more" accurate pressure model and apply Postprocessing of master's thesis: utils.output("Do postprocessing for robust (special) scenarios. Number of scenarios: " + str(len(dic_nodePair_flows)) + '. Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_scen_PressLevels, dic_scen_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_nodePair_flows, dic_node_minPress, dic_node_maxPress, threads=threads, verbose=verbose) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) for scenario in dic_scen_MaxViolPress.keys(): if dic_scen_MaxViolPress[scenario] > 0: utils.output("Robust Scenario " + str(scenario) + " violates pressure bounds by " + str(dic_scen_MaxViolPress[scenario]), verbose, 0) utils.output("Do postprocessing for each timestep scenarios. Number of scenarios: " + str(injectionWithdrawalRates.shape[0]) + '. 
Threads: ' + str(threads), verbose, 0) timeStart = time.time() dic_timeStep_PressLevels, dic_timeStep_MaxViolPress = postprocessing(graph, distances, dic_arc_diam, dic_timeStep_flows, dic_node_minPress, dic_node_maxPress, threads=threads, verbose=verbose) utils.output("\t\t(%.4f" % (time.time() - timeStart) + " sec)\n", verbose, 0) for timeStep in dic_timeStep_MaxViolPress.keys(): if dic_timeStep_MaxViolPress[timeStep] > 0: utils.output("Time Step " + str(timeStep) + " violates pressure bounds by " + str(dic_timeStep_MaxViolPress[timeStep]), verbose, 0) dic_arc_optimalDiameters = {} for arc in dic_arc_diam.keys(): if dic_LoopedDiam_costs is not None: if dic_arc_diam[arc] in dic_LoopedDiam_costs.keys(): dic_arc_optimalDiameters[arc] = (2, dic_newDiam_oldDiam[dic_arc_diam[arc]]) else: dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc]) else: dic_arc_optimalDiameters[arc] = (1, dic_arc_diam[arc]) if verbose < 1: if gdfEdges is not None: gdfEdges = gdfEdges[gdfEdges.nodes.isin(dic_arc_optimalDiameters)] gdfEdges['diam'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][1], axis=1) gdfEdges['nbPipes'] = gdfEdges.apply(lambda x: dic_arc_optimalDiameters[x['nodes']][0], axis=1) plotOptimizedNetwork(gdfEdges) else: utils.output("Network with optimized diameters, looped pipes are indicated by two colored edges, " + "Thicker edge means larger diameter", verbose, 0) finalG = nx.MultiGraph() for arc in dic_arc_optimalDiameters.keys(): if dic_arc_optimalDiameters[arc][0] == 1: finalG.add_edge(arc[0], arc[1], color='black', weight=5 * dic_arc_optimalDiameters[arc][1]) else: finalG.add_edge(arc[0], arc[1], color='r', weight=10 * dic_arc_optimalDiameters[arc][1]) finalG.add_edge(arc[0], arc[1], color='b', weight=5 * dic_arc_optimalDiameters[arc][1]) edges = finalG.edges() colors = [] weight = [] for (u, v, attrib_dict) in list(finalG.edges.data()): colors.append(attrib_dict['color']) weight.append(attrib_dict['weight']) nx.draw(finalG, edges=edges, edge_color=colors, width=weight, with_labels=True) plt.show() return dic_arc_optimalDiameters, dic_scen_PressLevels, dic_scen_MaxViolPress, dic_timeStep_PressLevels, \ dic_timeStep_MaxViolPress, gdfEdges def plotOptimizedNetwork(gdf_pipes, figsize=(4,4), nodesColumn='nodes', diamColumn='diam', nbPipesColumn='nbPipes', line_scaling=1, gdf_regions=None, pressureLevels=None, pMin=50, pMax=100, cmap='Spectral_r', cbxShift=0.32, cbyShift=0.08, cbWidth=0.4, fontsize=10, cbTitle='Pressure [bar]'): fig, ax = plt.subplots(figsize=figsize) cmap = mpl.cm.get_cmap(cmap) if gdf_regions is not None: gdf_regions.plot(ax=ax, facecolor='lightgrey', edgecolor='lightgrey') diamMin = gdf_pipes[gdf_pipes[diamColumn] > 0][diamColumn].min() for i, row in gdf_pipes.iterrows(): lw = row[diamColumn]/diamMin*line_scaling if pressureLevels is not None: p = (pressureLevels[row[nodesColumn][0]] + pressureLevels[row[nodesColumn][1]])/2 color = cmap((p-pMin)/(pMax-pMin)) else: color='k' if (row[nbPipesColumn] == 1): gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw, capstyle='round') else: gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color=color, linewidth=lw*3, capstyle='round') gdf_pipes[gdf_pipes.index == i].plot(ax=ax, color='white', linewidth=lw) ax.axis('off') lines = [] for diam in sorted(gdf_pipes[diamColumn].unique()): line = plt.Line2D(range(1), range(1), linewidth=diam/diamMin*line_scaling, color='k', marker='_', label="{:>1.5}".format(str(diam)) + ' m') lines.append(line) leg = ax.legend(handles=lines, prop={'size': fontsize}, loc=6, 
bbox_to_anchor=(1,0.5), title='Diameters') leg.get_frame().set_edgecolor('white') if pressureLevels is not None: sm1 = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=pMin, vmax=pMax)) sm1._A = [] cax = fig.add_axes([cbxShift, cbyShift, cbWidth, 0.03]) cb1 = fig.colorbar(sm1, cax=cax, pad=0.05, aspect=7, fraction=0.07, orientation='horizontal') cax.tick_params(labelsize=fontsize) cax.set_xlabel(cbTitle, size=fontsize) cb1.ax.xaxis.set_label_position('top') plt.show() return fig, ax
true
true
f70842c6f7480d694d52953c9c6749a115190f85
9,830
py
Python
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/aio/operations_async/_subscriptions_operations_async.py
LianwMS/azure-sdk-for-python
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
[ "MIT" ]
2
2019-05-17T21:24:53.000Z
2020-02-12T11:13:42.000Z
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/aio/operations_async/_subscriptions_operations_async.py
LianwMS/azure-sdk-for-python
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
[ "MIT" ]
15
2019-07-12T18:18:04.000Z
2019-07-25T20:55:51.000Z
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2016_06_01/aio/operations_async/_subscriptions_operations_async.py
LianwMS/azure-sdk-for-python
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
[ "MIT" ]
2
2020-05-21T22:51:22.000Z
2020-05-26T20:53:01.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class SubscriptionsOperations:
    """SubscriptionsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.resource.subscriptions.v2016_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_locations(
        self,
        subscription_id: str,
        **kwargs
    ) -> AsyncIterable["models.LocationListResult"]:
        """Gets all available geo-locations.

        This operation provides all the locations that are available for resource providers; however,
        each resource provider may support a subset of this list.

        :param subscription_id: The ID of the target subscription.
        :type subscription_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LocationListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2016_06_01.models.LocationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.LocationListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-06-01"

        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list_locations.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('LocationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'}  # type: ignore

    async def get(
        self,
        subscription_id: str,
        **kwargs
    ) -> "models.Subscription":
        """Gets details about a specified subscription.

        :param subscription_id: The ID of the target subscription.
        :type subscription_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Subscription, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.subscriptions.v2016_06_01.models.Subscription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.Subscription"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-06-01"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Subscription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}'}  # type: ignore

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.SubscriptionListResult"]:
        """Gets all subscriptions for a tenant.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SubscriptionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2016_06_01.models.SubscriptionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SubscriptionListResult"]
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-06-01"

        def prepare_request(next_link=None):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = 'application/json'

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('SubscriptionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions'}  # type: ignore
44.279279
133
0.653713
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]

class SubscriptionsOperations:

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_locations(
        self,
        subscription_id: str,
        **kwargs
    ) -> AsyncIterable["models.LocationListResult"]:
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-06-01"

        def prepare_request(next_link=None):
            if not next_link:
                url = self.list_locations.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'

            request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('LocationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'}

    async def get(
        self,
        subscription_id: str,
        **kwargs
    ) -> "models.Subscription":
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-06-01"

        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        header_parameters = {}
        header_parameters['Accept'] = 'application/json'

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Subscription', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}'}

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["models.SubscriptionListResult"]:
        cls = kwargs.pop('cls', None)
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-06-01"

        def prepare_request(next_link=None):
            if not next_link:
                url = self.list.metadata['url']
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'

            request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('SubscriptionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions'}
true
true
f70843497fc4ba407966c513c0e2afaa1534c821
1,544
py
Python
python/array.py
itzsoumyadip/vs
acf32cd0bacb26e62854060e0acf5eb41b7a68c8
[ "Unlicense" ]
1
2019-07-05T04:27:05.000Z
2019-07-05T04:27:05.000Z
python/array.py
itzsoumyadip/vs
acf32cd0bacb26e62854060e0acf5eb41b7a68c8
[ "Unlicense" ]
null
null
null
python/array.py
itzsoumyadip/vs
acf32cd0bacb26e62854060e0acf5eb41b7a68c8
[ "Unlicense" ]
null
null
null
from array import* # in an array all the values have to be of the same type # arrays in python don't have a fixed size, which means they are flexible vls = array('i',[5,34,54,23,26,32,2]) ## i = signed int typecode # copying array newArr = array(vls.typecode,(a for a in vls)) # fetch each value from vls and assign it to the new array for e in newArr: print(e,end=" ") # 5 34 54 23 26 32 2 print() # size of an array print(vls.buffer_info() ) # (20113184, 7) # (address, size) # accessing the array for i in vls: print(i) print(vls[1]) i=0 while i<len(newArr): print(newArr[i]) i+=1 # array data type print(vls.typecode) # properties ## reversing array vls.reverse() print(vls) #array('i', [2, 32, 26, 23, 54, 34, 5]) ## manual l=len(vls) vls= array(vls.typecode,[vls[i-1] for i in range(l,0,-1)]) # print(vls) ## taking values from the user arr = array('i',[]) l=int(input("Enter the length of the array")) for i in range(l): # the loop runs for the length specified by the user v=int(input("ENTER THE NEXT VALUE")) # value from user arr.append(v) # append at the end of the array print(arr) ## searching for the index no ## manual method i=int(input('enter the value for index no')) k=0 error=0 for e in arr: if e==i: error+=1 break # if the value matches an array value, the loop ends k+=1 if error!=0: print(k) else: print('sorry the value you are looking for is not available') ## by python's inbuilt function print(arr.index(i))
15.59596
98
0.612047
from array import* vls = array('i',[5,34,54,23,26,32,2]) newArr = array(vls.typecode,(a for a in vls)) for e in newArr: print(e,end=" ") print() print(vls.buffer_info() ) for i in vls: print(i) print(vls[1]) i=0 while i<len(newArr): print(newArr[i]) i+=1 print(vls.typecode) vls.reverse() print(vls) l=len(vls) vls= array(vls.typecode,[vls[i-1] for i in range(l,0,-1)]) print(vls) arr = array('i',[]) l=int(input("Enter the length of the array")) for i in range(l): v=int(input("ENTER THE NEXT VALUE")) arr.append(v) print(arr) i=int(input('enter the value for index no')) k=0 error=0 for e in arr: if e==i: error+=1 break k+=1 if error!=0: print(k) else: print('sorry the value you are looking for is not available') print(arr.index(i))
true
true
f70843ebb0ecba18f71fe77e21d8117ca9a4ba19
3,809
py
Python
drrn/drrn.py
ydai94/tdqn
83c66263cb47016414dbe47ad3b252bb9e681ca8
[ "MIT" ]
22
2019-10-29T22:39:57.000Z
2021-08-12T06:43:11.000Z
drrn/drrn.py
ydai94/tdqn
83c66263cb47016414dbe47ad3b252bb9e681ca8
[ "MIT" ]
1
2021-04-13T13:57:00.000Z
2021-04-13T13:57:00.000Z
drrn/drrn.py
ydai94/tdqn
83c66263cb47016414dbe47ad3b252bb9e681ca8
[ "MIT" ]
11
2019-10-31T19:58:13.000Z
2021-09-13T12:49:45.000Z
import pickle import logging import traceback import torch import torch.nn as nn import torch.nn.functional as F from os.path import join as pjoin from memory import ReplayMemory, Transition, State from model import DRRN from util import * import logger import sentencepiece as spm device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class DRRN_Agent: def __init__(self, args): self.gamma = args.gamma self.batch_size = args.batch_size self.sp = spm.SentencePieceProcessor() self.sp.Load(args.spm_path) self.network = DRRN(len(self.sp), args.embedding_dim, args.hidden_dim).to(device) self.memory = ReplayMemory(args.memory_size) self.save_path = args.output_dir self.clip = args.clip self.optimizer = torch.optim.Adam(self.network.parameters(), lr=args.learning_rate) def observe(self, state, act, rew, next_state, next_acts, done): self.memory.push(state, act, rew, next_state, next_acts, done) def build_state(self, obs, infos): """ Returns a state representation built from various info sources. """ obs_ids = [self.sp.EncodeAsIds(o) for o in obs] look_ids = [self.sp.EncodeAsIds(info['look']) for info in infos] inv_ids = [self.sp.EncodeAsIds(info['inv']) for info in infos] return [State(ob, lk, inv) for ob, lk, inv in zip(obs_ids, look_ids, inv_ids)] def encode(self, obs_list): """ Encode a list of observations """ return [self.sp.EncodeAsIds(o) for o in obs_list] def act(self, states, poss_acts, sample=True): """ Returns a string action from poss_acts. """ idxs, values = self.network.act(states, poss_acts, sample) act_ids = [poss_acts[batch][idx] for batch, idx in enumerate(idxs)] return act_ids, idxs, values def update(self): if len(self.memory) < self.batch_size: return transitions = self.memory.sample(self.batch_size) batch = Transition(*zip(*transitions)) # Compute Q(s', a') for all a' # TODO: Use a target network??? next_qvals = self.network(batch.next_state, batch.next_acts) # Take the max over next q-values next_qvals = torch.tensor([vals.max() for vals in next_qvals], device=device) # Zero all the next_qvals that are done next_qvals = next_qvals * (1-torch.tensor(batch.done, dtype=torch.float, device=device)) targets = torch.tensor(batch.reward, dtype=torch.float, device=device) + self.gamma * next_qvals # Next compute Q(s, a) # Nest each action in a list - so that it becomes the only admissible cmd nested_acts = tuple([[a] for a in batch.act]) qvals = self.network(batch.state, nested_acts) # Combine the qvals: Maybe just do a greedy max for generality qvals = torch.cat(qvals) # Compute Huber loss loss = F.smooth_l1_loss(qvals, targets.detach()) self.optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(self.network.parameters(), self.clip) self.optimizer.step() return loss.item() def load(self): try: self.memory = pickle.load(open(pjoin(self.save_path, 'memory.pkl'), 'rb')) self.network = torch.load(pjoin(self.save_path, 'model.pt')) except Exception as e: print("Error loading model.") logging.error(traceback.format_exc()) def save(self): try: pickle.dump(self.memory, open(pjoin(self.save_path, 'memory.pkl'), 'wb')) torch.save(self.network, pjoin(self.save_path, 'model.pt')) except Exception as e: print("Error saving model.") logging.error(traceback.format_exc())
37.712871
104
0.637438
import pickle import logging import traceback import torch import torch.nn as nn import torch.nn.functional as F from os.path import join as pjoin from memory import ReplayMemory, Transition, State from model import DRRN from util import * import logger import sentencepiece as spm device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class DRRN_Agent: def __init__(self, args): self.gamma = args.gamma self.batch_size = args.batch_size self.sp = spm.SentencePieceProcessor() self.sp.Load(args.spm_path) self.network = DRRN(len(self.sp), args.embedding_dim, args.hidden_dim).to(device) self.memory = ReplayMemory(args.memory_size) self.save_path = args.output_dir self.clip = args.clip self.optimizer = torch.optim.Adam(self.network.parameters(), lr=args.learning_rate) def observe(self, state, act, rew, next_state, next_acts, done): self.memory.push(state, act, rew, next_state, next_acts, done) def build_state(self, obs, infos): obs_ids = [self.sp.EncodeAsIds(o) for o in obs] look_ids = [self.sp.EncodeAsIds(info['look']) for info in infos] inv_ids = [self.sp.EncodeAsIds(info['inv']) for info in infos] return [State(ob, lk, inv) for ob, lk, inv in zip(obs_ids, look_ids, inv_ids)] def encode(self, obs_list): return [self.sp.EncodeAsIds(o) for o in obs_list] def act(self, states, poss_acts, sample=True): idxs, values = self.network.act(states, poss_acts, sample) act_ids = [poss_acts[batch][idx] for batch, idx in enumerate(idxs)] return act_ids, idxs, values def update(self): if len(self.memory) < self.batch_size: return transitions = self.memory.sample(self.batch_size) batch = Transition(*zip(*transitions)) # TODO: Use a target network??? next_qvals = self.network(batch.next_state, batch.next_acts) # Take the max over next q-values next_qvals = torch.tensor([vals.max() for vals in next_qvals], device=device) # Zero all the next_qvals that are done next_qvals = next_qvals * (1-torch.tensor(batch.done, dtype=torch.float, device=device)) targets = torch.tensor(batch.reward, dtype=torch.float, device=device) + self.gamma * next_qvals # Next compute Q(s, a) # Nest each action in a list - so that it becomes the only admissible cmd nested_acts = tuple([[a] for a in batch.act]) qvals = self.network(batch.state, nested_acts) # Combine the qvals: Maybe just do a greedy max for generality qvals = torch.cat(qvals) # Compute Huber loss loss = F.smooth_l1_loss(qvals, targets.detach()) self.optimizer.zero_grad() loss.backward() nn.utils.clip_grad_norm_(self.network.parameters(), self.clip) self.optimizer.step() return loss.item() def load(self): try: self.memory = pickle.load(open(pjoin(self.save_path, 'memory.pkl'), 'rb')) self.network = torch.load(pjoin(self.save_path, 'model.pt')) except Exception as e: print("Error loading model.") logging.error(traceback.format_exc()) def save(self): try: pickle.dump(self.memory, open(pjoin(self.save_path, 'memory.pkl'), 'wb')) torch.save(self.network, pjoin(self.save_path, 'model.pt')) except Exception as e: print("Error saving model.") logging.error(traceback.format_exc())
true
true
f7084436e83db9a4637aafc8f104c1b71803c8ce
93
py
Python
ai_utils/knowledge/__init__.py
daniel-waruo/ai-utils
6a32239b8eb9b611d3b693454e00aa5626d51e82
[ "MIT" ]
null
null
null
ai_utils/knowledge/__init__.py
daniel-waruo/ai-utils
6a32239b8eb9b611d3b693454e00aa5626d51e82
[ "MIT" ]
null
null
null
ai_utils/knowledge/__init__.py
daniel-waruo/ai-utils
6a32239b8eb9b611d3b693454e00aa5626d51e82
[ "MIT" ]
null
null
null
""" This module has all the tools necessary for me to handle a knowledge logic problem """
31
84
0.731183
true
true
f70844978e398bc63fd41438f8070d8885c10a06
9,790
py
Python
lib/spack/spack/test/llnl/util/filesystem.py
rtohid/spack
6df57bb2d0619a22b0bb0a5028b7caef7f31e722
[ "ECL-2.0", "Apache-2.0", "MIT" ]
null
null
null
lib/spack/spack/test/llnl/util/filesystem.py
rtohid/spack
6df57bb2d0619a22b0bb0a5028b7caef7f31e722
[ "ECL-2.0", "Apache-2.0", "MIT" ]
17
2018-09-20T18:32:50.000Z
2019-12-04T16:58:12.000Z
lib/spack/spack/test/llnl/util/filesystem.py
rtohid/spack
6df57bb2d0619a22b0bb0a5028b7caef7f31e722
[ "ECL-2.0", "Apache-2.0", "MIT" ]
null
null
null
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) """Tests for ``llnl/util/filesystem.py``""" import llnl.util.filesystem as fs import os import stat import pytest @pytest.fixture() def stage(tmpdir_factory): """Creates a stage with the directory structure for the tests.""" s = tmpdir_factory.mktemp('filesystem_test') with s.as_cwd(): # Create source file hierarchy fs.touchp('source/1') fs.touchp('source/a/b/2') fs.touchp('source/a/b/3') fs.touchp('source/c/4') fs.touchp('source/c/d/5') fs.touchp('source/c/d/6') fs.touchp('source/c/d/e/7') # Create symlinks os.symlink(os.path.abspath('source/1'), 'source/2') os.symlink('b/2', 'source/a/b2') os.symlink('a/b', 'source/f') # Create destination directory fs.mkdirp('dest') yield s class TestCopy: """Tests for ``filesystem.copy``""" def test_file_dest(self, stage): """Test using a filename as the destination.""" with fs.working_dir(str(stage)): fs.copy('source/1', 'dest/1') assert os.path.exists('dest/1') def test_dir_dest(self, stage): """Test using a directory as the destination.""" with fs.working_dir(str(stage)): fs.copy('source/1', 'dest') assert os.path.exists('dest/1') def check_added_exe_permissions(src, dst): src_mode = os.stat(src).st_mode dst_mode = os.stat(dst).st_mode for perm in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH]: if src_mode & perm: assert dst_mode & perm class TestInstall: """Tests for ``filesystem.install``""" def test_file_dest(self, stage): """Test using a filename as the destination.""" with fs.working_dir(str(stage)): fs.install('source/1', 'dest/1') assert os.path.exists('dest/1') check_added_exe_permissions('source/1', 'dest/1') def test_dir_dest(self, stage): """Test using a directory as the destination.""" with fs.working_dir(str(stage)): fs.install('source/1', 'dest') assert os.path.exists('dest/1') check_added_exe_permissions('source/1', 'dest/1') class TestCopyTree: """Tests for ``filesystem.copy_tree``""" def test_existing_dir(self, stage): """Test copying to an existing directory.""" with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest') assert os.path.exists('dest/a/b/2') def test_non_existing_dir(self, stage): """Test copying to a non-existing directory.""" with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest/sub/directory') assert os.path.exists('dest/sub/directory/a/b/2') def test_parent_dir(self, stage): """Test copying to from a parent directory.""" # Make sure we get the right error if we try to copy a parent into # a descendent directory. with pytest.raises(ValueError, matches="Cannot copy"): with fs.working_dir(str(stage)): fs.copy_tree('source', 'source/sub/directory') # Only point with this check is to make sure we don't try to perform # the copy. 
with pytest.raises(IOError, matches="No such file or directory"): with fs.working_dir(str(stage)): fs.copy_tree('foo/ba', 'foo/bar') def test_symlinks_true(self, stage): """Test copying with symlink preservation.""" with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest', symlinks=True) assert os.path.exists('dest/2') assert os.path.islink('dest/2') assert os.path.exists('dest/a/b2') with fs.working_dir('dest/a'): assert os.path.exists(os.readlink('b2')) assert (os.path.realpath('dest/f/2') == os.path.abspath('dest/a/b/2')) assert os.path.realpath('dest/2') == os.path.abspath('dest/1') def test_symlinks_true_ignore(self, stage): """Test copying when specifying relative paths that should be ignored """ with fs.working_dir(str(stage)): ignore = lambda p: p in ['c/d/e', 'a'] fs.copy_tree('source', 'dest', symlinks=True, ignore=ignore) assert not os.path.exists('dest/a') assert os.path.exists('dest/c/d') assert not os.path.exists('dest/c/d/e') def test_symlinks_false(self, stage): """Test copying without symlink preservation.""" with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest', symlinks=False) assert os.path.exists('dest/2') assert not os.path.islink('dest/2') class TestInstallTree: """Tests for ``filesystem.install_tree``""" def test_existing_dir(self, stage): """Test installing to an existing directory.""" with fs.working_dir(str(stage)): fs.install_tree('source', 'dest') assert os.path.exists('dest/a/b/2') def test_non_existing_dir(self, stage): """Test installing to a non-existing directory.""" with fs.working_dir(str(stage)): fs.install_tree('source', 'dest/sub/directory') assert os.path.exists('dest/sub/directory/a/b/2') def test_symlinks_true(self, stage): """Test installing with symlink preservation.""" with fs.working_dir(str(stage)): fs.install_tree('source', 'dest', symlinks=True) assert os.path.exists('dest/2') assert os.path.islink('dest/2') def test_symlinks_false(self, stage): """Test installing without symlink preservation.""" with fs.working_dir(str(stage)): fs.install_tree('source', 'dest', symlinks=False) assert os.path.exists('dest/2') assert not os.path.islink('dest/2') def test_move_transaction_commit(tmpdir): fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Just some fake content.') old_md5 = fs.hash_directory(str(tmpdir)) with fs.replace_directory_transaction(str(tmpdir.join('lib'))): fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Other content.') new_md5 = fs.hash_directory(str(tmpdir)) assert old_md5 != fs.hash_directory(str(tmpdir)) assert new_md5 == fs.hash_directory(str(tmpdir)) def test_move_transaction_rollback(tmpdir): fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Just some fake content.') h = fs.hash_directory(str(tmpdir)) try: with fs.replace_directory_transaction(str(tmpdir.join('lib'))): assert h != fs.hash_directory(str(tmpdir)) fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Other content.') raise RuntimeError('') except RuntimeError: pass assert h == fs.hash_directory(str(tmpdir)) @pytest.mark.regression('10601') @pytest.mark.regression('10603') def test_recursive_search_of_headers_from_prefix( installation_dir_with_headers ): # Try to inspect recursively from <prefix> and ensure we don't get # subdirectories of the '<prefix>/include' path prefix = str(installation_dir_with_headers) header_list = fs.find_all_headers(prefix) # Check that the header files we expect are all listed assert os.path.join(prefix, 'include', 'ex3.h') in 
header_list assert os.path.join(prefix, 'include', 'boost', 'ex3.h') in header_list assert os.path.join(prefix, 'path', 'to', 'ex1.h') in header_list assert os.path.join(prefix, 'path', 'to', 'subdir', 'ex2.h') in header_list # Check that when computing directories we exclude <prefix>/include/boost include_dirs = header_list.directories assert os.path.join(prefix, 'include') in include_dirs assert os.path.join(prefix, 'include', 'boost') not in include_dirs assert os.path.join(prefix, 'path', 'to') in include_dirs assert os.path.join(prefix, 'path', 'to', 'subdir') in include_dirs @pytest.mark.parametrize('list_of_headers,expected_directories', [ (['/pfx/include/foo.h', '/pfx/include/subdir/foo.h'], ['/pfx/include']), (['/pfx/include/foo.h', '/pfx/subdir/foo.h'], ['/pfx/include', '/pfx/subdir']), (['/pfx/include/subdir/foo.h', '/pfx/subdir/foo.h'], ['/pfx/include', '/pfx/subdir']) ]) def test_computation_of_header_directories( list_of_headers, expected_directories ): hl = fs.HeaderList(list_of_headers) assert hl.directories == expected_directories def test_headers_directory_setter(): hl = fs.HeaderList( ['/pfx/include/subdir/foo.h', '/pfx/include/subdir/bar.h'] ) # Set directories using a list hl.directories = ['/pfx/include/subdir'] assert hl.directories == ['/pfx/include/subdir'] # If it's a single directory it's fine to not wrap it into a list # when setting the property hl.directories = '/pfx/include/subdir' assert hl.directories == ['/pfx/include/subdir'] # Paths are normalized, so it doesn't matter how many backslashes etc. # are present in the original directory being used hl.directories = '/pfx/include//subdir/' assert hl.directories == ['/pfx/include/subdir'] # Setting an empty list is allowed and returns an empty list hl.directories = [] assert hl.directories == [] # Setting directories to None also returns an empty list hl.directories = None assert hl.directories == []
32.742475
79
0.631359
import llnl.util.filesystem as fs import os import stat import pytest @pytest.fixture() def stage(tmpdir_factory): s = tmpdir_factory.mktemp('filesystem_test') with s.as_cwd(): fs.touchp('source/1') fs.touchp('source/a/b/2') fs.touchp('source/a/b/3') fs.touchp('source/c/4') fs.touchp('source/c/d/5') fs.touchp('source/c/d/6') fs.touchp('source/c/d/e/7') os.symlink(os.path.abspath('source/1'), 'source/2') os.symlink('b/2', 'source/a/b2') os.symlink('a/b', 'source/f') fs.mkdirp('dest') yield s class TestCopy: def test_file_dest(self, stage): with fs.working_dir(str(stage)): fs.copy('source/1', 'dest/1') assert os.path.exists('dest/1') def test_dir_dest(self, stage): with fs.working_dir(str(stage)): fs.copy('source/1', 'dest') assert os.path.exists('dest/1') def check_added_exe_permissions(src, dst): src_mode = os.stat(src).st_mode dst_mode = os.stat(dst).st_mode for perm in [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH]: if src_mode & perm: assert dst_mode & perm class TestInstall: def test_file_dest(self, stage): with fs.working_dir(str(stage)): fs.install('source/1', 'dest/1') assert os.path.exists('dest/1') check_added_exe_permissions('source/1', 'dest/1') def test_dir_dest(self, stage): with fs.working_dir(str(stage)): fs.install('source/1', 'dest') assert os.path.exists('dest/1') check_added_exe_permissions('source/1', 'dest/1') class TestCopyTree: def test_existing_dir(self, stage): with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest') assert os.path.exists('dest/a/b/2') def test_non_existing_dir(self, stage): with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest/sub/directory') assert os.path.exists('dest/sub/directory/a/b/2') def test_parent_dir(self, stage): with pytest.raises(ValueError, matches="Cannot copy"): with fs.working_dir(str(stage)): fs.copy_tree('source', 'source/sub/directory') # the copy. 
with pytest.raises(IOError, matches="No such file or directory"): with fs.working_dir(str(stage)): fs.copy_tree('foo/ba', 'foo/bar') def test_symlinks_true(self, stage): with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest', symlinks=True) assert os.path.exists('dest/2') assert os.path.islink('dest/2') assert os.path.exists('dest/a/b2') with fs.working_dir('dest/a'): assert os.path.exists(os.readlink('b2')) assert (os.path.realpath('dest/f/2') == os.path.abspath('dest/a/b/2')) assert os.path.realpath('dest/2') == os.path.abspath('dest/1') def test_symlinks_true_ignore(self, stage): with fs.working_dir(str(stage)): ignore = lambda p: p in ['c/d/e', 'a'] fs.copy_tree('source', 'dest', symlinks=True, ignore=ignore) assert not os.path.exists('dest/a') assert os.path.exists('dest/c/d') assert not os.path.exists('dest/c/d/e') def test_symlinks_false(self, stage): with fs.working_dir(str(stage)): fs.copy_tree('source', 'dest', symlinks=False) assert os.path.exists('dest/2') assert not os.path.islink('dest/2') class TestInstallTree: def test_existing_dir(self, stage): with fs.working_dir(str(stage)): fs.install_tree('source', 'dest') assert os.path.exists('dest/a/b/2') def test_non_existing_dir(self, stage): with fs.working_dir(str(stage)): fs.install_tree('source', 'dest/sub/directory') assert os.path.exists('dest/sub/directory/a/b/2') def test_symlinks_true(self, stage): with fs.working_dir(str(stage)): fs.install_tree('source', 'dest', symlinks=True) assert os.path.exists('dest/2') assert os.path.islink('dest/2') def test_symlinks_false(self, stage): with fs.working_dir(str(stage)): fs.install_tree('source', 'dest', symlinks=False) assert os.path.exists('dest/2') assert not os.path.islink('dest/2') def test_move_transaction_commit(tmpdir): fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Just some fake content.') old_md5 = fs.hash_directory(str(tmpdir)) with fs.replace_directory_transaction(str(tmpdir.join('lib'))): fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Other content.') new_md5 = fs.hash_directory(str(tmpdir)) assert old_md5 != fs.hash_directory(str(tmpdir)) assert new_md5 == fs.hash_directory(str(tmpdir)) def test_move_transaction_rollback(tmpdir): fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Just some fake content.') h = fs.hash_directory(str(tmpdir)) try: with fs.replace_directory_transaction(str(tmpdir.join('lib'))): assert h != fs.hash_directory(str(tmpdir)) fake_library = tmpdir.mkdir('lib').join('libfoo.so') fake_library.write('Other content.') raise RuntimeError('') except RuntimeError: pass assert h == fs.hash_directory(str(tmpdir)) @pytest.mark.regression('10601') @pytest.mark.regression('10603') def test_recursive_search_of_headers_from_prefix( installation_dir_with_headers ): # Try to inspect recursively from <prefix> and ensure we don't get prefix = str(installation_dir_with_headers) header_list = fs.find_all_headers(prefix) assert os.path.join(prefix, 'include', 'ex3.h') in header_list assert os.path.join(prefix, 'include', 'boost', 'ex3.h') in header_list assert os.path.join(prefix, 'path', 'to', 'ex1.h') in header_list assert os.path.join(prefix, 'path', 'to', 'subdir', 'ex2.h') in header_list include_dirs = header_list.directories assert os.path.join(prefix, 'include') in include_dirs assert os.path.join(prefix, 'include', 'boost') not in include_dirs assert os.path.join(prefix, 'path', 'to') in include_dirs assert os.path.join(prefix, 'path', 'to', 'subdir') in include_dirs 
@pytest.mark.parametrize('list_of_headers,expected_directories', [ (['/pfx/include/foo.h', '/pfx/include/subdir/foo.h'], ['/pfx/include']), (['/pfx/include/foo.h', '/pfx/subdir/foo.h'], ['/pfx/include', '/pfx/subdir']), (['/pfx/include/subdir/foo.h', '/pfx/subdir/foo.h'], ['/pfx/include', '/pfx/subdir']) ]) def test_computation_of_header_directories( list_of_headers, expected_directories ): hl = fs.HeaderList(list_of_headers) assert hl.directories == expected_directories def test_headers_directory_setter(): hl = fs.HeaderList( ['/pfx/include/subdir/foo.h', '/pfx/include/subdir/bar.h'] ) hl.directories = ['/pfx/include/subdir'] assert hl.directories == ['/pfx/include/subdir'] hl.directories = '/pfx/include/subdir' assert hl.directories == ['/pfx/include/subdir'] # are present in the original directory being used hl.directories = '/pfx/include//subdir/' assert hl.directories == ['/pfx/include/subdir'] # Setting an empty list is allowed and returns an empty list hl.directories = [] assert hl.directories == [] # Setting directories to None also returns an empty list hl.directories = None assert hl.directories == []
true
true
f70844acde7c3e99d4f4ea1b217c618aa5a8b698
9,285
py
Python
mnogoznal/wsd.py
dustalov/mnogoznal
bacea1576d31e0d2ad5456159a57950899a116f6
[ "MIT" ]
10
2017-08-11T08:47:25.000Z
2018-04-07T15:13:37.000Z
mnogoznal/wsd.py
dustalov/mnogoznal
bacea1576d31e0d2ad5456159a57950899a116f6
[ "MIT" ]
1
2019-10-31T17:59:49.000Z
2019-10-31T17:59:49.000Z
mnogoznal/wsd.py
nlpub/watasense
bacea1576d31e0d2ad5456159a57950899a116f6
[ "MIT" ]
1
2018-02-18T15:03:24.000Z
2018-02-18T15:03:24.000Z
import abc import csv from collections import namedtuple, defaultdict, OrderedDict, Counter import numpy as np from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.metrics.pairwise import cosine_similarity as sim from sklearn.pipeline import Pipeline STOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'} Synset = namedtuple('Synset', 'id synonyms hypernyms bag') class Inventory(object): """Sense inventory representation and loader.""" synsets = {} index = defaultdict(list) def __init__(self, inventory_path): """ During the construction, BaseWSD parses the given sense inventory file. """ def field_to_bag(field): return {word: freq for record in field.split(', ') for word, freq in (self.lexeme(record),) if record} with open(inventory_path, 'r', encoding='utf-8', newline='') as f: reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: id = row[0] synonyms = field_to_bag(row[2]) hypernyms = field_to_bag(row[4]) self.synsets[id] = Synset( id=id, synonyms=synonyms, hypernyms=hypernyms, bag={**synonyms, **hypernyms} ) for word in self.synsets[id].bag: self.index[word].append(id) def lexeme(self, record): """ Parse the sense representations like 'word#sid:freq'. Actually, we do not care about the sid field because we use synset identifiers instead. """ if '#' in record: word, tail = record.split('#', 1) else: word, tail = record, None if tail: if ':' in tail: sid, tail = tail.split(':', 1) else: sid, tail = tail, None if tail: freq = float(tail) else: freq = 1 return word, freq Span = namedtuple('Span', 'token pos lemma index') class BaseWSD(object): """ Base class for word sense disambiguation routines. Should not be used. Descendant classes must implement the disambiguate_word() method. """ __metaclass__ = abc.ABCMeta def __init__(self, inventory): self.inventory = inventory def lemmatize(self, sentence): """ This method transforms the given sentence into the dict that maps the word indices to their lemmas. It also excludes those words which part of speech is in the stop list. """ return {i: lemma for i, (_, lemma, pos) in enumerate(sentence) if pos not in STOP_POS} @abc.abstractmethod def disambiguate_word(self, sentence, index): """ Return word sense identifier for the given word in the sentence. """ if not sentence or not isinstance(sentence, list): raise ValueError('sentence should be a list') if not isinstance(index, int) or index < 0 or index >= len(sentence): raise ValueError('index should be in [0...%d]' % len(sentence)) def disambiguate(self, sentence): """ Return word sense identifiers corresponding to the words in the given sentence. """ result = OrderedDict() for index, span in enumerate(sentence): # here, span is (token, pos, lemma), but we also need index span = Span(*span, index) result[span] = self.disambiguate_word(sentence, index) return result class OneBaseline(BaseWSD): """ A simple baseline that treats every word as monosemeous. Not thread-safe. """ counter = {} def __init__(self): super().__init__(None) def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) word, _, _ = sentence[index] if word not in self.counter: self.counter[word] = len(self.counter) return str(self.counter[word]) class SingletonsBaseline(BaseWSD): """ A simple baseline that puts every instance into a different cluster. Not thread-safe. 
""" counter = 0 def __init__(self): super().__init__(None) def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) self.counter += 1 return str(self.counter) class SparseWSD(BaseWSD): """ A simple sparse word sense disambiguation. """ sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())]) def __init__(self, inventory): super().__init__(inventory) self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()]) def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) lemmas = self.lemmatize(sentence) if index not in lemmas: return svector = self.sparse.transform(Counter(lemmas.values())) # sentence vector def search(query): """ Map synset identifiers to the cosine similarity value. This function calls the function query(id) that retrieves the corresponding dict of words. """ return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0) for id in self.inventory.index[lemmas[index]]}) candidates = search(lambda id: self.inventory.synsets[id].synonyms) # give the hypernyms a chance if nothing is found if not candidates: candidates = search(lambda id: self.inventory.synsets[id].bag) if not candidates: return for id, _ in candidates.most_common(1): return id class DenseWSD(BaseWSD): """ A word sense disambiguation approach that is based on SenseGram. """ class densedict(dict): """ A handy dict that transforms a synset into its dense representation. """ def __init__(self, synsets, sensegram): self.synsets = synsets self.sensegram = sensegram def __missing__(self, id): value = self[id] = self.sensegram(self.synsets[id].bag.keys()) return value def __init__(self, inventory, wv): super().__init__(inventory) self.wv = wv self.dense = self.densedict(self.inventory.synsets, self.sensegram) def sensegram(self, words): """ This is a simple implementation of SenseGram. It just averages the embeddings corresponding to the given words. """ vectors = self.words_vec(set(words)) if not vectors: return return np.mean(np.vstack(tuple(vectors.values())), axis=0).reshape(1, -1) def words_vec(self, words, use_norm=False): """ Return a dict that maps the given words to their embeddings. """ if callable(getattr(self.wv, 'words_vec', None)): return self.wv.words_vec(words, use_norm) return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv} def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) lemmas = self.lemmatize(sentence) if index not in lemmas: return svector = self.sensegram(lemmas.values()) # sentence vector if svector is None: return # map synset identifiers to the cosine similarity value candidates = Counter({id: sim(svector, self.dense[id]).item(0) for id in self.inventory.index[lemmas[index]] if self.dense[id] is not None}) if not candidates: return for id, _ in candidates.most_common(1): return id class LeskWSD(BaseWSD): """ A word sense disambiguation approach that is based on Lesk method. 
""" def __init__(self, inventory): super().__init__(inventory) def disambiguate_word(self, sentence, word_index): super().disambiguate_word(sentence, word_index) lemmas = self.lemmatize(sentence) if word_index not in lemmas: return mentions_dict = dict() for synset_number in self.inventory.index[lemmas[word_index]]: mentions_dict[synset_number] = 0 for context_word in lemmas.values(): if context_word != lemmas[word_index]: if context_word in self.inventory.synsets[synset_number].synonyms: mentions_dict[synset_number] = mentions_dict[synset_number] + 1 elif context_word in self.inventory.synsets[synset_number].hypernyms: mentions_dict[synset_number] = mentions_dict[synset_number] + \ self.inventory.synsets[synset_number].hypernyms[context_word] if len(mentions_dict) > 0: return max(mentions_dict, key=mentions_dict.get) else: return
30.146104
116
0.596446
import abc import csv from collections import namedtuple, defaultdict, OrderedDict, Counter import numpy as np from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.metrics.pairwise import cosine_similarity as sim from sklearn.pipeline import Pipeline STOP_POS = {'CONJ', 'INTJ', 'PART', 'PR', 'UNKNOWN'} Synset = namedtuple('Synset', 'id synonyms hypernyms bag') class Inventory(object): synsets = {} index = defaultdict(list) def __init__(self, inventory_path): def field_to_bag(field): return {word: freq for record in field.split(', ') for word, freq in (self.lexeme(record),) if record} with open(inventory_path, 'r', encoding='utf-8', newline='') as f: reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: id = row[0] synonyms = field_to_bag(row[2]) hypernyms = field_to_bag(row[4]) self.synsets[id] = Synset( id=id, synonyms=synonyms, hypernyms=hypernyms, bag={**synonyms, **hypernyms} ) for word in self.synsets[id].bag: self.index[word].append(id) def lexeme(self, record): if '#' in record: word, tail = record.split('#', 1) else: word, tail = record, None if tail: if ':' in tail: sid, tail = tail.split(':', 1) else: sid, tail = tail, None if tail: freq = float(tail) else: freq = 1 return word, freq Span = namedtuple('Span', 'token pos lemma index') class BaseWSD(object): __metaclass__ = abc.ABCMeta def __init__(self, inventory): self.inventory = inventory def lemmatize(self, sentence): return {i: lemma for i, (_, lemma, pos) in enumerate(sentence) if pos not in STOP_POS} @abc.abstractmethod def disambiguate_word(self, sentence, index): if not sentence or not isinstance(sentence, list): raise ValueError('sentence should be a list') if not isinstance(index, int) or index < 0 or index >= len(sentence): raise ValueError('index should be in [0...%d]' % len(sentence)) def disambiguate(self, sentence): result = OrderedDict() for index, span in enumerate(sentence): span = Span(*span, index) result[span] = self.disambiguate_word(sentence, index) return result class OneBaseline(BaseWSD): counter = {} def __init__(self): super().__init__(None) def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) word, _, _ = sentence[index] if word not in self.counter: self.counter[word] = len(self.counter) return str(self.counter[word]) class SingletonsBaseline(BaseWSD): counter = 0 def __init__(self): super().__init__(None) def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) self.counter += 1 return str(self.counter) class SparseWSD(BaseWSD): sparse = Pipeline([('dict', DictVectorizer()), ('tfidf', TfidfTransformer())]) def __init__(self, inventory): super().__init__(inventory) self.sparse.fit([synset.bag for synset in self.inventory.synsets.values()]) def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) lemmas = self.lemmatize(sentence) if index not in lemmas: return svector = self.sparse.transform(Counter(lemmas.values())) def search(query): return Counter({id: sim(svector, self.sparse.transform(query(id))).item(0) for id in self.inventory.index[lemmas[index]]}) candidates = search(lambda id: self.inventory.synsets[id].synonyms) if not candidates: candidates = search(lambda id: self.inventory.synsets[id].bag) if not candidates: return for id, _ in candidates.most_common(1): return id class DenseWSD(BaseWSD): class densedict(dict): def __init__(self, synsets, sensegram): self.synsets = synsets self.sensegram = 
sensegram def __missing__(self, id): value = self[id] = self.sensegram(self.synsets[id].bag.keys()) return value def __init__(self, inventory, wv): super().__init__(inventory) self.wv = wv self.dense = self.densedict(self.inventory.synsets, self.sensegram) def sensegram(self, words): vectors = self.words_vec(set(words)) if not vectors: return return np.mean(np.vstack(tuple(vectors.values())), axis=0).reshape(1, -1) def words_vec(self, words, use_norm=False): if callable(getattr(self.wv, 'words_vec', None)): return self.wv.words_vec(words, use_norm) return {word: self.wv.word_vec(word, use_norm) for word in words if word in self.wv} def disambiguate_word(self, sentence, index): super().disambiguate_word(sentence, index) lemmas = self.lemmatize(sentence) if index not in lemmas: return svector = self.sensegram(lemmas.values()) if svector is None: return candidates = Counter({id: sim(svector, self.dense[id]).item(0) for id in self.inventory.index[lemmas[index]] if self.dense[id] is not None}) if not candidates: return for id, _ in candidates.most_common(1): return id class LeskWSD(BaseWSD): def __init__(self, inventory): super().__init__(inventory) def disambiguate_word(self, sentence, word_index): super().disambiguate_word(sentence, word_index) lemmas = self.lemmatize(sentence) if word_index not in lemmas: return mentions_dict = dict() for synset_number in self.inventory.index[lemmas[word_index]]: mentions_dict[synset_number] = 0 for context_word in lemmas.values(): if context_word != lemmas[word_index]: if context_word in self.inventory.synsets[synset_number].synonyms: mentions_dict[synset_number] = mentions_dict[synset_number] + 1 elif context_word in self.inventory.synsets[synset_number].hypernyms: mentions_dict[synset_number] = mentions_dict[synset_number] + \ self.inventory.synsets[synset_number].hypernyms[context_word] if len(mentions_dict) > 0: return max(mentions_dict, key=mentions_dict.get) else: return
true
true
f708476a2cc43393cd5ba3aa180c3286fe8d674e
2,712
py
Python
run.py
nyamzy/Password_Manager
6f109b2ace6eef23148ae42be0b62124c9fb5e37
[ "Unlicense" ]
null
null
null
run.py
nyamzy/Password_Manager
6f109b2ace6eef23148ae42be0b62124c9fb5e37
[ "Unlicense" ]
null
null
null
run.py
nyamzy/Password_Manager
6f109b2ace6eef23148ae42be0b62124c9fb5e37
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python3 from password_manager import User, Credentials def create_user(fname, lname, uname, email, password): ''' Function that creates a new user ''' new_user = User(fname, lname, uname, email, password) return new_user def save_users(user): ''' Function that saves a new user ''' user.save_user() def del_user(user): ''' Function that deletes a user ''' user.delete_contact() def find_user(username): ''' Function that finds a user by username and returns the user ''' return User.find_by_username(username) def display_users(): ''' Function that returns all the saved users ''' return User.display_users() def check_existing_users(username): ''' Function that checks if a user is existing and returns a Boolean ''' return User.user_exist(username) def main(): print("Hi there. Welcome to your password manager app. Kindly let me know your name.") user_name = input() print(f"Hi {user_name}! What would you like to do today?") print("\n") while True: print("Use the following numbers: 1 - Create new user account, 2 - Display user accounts, 3 - Find account, 4 - Delete account, 5 - Exit") number = input() if number == '1': print("New User Account Creation") print("-"*10) print("First name ...") f_name = input() print("Last name ...") l_name = input() print("Username ...") u_name = input() print("Email address ...") e_address = input() print("Password ...") password = input() save_users(create_user(f_name, l_name, u_name, e_address, password)) #Create and save new user details print("\n") print(f"New User {u_name} created") print('\n') elif number == '2': if display_users(): print("User Accounts:") print('\n') for user in display_users(): print(f"{user.first_name} {user.last_name} {user.user_name} {user.email}") print('\n') else: print('\n') print("Please create an account") elif number == '3': print("Enter the username you want to search for:") search_username = input() if check_existing_users(search_username): search_user = find_user(search_username) print(f"{search_user.first_name} {search_user.last_name}") print("-"*10) print(f"Username....{search_user.user_name}") print(f"Email address....{search_user.email}") else: print("That user does not exist.") elif number == '5': print("Bye. Come again soon.") break else: print("Please use the correct number") if __name__ == '__main__': main()
23.179487
142
0.620575
from password_manager import User, Credentials def create_user(fname, lname, uname, email, password): new_user = User(fname, lname, uname, email, password) return new_user def save_users(user): user.save_user() def del_user(user): user.delete_contact() def find_user(username): return User.find_by_username(username) def display_users(): return User.display_users() def check_existing_users(username): return User.user_exist(username) def main(): print("Hi there. Welcome to your password manager app. Kindly let me know your name.") user_name = input() print(f"Hi {user_name}! What would you like to do today?") print("\n") while True: print("Use the following numbers: 1 - Create new user account, 2 - Display user accounts, 3 - Find account, 4 - Delete account, 5 - Exit") number = input() if number == '1': print("New User Account Creation") print("-"*10) print("First name ...") f_name = input() print("Last name ...") l_name = input() print("Username ...") u_name = input() print("Email address ...") e_address = input() print("Password ...") password = input() save_users(create_user(f_name, l_name, u_name, e_address, password)) print("\n") print(f"New User {u_name} created") print('\n') elif number == '2': if display_users(): print("User Accounts:") print('\n') for user in display_users(): print(f"{user.first_name} {user.last_name} {user.user_name} {user.email}") print('\n') else: print('\n') print("Please create an account") elif number == '3': print("Enter the username you want to search for:") search_username = input() if check_existing_users(search_username): search_user = find_user(search_username) print(f"{search_user.first_name} {search_user.last_name}") print("-"*10) print(f"Username....{search_user.user_name}") print(f"Email address....{search_user.email}") else: print("That user does not exist.") elif number == '5': print("Bye. Come again soon.") break else: print("Please use the correct number") if __name__ == '__main__': main()
true
true
f708481f6e02fece16961a60a2c795ef64ec141f
10,933
py
Python
project/systems/ecgresnet_ensemble_auxout.py
HabibMrad/uncertainty
1646a9b07d1179045dd0375149250d5ac7501004
[ "Apache-2.0" ]
1
2022-01-04T23:25:06.000Z
2022-01-04T23:25:06.000Z
project/systems/ecgresnet_ensemble_auxout.py
HabibMrad/uncertainty
1646a9b07d1179045dd0375149250d5ac7501004
[ "Apache-2.0" ]
null
null
null
project/systems/ecgresnet_ensemble_auxout.py
HabibMrad/uncertainty
1646a9b07d1179045dd0375149250d5ac7501004
[ "Apache-2.0" ]
null
null
null
import sys import os import torch import pandas as pd import datetime from argparse import ArgumentParser import numpy as np from torch import nn, optim import torch.nn.functional as F from torch.utils.data import DataLoader, random_split from icecream import ic import pytorch_lightning as pl from pytorch_lightning.metrics import functional as FM from network.ecgresnet_auxout import ECGResNet_AuxOut from utils.helpers import create_results_directory from utils.focalloss_weights import FocalLoss class ECGResNetEnsemble_AuxOutSystem(pl.LightningModule): """ This class implements the ECGResNet with ensemble and auxiliary output in PyTorch Lightning. It can estimate the epistemic and aleatoric uncertainty of its predictions. """ def __init__(self, in_channels, n_grps, N, num_classes, dropout, first_width, stride, dilation, learning_rate, ensemble_size, n_logit_samples, loss_weights=None, **kwargs): """ Initializes the ECGResNetEnsemble_AuxOutSystem Args: in_channels: number of channels of input n_grps: number of ResNet groups N: number of blocks per groups num_classes: number of classes of the classification problem dropout: probability of an argument to get zeroed in the dropout layer first_width: width of the first input stride: tuple with stride value per block per group dilation: spacing between the kernel points of the convolutional layers learning_rate: the learning rate of the model ensemble_size: the number of models that make up the ensemble n_logit_samples: number of logit samples of the auxiliary output loss_weights: array of weights for the loss term """ super().__init__() self.save_hyperparameters() self.learning_rate = learning_rate self.num_classes = num_classes self.ensemble_size = ensemble_size self.n_logit_samples = n_logit_samples self.IDs = torch.empty(0).type(torch.LongTensor) self.predicted_labels = torch.empty(0).type(torch.LongTensor) self.correct_predictions = torch.empty(0).type(torch.BoolTensor) self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor) self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor) self.total_uncertainty = torch.empty(0).type(torch.FloatTensor) self.models = [] self.optimizers = [] for i in range(self.ensemble_size): self.models.append(ECGResNet_AuxOut(in_channels, n_grps, N, num_classes, dropout, first_width, stride, dilation) ) if loss_weights is not None: weights = torch.tensor(loss_weights, dtype = torch.float) else: weights = loss_weights self.loss = FocalLoss(gamma=1, weights = weights) def forward(self, x, model_idx): """Performs a forward through a single ensemble member. Args: x (tensor): Input data. model_idx (int): Index of the ensemble member. Returns: output1: Output at the auxiliary point of the ensemble member output2: Output at the end of the ensemble member output2_log_var: The log variance of the ensemble_member """ output1, output2_mean, output2_log_var = self.models[model_idx](x) return output1, output2_mean, output2_log_var def training_step(self, batch, batch_idx, optimizer_idx): """Performs a training step for all ensemble members. Args: batch (dict): Output of the dataloader. batch_idx (int): Index no. of this batch. Returns: tensor: Total loss for this step. 
""" data, target = batch['waveform'], batch['label'] losses = [] for model_idx in range(self.ensemble_size): # Make prediction output1, output2_mean, output2_log_var = self(data, model_idx) # Sample from logits, returning a vector x_i x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True) train_loss1 = self.loss(output1, target) train_loss2 = self.loss(x_i, target) total_train_loss = (0.3 * train_loss1) + train_loss2 # Update weights for each model using individual optimizers self.manual_backward(total_train_loss, self.optimizers[model_idx]) self.optimizers[model_idx].step() self.optimizers[model_idx].zero_grad() losses.append(total_train_loss.item()) self.log('model_{}_train_loss'.format(model_idx), total_train_loss) average_train_loss = np.mean(losses) self.log('average_train_loss', average_train_loss) return {'loss': average_train_loss} def validation_step(self, batch, batch_idx): prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes) data, target = batch['waveform'], batch['label'] # Predict for each model for model_idx in range(self.ensemble_size): # Make prediction _, output2_mean, output2_log_var = self(data, model_idx) # Sample from logits, returning avector x_i x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True) prediction_individual[:, model_idx] = x_i # Calculate mean over predictions from individual ensemble members prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1) val_loss = self.loss(prediction_ensemble_mean, target) acc = FM.accuracy(prediction_ensemble_mean, target) # loss is tensor. The Checkpoint Callback is monitoring 'checkpoint_on' metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()} self.log('val_acc', acc.item()) self.log('val_loss', val_loss.item()) return metrics def test_step(self, batch, batch_idx, save_to_csv=False): prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes) aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes) data, target = batch['waveform'], batch['label'] # Predict for each model for model_idx, model in enumerate(self.models): # Make prediction _, output2_mean, output2_log_var = self(data, model_idx) # Sample from logits, returning a vector x_i x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True) prediction_individual[:, model_idx] = x_i.data # Take exponent to get the variance output2_var = output2_log_var.exp() aleatoric_var[:, model_idx] = output2_var.data # Calculate mean and variance over predictions from individual ensemble members prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1) prediction_ensemble_var = torch.var(prediction_individual, dim=1) # Get the average aleatoric uncertainty for each prediction prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1) # Select the predicted labels predicted_labels = prediction_ensemble_mean.argmax(dim=1) test_loss = self.loss(prediction_ensemble_mean, target) acc = FM.accuracy(prediction_ensemble_mean, target) # Get the epistemic variance of the predicted labels by selecting the variance of # the labels with highest average Softmax value predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu() # Get the aleatoric variance of the predicted labels by 
selecting the variance of # the labels with highest average Softmax value predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu() total_var = predicted_labels_var + predicted_labels_aleatoric_var # Log and save metrics self.log('test_acc', acc.item()) self.log('test_loss', test_loss.item()) self.IDs = torch.cat((self.IDs, batch['id']), 0) self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0) self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0) self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0) self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0) self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0) return {'test_loss': test_loss.item(), 'test_acc': acc.item(), 'test_loss': test_loss.item()} def configure_optimizers(self): """ Initialize an optimizer for each model in the ensemble """ for i in range(self.ensemble_size): self.optimizers.append(optim.Adam(self.models[i].parameters(), lr=self.learning_rate)) return self.optimizers def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument('--model_name', type=str, default='ensemble_none') parser.add_argument('--ensemble_size', type=int, default=5) parser.add_argument('--ensembling_method', type=bool, default=True) parser.add_argument('--n_logit_samples', type=int, default=100) return parser def save_results(self): """ Combine results into single dataframe and save to disk as .csv file """ results = pd.concat([ pd.DataFrame(self.IDs.numpy(), columns= ['ID']), pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']), pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']), pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']), pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']), pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']), ], axis=1) create_results_directory() results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
44.084677
156
0.668892
import sys import os import torch import pandas as pd import datetime from argparse import ArgumentParser import numpy as np from torch import nn, optim import torch.nn.functional as F from torch.utils.data import DataLoader, random_split from icecream import ic import pytorch_lightning as pl from pytorch_lightning.metrics import functional as FM from network.ecgresnet_auxout import ECGResNet_AuxOut from utils.helpers import create_results_directory from utils.focalloss_weights import FocalLoss class ECGResNetEnsemble_AuxOutSystem(pl.LightningModule): def __init__(self, in_channels, n_grps, N, num_classes, dropout, first_width, stride, dilation, learning_rate, ensemble_size, n_logit_samples, loss_weights=None, **kwargs): super().__init__() self.save_hyperparameters() self.learning_rate = learning_rate self.num_classes = num_classes self.ensemble_size = ensemble_size self.n_logit_samples = n_logit_samples self.IDs = torch.empty(0).type(torch.LongTensor) self.predicted_labels = torch.empty(0).type(torch.LongTensor) self.correct_predictions = torch.empty(0).type(torch.BoolTensor) self.epistemic_uncertainty = torch.empty(0).type(torch.FloatTensor) self.aleatoric_uncertainty = torch.empty(0).type(torch.FloatTensor) self.total_uncertainty = torch.empty(0).type(torch.FloatTensor) self.models = [] self.optimizers = [] for i in range(self.ensemble_size): self.models.append(ECGResNet_AuxOut(in_channels, n_grps, N, num_classes, dropout, first_width, stride, dilation) ) if loss_weights is not None: weights = torch.tensor(loss_weights, dtype = torch.float) else: weights = loss_weights self.loss = FocalLoss(gamma=1, weights = weights) def forward(self, x, model_idx): output1, output2_mean, output2_log_var = self.models[model_idx](x) return output1, output2_mean, output2_log_var def training_step(self, batch, batch_idx, optimizer_idx): data, target = batch['waveform'], batch['label'] losses = [] for model_idx in range(self.ensemble_size): output1, output2_mean, output2_log_var = self(data, model_idx) x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True) train_loss1 = self.loss(output1, target) train_loss2 = self.loss(x_i, target) total_train_loss = (0.3 * train_loss1) + train_loss2 self.manual_backward(total_train_loss, self.optimizers[model_idx]) self.optimizers[model_idx].step() self.optimizers[model_idx].zero_grad() losses.append(total_train_loss.item()) self.log('model_{}_train_loss'.format(model_idx), total_train_loss) average_train_loss = np.mean(losses) self.log('average_train_loss', average_train_loss) return {'loss': average_train_loss} def validation_step(self, batch, batch_idx): prediction_individual = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes) data, target = batch['waveform'], batch['label'] for model_idx in range(self.ensemble_size): _, output2_mean, output2_log_var = self(data, model_idx) x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True) prediction_individual[:, model_idx] = x_i prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1) val_loss = self.loss(prediction_ensemble_mean, target) acc = FM.accuracy(prediction_ensemble_mean, target) metrics = {'val_loss': val_loss.item(), 'val_acc': acc.item()} self.log('val_acc', acc.item()) self.log('val_loss', val_loss.item()) return metrics def test_step(self, batch, batch_idx, save_to_csv=False): prediction_individual = torch.empty(batch['label'].shape[0], 
self.ensemble_size, self.num_classes) aleatoric_var = torch.empty(batch['label'].shape[0], self.ensemble_size, self.num_classes) data, target = batch['waveform'], batch['label'] for model_idx, model in enumerate(self.models): _, output2_mean, output2_log_var = self(data, model_idx) x_i = self.models[model_idx].sample_logits(self.n_logit_samples, output2_mean, output2_log_var, average=True) prediction_individual[:, model_idx] = x_i.data output2_var = output2_log_var.exp() aleatoric_var[:, model_idx] = output2_var.data prediction_ensemble_mean = F.softmax(torch.mean(prediction_individual, dim=1), dim=1) prediction_ensemble_var = torch.var(prediction_individual, dim=1) prediction_aleatoric_var = torch.mean(aleatoric_var, dim=1) predicted_labels = prediction_ensemble_mean.argmax(dim=1) test_loss = self.loss(prediction_ensemble_mean, target) acc = FM.accuracy(prediction_ensemble_mean, target) predicted_labels_var = torch.gather(prediction_ensemble_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu() predicted_labels_aleatoric_var = torch.gather(prediction_aleatoric_var, 1, prediction_ensemble_mean.argmax(dim=1).unsqueeze_(1))[:, 0].cpu() total_var = predicted_labels_var + predicted_labels_aleatoric_var self.log('test_acc', acc.item()) self.log('test_loss', test_loss.item()) self.IDs = torch.cat((self.IDs, batch['id']), 0) self.predicted_labels = torch.cat((self.predicted_labels, predicted_labels), 0) self.epistemic_uncertainty = torch.cat((self.epistemic_uncertainty, predicted_labels_var), 0) self.aleatoric_uncertainty = torch.cat((self.aleatoric_uncertainty, predicted_labels_aleatoric_var), 0) self.total_uncertainty = torch.cat((self.total_uncertainty, total_var), 0) self.correct_predictions = torch.cat((self.correct_predictions, torch.eq(predicted_labels, target.data.cpu())), 0) return {'test_loss': test_loss.item(), 'test_acc': acc.item(), 'test_loss': test_loss.item()} def configure_optimizers(self): for i in range(self.ensemble_size): self.optimizers.append(optim.Adam(self.models[i].parameters(), lr=self.learning_rate)) return self.optimizers def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument('--model_name', type=str, default='ensemble_none') parser.add_argument('--ensemble_size', type=int, default=5) parser.add_argument('--ensembling_method', type=bool, default=True) parser.add_argument('--n_logit_samples', type=int, default=100) return parser def save_results(self): results = pd.concat([ pd.DataFrame(self.IDs.numpy(), columns= ['ID']), pd.DataFrame(self.predicted_labels.numpy(), columns= ['predicted_label']), pd.DataFrame(self.correct_predictions.numpy(), columns= ['correct_prediction']), pd.DataFrame(self.epistemic_uncertainty.numpy(), columns= ['epistemic_uncertainty']), pd.DataFrame(self.aleatoric_uncertainty.numpy(), columns= ['aleatoric_uncertainty']), pd.DataFrame(self.total_uncertainty.numpy(), columns= ['total_uncertainty']), ], axis=1) create_results_directory() results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)
true
true
f70848729c6f81574d502884536d2da9fba9af60
7,069
py
Python
hubploy/auth.py
tjcrone/hubploy
1767689c5277400a473b9f239a851f75051d8240
[ "BSD-3-Clause" ]
null
null
null
hubploy/auth.py
tjcrone/hubploy
1767689c5277400a473b9f239a851f75051d8240
[ "BSD-3-Clause" ]
null
null
null
hubploy/auth.py
tjcrone/hubploy
1767689c5277400a473b9f239a851f75051d8240
[ "BSD-3-Clause" ]
null
null
null
""" Setup authentication from various providers """ import json import os import subprocess import shutil from hubploy.config import get_config from ruamel.yaml import YAML yaml = YAML(typ='rt') def registry_auth(deployment): """ Do appropriate registry authentication for given deployment """ config = get_config(deployment) if 'images' in config and 'registry' in config['images']: registry = config['images']['registry'] provider = registry.get('provider') if provider == 'gcloud': registry_auth_gcloud( deployment, **registry['gcloud'] ) elif provider == 'aws': registry_auth_aws( deployment, **registry['aws'] ) elif provider == 'azure': registry_auth_azure( deployment, **registry['azure'] ) else: raise ValueError( f'Unknown provider {provider} found in hubploy.yaml') def registry_auth_gcloud(deployment, project, service_key): """ Setup GCR authentication with a service_key This changes *global machine state* on where docker can push to! """ service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) subprocess.check_call([ 'gcloud', 'auth', 'activate-service-account', '--key-file', os.path.abspath(service_key_path) ]) subprocess.check_call([ 'gcloud', 'auth', 'configure-docker' ]) def registry_auth_aws(deployment, project, zone, service_key): """ Setup AWS authentication to ECR container registry This changes *global machine state* on where docker can push to! """ service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) if not os.path.isfile(service_key_path): raise FileNotFoundError( f'The service_key file {service_key_path} does not exist') # move credentials to standard location cred_dir = os.path.expanduser('~/.aws') if not os.path.isdir(cred_dir): os.mkdir(cred_dir) shutil.copyfile(service_key_path, os.path.join(cred_dir, 'credentials')) registry = f'{project}.dkr.ecr.{zone}.amazonaws.com' # amazon-ecr-credential-helper installed in .circleci/config.yaml # this adds necessary line to authenticate docker with ecr docker_config_dir = os.path.expanduser('~/.docker') os.makedirs(docker_config_dir, exist_ok=True) docker_config = os.path.join(docker_config_dir, 'config.json') if os.path.exists(docker_config): with open(docker_config, 'r') as f: config = json.load(f) else: config = {'credHelpers': {}} config['credHelpers'][registry] = 'ecr-login' with open(docker_config, 'w') as f: json.dump(config, f) def registry_auth_azure(deployment, resource_group, registry, auth_file): """ Azure authentication for ACR In hubploy.yaml include: registry: provider: azure azure: resource_group: resource_group_name registry: registry_name auth_file: azure_auth_file.yaml The azure_service_principal.json file should have the following keys: appId, tenant, password. This is the format produced by the az command when creating a service principal. 
See https://docs.microsoft.com/en-us/azure/aks/kubernetes-service-principal """ # parse Azure auth file auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file) with open(auth_file_path) as f: auth = yaml.load(f) # log in subprocess.check_call([ 'az', 'login', '--service-principal', '--user', auth['appId'], '--tenant', auth['tenant'], '--password', auth['password'] ]) # log in to ACR subprocess.check_call([ 'az', 'acr', 'login', '--name', registry ]) def cluster_auth(deployment): """ Do appropriate cluster authentication for given deployment """ config = get_config(deployment) if 'cluster' in config: cluster = config['cluster'] provider = cluster.get('provider') if provider == 'gcloud': cluster_auth_gcloud( deployment, **cluster['gcloud'] ) elif provider == 'aws': cluster_auth_aws( deployment, **cluster['aws'] ) elif provider == 'azure': cluster_auth_azure( deployment, **cluster['azure'] ) else: raise ValueError( f'Unknown provider {provider} found in hubploy.yaml') def cluster_auth_gcloud(deployment, project, cluster, zone, service_key): """ Setup GKE authentication with service_key This changes *global machine state* on what current kubernetes cluster is! """ service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) subprocess.check_call([ 'gcloud', 'auth', 'activate-service-account', '--key-file', os.path.abspath(service_key_path) ]) subprocess.check_call([ 'gcloud', 'container', 'clusters', f'--zone={zone}', f'--project={project}', 'get-credentials', cluster ]) def cluster_auth_aws(deployment, project, cluster, zone, service_key): """ Setup AWS authentication with service_key This changes *global machine state* on what current kubernetes cluster is! """ # move credentials to standard location service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) cred_dir = os.path.expanduser('~/.aws') if not os.path.isdir(cred_dir): os.mkdir(cred_dir) shutil.copyfile(service_key_path, os.path.join(cred_dir, 'credentials')) subprocess.check_call(['aws2', 'eks', 'update-kubeconfig', '--name', cluster, '--region', zone]) def cluster_auth_azure(deployment, resource_group, cluster, auth_file): """ Azure authentication for AKS In hubploy.yaml include: cluster: provider: azure azure: resource_group: resource_group_name cluster: cluster_name auth_file: azure_auth_file.yaml The azure_service_principal.json file should have the following keys: appId, tenant, password. This is the format produced by the az command when creating a service principal. """ # parse Azure auth file auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file) with open(auth_file_path) as f: auth = yaml.load(f) # log in subprocess.check_call([ 'az', 'login', '--service-principal', '--user', auth['appId'], '--tenant', auth['tenant'], '--password', auth['password'] ]) # get cluster credentials subprocess.check_call([ 'az', 'aks', 'get-credentials', '--name', cluster, '--resource-group', resource_group ])
28.735772
82
0.625407
import json import os import subprocess import shutil from hubploy.config import get_config from ruamel.yaml import YAML yaml = YAML(typ='rt') def registry_auth(deployment): config = get_config(deployment) if 'images' in config and 'registry' in config['images']: registry = config['images']['registry'] provider = registry.get('provider') if provider == 'gcloud': registry_auth_gcloud( deployment, **registry['gcloud'] ) elif provider == 'aws': registry_auth_aws( deployment, **registry['aws'] ) elif provider == 'azure': registry_auth_azure( deployment, **registry['azure'] ) else: raise ValueError( f'Unknown provider {provider} found in hubploy.yaml') def registry_auth_gcloud(deployment, project, service_key): service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) subprocess.check_call([ 'gcloud', 'auth', 'activate-service-account', '--key-file', os.path.abspath(service_key_path) ]) subprocess.check_call([ 'gcloud', 'auth', 'configure-docker' ]) def registry_auth_aws(deployment, project, zone, service_key): service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) if not os.path.isfile(service_key_path): raise FileNotFoundError( f'The service_key file {service_key_path} does not exist') cred_dir = os.path.expanduser('~/.aws') if not os.path.isdir(cred_dir): os.mkdir(cred_dir) shutil.copyfile(service_key_path, os.path.join(cred_dir, 'credentials')) registry = f'{project}.dkr.ecr.{zone}.amazonaws.com' docker_config_dir = os.path.expanduser('~/.docker') os.makedirs(docker_config_dir, exist_ok=True) docker_config = os.path.join(docker_config_dir, 'config.json') if os.path.exists(docker_config): with open(docker_config, 'r') as f: config = json.load(f) else: config = {'credHelpers': {}} config['credHelpers'][registry] = 'ecr-login' with open(docker_config, 'w') as f: json.dump(config, f) def registry_auth_azure(deployment, resource_group, registry, auth_file): auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file) with open(auth_file_path) as f: auth = yaml.load(f) subprocess.check_call([ 'az', 'login', '--service-principal', '--user', auth['appId'], '--tenant', auth['tenant'], '--password', auth['password'] ]) subprocess.check_call([ 'az', 'acr', 'login', '--name', registry ]) def cluster_auth(deployment): config = get_config(deployment) if 'cluster' in config: cluster = config['cluster'] provider = cluster.get('provider') if provider == 'gcloud': cluster_auth_gcloud( deployment, **cluster['gcloud'] ) elif provider == 'aws': cluster_auth_aws( deployment, **cluster['aws'] ) elif provider == 'azure': cluster_auth_azure( deployment, **cluster['azure'] ) else: raise ValueError( f'Unknown provider {provider} found in hubploy.yaml') def cluster_auth_gcloud(deployment, project, cluster, zone, service_key): service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) subprocess.check_call([ 'gcloud', 'auth', 'activate-service-account', '--key-file', os.path.abspath(service_key_path) ]) subprocess.check_call([ 'gcloud', 'container', 'clusters', f'--zone={zone}', f'--project={project}', 'get-credentials', cluster ]) def cluster_auth_aws(deployment, project, cluster, zone, service_key): service_key_path = os.path.join( 'deployments', deployment, 'secrets', service_key ) cred_dir = os.path.expanduser('~/.aws') if not os.path.isdir(cred_dir): os.mkdir(cred_dir) shutil.copyfile(service_key_path, os.path.join(cred_dir, 'credentials')) subprocess.check_call(['aws2', 'eks', 'update-kubeconfig', '--name', 
cluster, '--region', zone]) def cluster_auth_azure(deployment, resource_group, cluster, auth_file): auth_file_path = os.path.join('deployments', deployment, 'secrets', auth_file) with open(auth_file_path) as f: auth = yaml.load(f) subprocess.check_call([ 'az', 'login', '--service-principal', '--user', auth['appId'], '--tenant', auth['tenant'], '--password', auth['password'] ]) subprocess.check_call([ 'az', 'aks', 'get-credentials', '--name', cluster, '--resource-group', resource_group ])
true
true
f708492421111894a9ec36490aef33347ccb2b29
2,055
py
Python
examples/simple/app.py
cdpath/fastapi_login
4e9cde923b60a6852a24b34292e2a10f4333ced3
[ "MIT" ]
null
null
null
examples/simple/app.py
cdpath/fastapi_login
4e9cde923b60a6852a24b34292e2a10f4333ced3
[ "MIT" ]
null
null
null
examples/simple/app.py
cdpath/fastapi_login
4e9cde923b60a6852a24b34292e2a10f4333ced3
[ "MIT" ]
null
null
null
import uuid

from fastapi import Depends, FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.security import OAuth2PasswordRequestForm
from pydantic import BaseSettings, BaseModel, UUID4

from fastapi_login import LoginManager
from fastapi_login.exceptions import InvalidCredentialsException


class Settings(BaseSettings):
    secret: str  # autmatically taken from environement variable


class UserCreate(BaseModel):
    email: str
    password: str


class User(UserCreate):
    id: UUID4


DEFAULT_SETTINGS = Settings(_env_file=".env")
DB = {
    "users": {}
}
TOKEN_URL = "/auth/token"

app = FastAPI()
manager = LoginManager(DEFAULT_SETTINGS.secret, TOKEN_URL)


@manager.user_loader
def get_user(email: str):
    return DB["users"].get(email)


@app.get("/")
def index():
    with open("./templates/index.html", 'r') as f:
        return HTMLResponse(content=f.read())


@app.post("/auth/register")
def register(user: UserCreate):
    if user.email in DB["users"]:
        raise HTTPException(status_code=400, detail="A user with this email already exists")
    else:
        db_user = User(**user.dict(), id=uuid.uuid4())  # PLEASE hash your passwords in real world applications
        DB["users"][db_user.email] = db_user
        return {"detail": "Successfull registered"}


@app.post(TOKEN_URL)
def login(data: OAuth2PasswordRequestForm = Depends()):
    email = data.username
    password = data.password
    user = get_user(email)  # we are using the same function to retrieve the user
    if not user:
        raise InvalidCredentialsException  # you can also use your own HTTPException
    elif password != user.password:
        raise InvalidCredentialsException

    access_token = manager.create_access_token(
        data=dict(sub=email)
    )
    return {'access_token': access_token, 'token_type': 'bearer'}


@app.get("/private")
def private_route(user=Depends(manager)):
    return {"detail": f"Welcome {user.email}"}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run("app:app")
24.759036
92
0.709976
import uuid

from fastapi import Depends, FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.security import OAuth2PasswordRequestForm
from pydantic import BaseSettings, BaseModel, UUID4

from fastapi_login import LoginManager
from fastapi_login.exceptions import InvalidCredentialsException


class Settings(BaseSettings):
    secret: str


class UserCreate(BaseModel):
    email: str
    password: str


class User(UserCreate):
    id: UUID4


DEFAULT_SETTINGS = Settings(_env_file=".env")
DB = {
    "users": {}
}
TOKEN_URL = "/auth/token"

app = FastAPI()
manager = LoginManager(DEFAULT_SETTINGS.secret, TOKEN_URL)


@manager.user_loader
def get_user(email: str):
    return DB["users"].get(email)


@app.get("/")
def index():
    with open("./templates/index.html", 'r') as f:
        return HTMLResponse(content=f.read())


@app.post("/auth/register")
def register(user: UserCreate):
    if user.email in DB["users"]:
        raise HTTPException(status_code=400, detail="A user with this email already exists")
    else:
        db_user = User(**user.dict(), id=uuid.uuid4())
        DB["users"][db_user.email] = db_user
        return {"detail": "Successfull registered"}


@app.post(TOKEN_URL)
def login(data: OAuth2PasswordRequestForm = Depends()):
    email = data.username
    password = data.password
    user = get_user(email)
    if not user:
        raise InvalidCredentialsException
    elif password != user.password:
        raise InvalidCredentialsException

    access_token = manager.create_access_token(
        data=dict(sub=email)
    )
    return {'access_token': access_token, 'token_type': 'bearer'}


@app.get("/private")
def private_route(user=Depends(manager)):
    return {"detail": f"Welcome {user.email}"}


if __name__ == "__main__":
    import uvicorn
    uvicorn.run("app:app")
true
true
f7084a1fac2c524a1eab21ba916d5d53938c4ebb
20,361
py
Python
python/ray/tune/tests/test_function_api.py
jacobhjkim/ray
936cb5929c455102d5638ff5d59c80c4ae94770f
[ "Apache-2.0" ]
null
null
null
python/ray/tune/tests/test_function_api.py
jacobhjkim/ray
936cb5929c455102d5638ff5d59c80c4ae94770f
[ "Apache-2.0" ]
43
2021-02-27T08:02:40.000Z
2022-03-26T07:02:23.000Z
python/ray/tune/tests/test_function_api.py
jacobhjkim/ray
936cb5929c455102d5638ff5d59c80c4ae94770f
[ "Apache-2.0" ]
null
null
null
import json import os import sys import shutil import tempfile import unittest import ray import ray.cloudpickle as cloudpickle from ray.rllib import _register_all from ray import tune from ray.tune.logger import NoopLogger from ray.tune.utils.trainable import TrainableUtil from ray.tune.function_runner import with_parameters, wrap_function, \ FuncCheckpointUtil from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION def creator_generator(logdir): def logger_creator(config): return NoopLogger(config, logdir) return logger_creator class FuncCheckpointUtilTest(unittest.TestCase): def setUp(self): self.logdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.logdir) def testEmptyCheckpoint(self): checkpoint_dir = FuncCheckpointUtil.mk_null_checkpoint_dir(self.logdir) assert FuncCheckpointUtil.is_null_checkpoint(checkpoint_dir) def testTempCheckpointDir(self): checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir) assert FuncCheckpointUtil.is_temp_checkpoint_dir(checkpoint_dir) def testConvertTempToPermanent(self): checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir) new_checkpoint_dir = FuncCheckpointUtil.create_perm_checkpoint( checkpoint_dir, self.logdir, step=4) assert new_checkpoint_dir == TrainableUtil.find_checkpoint_dir( new_checkpoint_dir) assert os.path.exists(new_checkpoint_dir) assert not FuncCheckpointUtil.is_temp_checkpoint_dir( new_checkpoint_dir) tmp_checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir( self.logdir) assert tmp_checkpoint_dir != new_checkpoint_dir class FunctionCheckpointingTest(unittest.TestCase): def setUp(self): self.logdir = tempfile.mkdtemp() self.logger_creator = creator_generator(self.logdir) def tearDown(self): shutil.rmtree(self.logdir) def testCheckpointReuse(self): """Test that repeated save/restore never reuses same checkpoint dir.""" def train(config, checkpoint_dir=None): if checkpoint_dir: count = sum("checkpoint-" in path for path in os.listdir(checkpoint_dir)) assert count == 1, os.listdir(checkpoint_dir) for step in range(20): with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "a").close() tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore(checkpoint) for i in range(2): result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() assert result[TRAINING_ITERATION] == 10 def testCheckpointReuseObject(self): """Test that repeated save/restore never reuses same checkpoint dir.""" def train(config, checkpoint_dir=None): if checkpoint_dir: count = sum("checkpoint-" in path for path in os.listdir(checkpoint_dir)) assert count == 1, os.listdir(checkpoint_dir) for step in range(20): with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "a").close() tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore_from_object(checkpoint) for i in range(2): result = new_trainable.train() checkpoint = new_trainable.save_to_object() new_trainable.stop() self.assertTrue(result[TRAINING_ITERATION] == 10) def testCheckpointReuseObjectWithoutTraining(self): """Test that repeated save/restore never reuses same checkpoint dir.""" def train(config, checkpoint_dir=None): 
if checkpoint_dir: count = sum("checkpoint-" in path for path in os.listdir(checkpoint_dir)) assert count == 1, os.listdir(checkpoint_dir) for step in range(20): with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "a").close() tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) for i in range(2): result = new_trainable.train() checkpoint = new_trainable.save_to_object() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore_from_object(checkpoint) new_trainable2.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore_from_object(checkpoint) result = new_trainable2.train() new_trainable2.stop() self.assertTrue(result[TRAINING_ITERATION] == 3) def testReuseNullCheckpoint(self): def train(config, checkpoint_dir=None): assert not checkpoint_dir for step in range(10): tune.report(test=step) # Create checkpoint wrapped = wrap_function(train) checkpoint = None new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() # Use the checkpoint a couple of times for i in range(3): new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.restore(checkpoint) new_trainable.stop() # Make sure the result is still good new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.restore(checkpoint) result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() self.assertTrue(result[TRAINING_ITERATION] == 1) def testMultipleNullCheckpoints(self): def train(config, checkpoint_dir=None): assert not checkpoint_dir for step in range(10): tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore(checkpoint) result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() self.assertTrue(result[TRAINING_ITERATION] == 1) def testMultipleNullMemoryCheckpoints(self): def train(config, checkpoint_dir=None): assert not checkpoint_dir for step in range(10): tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore_from_object(checkpoint) result = new_trainable.train() checkpoint = new_trainable.save_to_object() new_trainable.stop() assert result[TRAINING_ITERATION] == 1 def testFunctionNoCheckpointing(self): def train(config, checkpoint_dir=None): if checkpoint_dir: assert os.path.exists(checkpoint_dir) for step in range(10): tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore(checkpoint) result = new_trainable2.train() self.assertEquals(result[TRAINING_ITERATION], 1) checkpoint = new_trainable2.save() new_trainable2.stop() def testFunctionRecurringSave(self): """This tests that save and restore are commutative.""" def train(config, checkpoint_dir=None): if checkpoint_dir: assert os.path.exists(checkpoint_dir) for step in range(10): if step % 3 == 0: with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint") with 
open(path, "w") as f: f.write(json.dumps({"step": step})) tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.train() checkpoint_obj = new_trainable.save_to_object() new_trainable.restore_from_object(checkpoint_obj) checkpoint = new_trainable.save() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore(checkpoint) new_trainable2.train() new_trainable2.stop() def testFunctionImmediateSave(self): """This tests that save and restore are commutative.""" def train(config, checkpoint_dir=None): if checkpoint_dir: assert os.path.exists(checkpoint_dir) for step in range(10): with tune.checkpoint_dir(step=step) as checkpoint_dir: print(checkpoint_dir) path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "w").close() tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.train() new_trainable.train() checkpoint_obj = new_trainable.save_to_object() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore_from_object(checkpoint_obj) checkpoint_obj = new_trainable2.save_to_object() new_trainable2.train() result = new_trainable2.train() assert sum("tmp" in path for path in os.listdir(self.logdir)) == 1 new_trainable2.stop() assert sum("tmp" in path for path in os.listdir(self.logdir)) == 0 assert result[TRAINING_ITERATION] == 4 class FunctionApiTest(unittest.TestCase): def setUp(self): ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024) def tearDown(self): ray.shutdown() _register_all() # re-register the evicted objects def testCheckpointError(self): def train(config, checkpoint_dir=False): pass with self.assertRaises(ValueError): tune.run(train, checkpoint_freq=1) with self.assertRaises(ValueError): tune.run(train, checkpoint_at_end=True) def testCheckpointFunctionAtEnd(self): def train(config, checkpoint_dir=False): for i in range(10): tune.report(test=i) with tune.checkpoint_dir(step=10) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write("hello") [trial] = tune.run(train).trials assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log")) def testCheckpointFunctionAtEndContext(self): def train(config, checkpoint_dir=False): for i in range(10): tune.report(test=i) with tune.checkpoint_dir(step=10) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write("hello") [trial] = tune.run(train).trials assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log")) def testVariousCheckpointFunctionAtEnd(self): def train(config, checkpoint_dir=False): for i in range(10): with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write("hello") tune.report(test=i) with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log2") with open(checkpoint_path, "w") as f: f.write("goodbye") [trial] = tune.run(train, keep_checkpoints_num=3).trials assert os.path.exists( os.path.join(trial.checkpoint.value, "ckpt.log2")) def testReuseCheckpoint(self): def train(config, checkpoint_dir=None): itr = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f: itr = int(f.read()) + 1 for i in range(itr, 
config["max_iter"]): with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write(str(i)) tune.report(test=i, training_iteration=i) [trial] = tune.run( train, config={ "max_iter": 5 }, ).trials last_ckpt = trial.checkpoint.value assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log")) analysis = tune.run(train, config={"max_iter": 10}, restore=last_ckpt) trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 5 def testRetry(self): def train(config, checkpoint_dir=None): restored = bool(checkpoint_dir) itr = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f: itr = int(f.read()) + 1 for i in range(itr, 10): if i == 5 and not restored: raise Exception("try to fail me") with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write(str(i)) tune.report(test=i, training_iteration=i) analysis = tune.run(train, max_failures=3) last_ckpt = analysis.trials[0].checkpoint.value assert os.path.exists(os.path.join(last_ckpt, "ckpt.log")) trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 10 def testEnabled(self): def train(config, checkpoint_dir=None): is_active = tune.is_session_enabled() if is_active: tune.report(active=is_active) return is_active assert train({}) is False analysis = tune.run(train) t = analysis.trials[0] assert t.last_result["active"] def testBlankCheckpoint(self): def train(config, checkpoint_dir=None): restored = bool(checkpoint_dir) itr = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f: itr = int(f.read()) + 1 for i in range(itr, 10): if i == 5 and not restored: raise Exception("try to fail me") with tune.checkpoint_dir(step=itr) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write(str(i)) tune.report(test=i, training_iteration=i) analysis = tune.run(train, max_failures=3) trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 10 def testWithParameters(self): class Data: def __init__(self): self.data = [0] * 500_000 data = Data() data.data[100] = 1 def train(config, data=None): data.data[101] = 2 # Changes are local tune.report(metric=len(data.data), hundred=data.data[100]) trial_1, trial_2 = tune.run( with_parameters(train, data=data), num_samples=2).trials self.assertEquals(data.data[101], 0) self.assertEquals(trial_1.last_result["metric"], 500_000) self.assertEquals(trial_1.last_result["hundred"], 1) self.assertEquals(trial_2.last_result["metric"], 500_000) self.assertEquals(trial_2.last_result["hundred"], 1) self.assertTrue(str(trial_1).startswith("train_")) # With checkpoint dir parameter def train(config, checkpoint_dir="DIR", data=None): data.data[101] = 2 # Changes are local tune.report(metric=len(data.data), cp=checkpoint_dir) trial_1, trial_2 = tune.run( with_parameters(train, data=data), num_samples=2).trials self.assertEquals(data.data[101], 0) self.assertEquals(trial_1.last_result["metric"], 500_000) self.assertEquals(trial_1.last_result["cp"], "DIR") self.assertEquals(trial_2.last_result["metric"], 500_000) self.assertEquals(trial_2.last_result["cp"], "DIR") self.assertTrue(str(trial_1).startswith("train_")) def testWithParameters2(self): class Data: def __init__(self): import numpy 
as np self.data = np.random.rand((2 * 1024 * 1024)) def train(config, data=None): tune.report(metric=len(data.data)) trainable = tune.with_parameters(train, data=Data()) dumped = cloudpickle.dumps(trainable) assert sys.getsizeof(dumped) < 100 * 1024 def testReturnAnonymous(self): def train(config): return config["a"] trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4) self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8) def testReturnSpecific(self): def train(config): return {"m": config["a"]} trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result["m"], 4) self.assertEquals(trial_2.last_result["m"], 8) def testYieldAnonymous(self): def train(config): for i in range(10): yield config["a"] + i trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4 + 9) self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8 + 9) def testYieldSpecific(self): def train(config): for i in range(10): yield {"m": config["a"] + i} trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result["m"], 4 + 9) self.assertEquals(trial_2.last_result["m"], 8 + 9)
37.916201
79
0.602475
import json import os import sys import shutil import tempfile import unittest import ray import ray.cloudpickle as cloudpickle from ray.rllib import _register_all from ray import tune from ray.tune.logger import NoopLogger from ray.tune.utils.trainable import TrainableUtil from ray.tune.function_runner import with_parameters, wrap_function, \ FuncCheckpointUtil from ray.tune.result import DEFAULT_METRIC, TRAINING_ITERATION def creator_generator(logdir): def logger_creator(config): return NoopLogger(config, logdir) return logger_creator class FuncCheckpointUtilTest(unittest.TestCase): def setUp(self): self.logdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.logdir) def testEmptyCheckpoint(self): checkpoint_dir = FuncCheckpointUtil.mk_null_checkpoint_dir(self.logdir) assert FuncCheckpointUtil.is_null_checkpoint(checkpoint_dir) def testTempCheckpointDir(self): checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir) assert FuncCheckpointUtil.is_temp_checkpoint_dir(checkpoint_dir) def testConvertTempToPermanent(self): checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir(self.logdir) new_checkpoint_dir = FuncCheckpointUtil.create_perm_checkpoint( checkpoint_dir, self.logdir, step=4) assert new_checkpoint_dir == TrainableUtil.find_checkpoint_dir( new_checkpoint_dir) assert os.path.exists(new_checkpoint_dir) assert not FuncCheckpointUtil.is_temp_checkpoint_dir( new_checkpoint_dir) tmp_checkpoint_dir = FuncCheckpointUtil.mk_temp_checkpoint_dir( self.logdir) assert tmp_checkpoint_dir != new_checkpoint_dir class FunctionCheckpointingTest(unittest.TestCase): def setUp(self): self.logdir = tempfile.mkdtemp() self.logger_creator = creator_generator(self.logdir) def tearDown(self): shutil.rmtree(self.logdir) def testCheckpointReuse(self): def train(config, checkpoint_dir=None): if checkpoint_dir: count = sum("checkpoint-" in path for path in os.listdir(checkpoint_dir)) assert count == 1, os.listdir(checkpoint_dir) for step in range(20): with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "a").close() tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore(checkpoint) for i in range(2): result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() assert result[TRAINING_ITERATION] == 10 def testCheckpointReuseObject(self): def train(config, checkpoint_dir=None): if checkpoint_dir: count = sum("checkpoint-" in path for path in os.listdir(checkpoint_dir)) assert count == 1, os.listdir(checkpoint_dir) for step in range(20): with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "a").close() tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore_from_object(checkpoint) for i in range(2): result = new_trainable.train() checkpoint = new_trainable.save_to_object() new_trainable.stop() self.assertTrue(result[TRAINING_ITERATION] == 10) def testCheckpointReuseObjectWithoutTraining(self): def train(config, checkpoint_dir=None): if checkpoint_dir: count = sum("checkpoint-" in path for path in os.listdir(checkpoint_dir)) assert count == 1, os.listdir(checkpoint_dir) for step in range(20): with tune.checkpoint_dir(step=step) as checkpoint_dir: 
path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "a").close() tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) for i in range(2): result = new_trainable.train() checkpoint = new_trainable.save_to_object() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore_from_object(checkpoint) new_trainable2.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore_from_object(checkpoint) result = new_trainable2.train() new_trainable2.stop() self.assertTrue(result[TRAINING_ITERATION] == 3) def testReuseNullCheckpoint(self): def train(config, checkpoint_dir=None): assert not checkpoint_dir for step in range(10): tune.report(test=step) wrapped = wrap_function(train) checkpoint = None new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() for i in range(3): new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.restore(checkpoint) new_trainable.stop() new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.restore(checkpoint) result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() self.assertTrue(result[TRAINING_ITERATION] == 1) def testMultipleNullCheckpoints(self): def train(config, checkpoint_dir=None): assert not checkpoint_dir for step in range(10): tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore(checkpoint) result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() self.assertTrue(result[TRAINING_ITERATION] == 1) def testMultipleNullMemoryCheckpoints(self): def train(config, checkpoint_dir=None): assert not checkpoint_dir for step in range(10): tune.report(test=step) wrapped = wrap_function(train) checkpoint = None for i in range(5): new_trainable = wrapped(logger_creator=self.logger_creator) if checkpoint: new_trainable.restore_from_object(checkpoint) result = new_trainable.train() checkpoint = new_trainable.save_to_object() new_trainable.stop() assert result[TRAINING_ITERATION] == 1 def testFunctionNoCheckpointing(self): def train(config, checkpoint_dir=None): if checkpoint_dir: assert os.path.exists(checkpoint_dir) for step in range(10): tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) result = new_trainable.train() checkpoint = new_trainable.save() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore(checkpoint) result = new_trainable2.train() self.assertEquals(result[TRAINING_ITERATION], 1) checkpoint = new_trainable2.save() new_trainable2.stop() def testFunctionRecurringSave(self): def train(config, checkpoint_dir=None): if checkpoint_dir: assert os.path.exists(checkpoint_dir) for step in range(10): if step % 3 == 0: with tune.checkpoint_dir(step=step) as checkpoint_dir: path = os.path.join(checkpoint_dir, "checkpoint") with open(path, "w") as f: f.write(json.dumps({"step": step})) tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.train() checkpoint_obj = new_trainable.save_to_object() new_trainable.restore_from_object(checkpoint_obj) checkpoint = new_trainable.save() new_trainable.stop() new_trainable2 = 
wrapped(logger_creator=self.logger_creator) new_trainable2.restore(checkpoint) new_trainable2.train() new_trainable2.stop() def testFunctionImmediateSave(self): def train(config, checkpoint_dir=None): if checkpoint_dir: assert os.path.exists(checkpoint_dir) for step in range(10): with tune.checkpoint_dir(step=step) as checkpoint_dir: print(checkpoint_dir) path = os.path.join(checkpoint_dir, "checkpoint-{}".format(step)) open(path, "w").close() tune.report(test=step) wrapped = wrap_function(train) new_trainable = wrapped(logger_creator=self.logger_creator) new_trainable.train() new_trainable.train() checkpoint_obj = new_trainable.save_to_object() new_trainable.stop() new_trainable2 = wrapped(logger_creator=self.logger_creator) new_trainable2.restore_from_object(checkpoint_obj) checkpoint_obj = new_trainable2.save_to_object() new_trainable2.train() result = new_trainable2.train() assert sum("tmp" in path for path in os.listdir(self.logdir)) == 1 new_trainable2.stop() assert sum("tmp" in path for path in os.listdir(self.logdir)) == 0 assert result[TRAINING_ITERATION] == 4 class FunctionApiTest(unittest.TestCase): def setUp(self): ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024) def tearDown(self): ray.shutdown() _register_all() def testCheckpointError(self): def train(config, checkpoint_dir=False): pass with self.assertRaises(ValueError): tune.run(train, checkpoint_freq=1) with self.assertRaises(ValueError): tune.run(train, checkpoint_at_end=True) def testCheckpointFunctionAtEnd(self): def train(config, checkpoint_dir=False): for i in range(10): tune.report(test=i) with tune.checkpoint_dir(step=10) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write("hello") [trial] = tune.run(train).trials assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log")) def testCheckpointFunctionAtEndContext(self): def train(config, checkpoint_dir=False): for i in range(10): tune.report(test=i) with tune.checkpoint_dir(step=10) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write("hello") [trial] = tune.run(train).trials assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log")) def testVariousCheckpointFunctionAtEnd(self): def train(config, checkpoint_dir=False): for i in range(10): with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write("hello") tune.report(test=i) with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log2") with open(checkpoint_path, "w") as f: f.write("goodbye") [trial] = tune.run(train, keep_checkpoints_num=3).trials assert os.path.exists( os.path.join(trial.checkpoint.value, "ckpt.log2")) def testReuseCheckpoint(self): def train(config, checkpoint_dir=None): itr = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f: itr = int(f.read()) + 1 for i in range(itr, config["max_iter"]): with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write(str(i)) tune.report(test=i, training_iteration=i) [trial] = tune.run( train, config={ "max_iter": 5 }, ).trials last_ckpt = trial.checkpoint.value assert os.path.exists(os.path.join(trial.checkpoint.value, "ckpt.log")) analysis = tune.run(train, config={"max_iter": 10}, restore=last_ckpt) 
trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 5 def testRetry(self): def train(config, checkpoint_dir=None): restored = bool(checkpoint_dir) itr = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f: itr = int(f.read()) + 1 for i in range(itr, 10): if i == 5 and not restored: raise Exception("try to fail me") with tune.checkpoint_dir(step=i) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write(str(i)) tune.report(test=i, training_iteration=i) analysis = tune.run(train, max_failures=3) last_ckpt = analysis.trials[0].checkpoint.value assert os.path.exists(os.path.join(last_ckpt, "ckpt.log")) trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 10 def testEnabled(self): def train(config, checkpoint_dir=None): is_active = tune.is_session_enabled() if is_active: tune.report(active=is_active) return is_active assert train({}) is False analysis = tune.run(train) t = analysis.trials[0] assert t.last_result["active"] def testBlankCheckpoint(self): def train(config, checkpoint_dir=None): restored = bool(checkpoint_dir) itr = 0 if checkpoint_dir: with open(os.path.join(checkpoint_dir, "ckpt.log"), "r") as f: itr = int(f.read()) + 1 for i in range(itr, 10): if i == 5 and not restored: raise Exception("try to fail me") with tune.checkpoint_dir(step=itr) as checkpoint_dir: checkpoint_path = os.path.join(checkpoint_dir, "ckpt.log") with open(checkpoint_path, "w") as f: f.write(str(i)) tune.report(test=i, training_iteration=i) analysis = tune.run(train, max_failures=3) trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 10 def testWithParameters(self): class Data: def __init__(self): self.data = [0] * 500_000 data = Data() data.data[100] = 1 def train(config, data=None): data.data[101] = 2 tune.report(metric=len(data.data), hundred=data.data[100]) trial_1, trial_2 = tune.run( with_parameters(train, data=data), num_samples=2).trials self.assertEquals(data.data[101], 0) self.assertEquals(trial_1.last_result["metric"], 500_000) self.assertEquals(trial_1.last_result["hundred"], 1) self.assertEquals(trial_2.last_result["metric"], 500_000) self.assertEquals(trial_2.last_result["hundred"], 1) self.assertTrue(str(trial_1).startswith("train_")) def train(config, checkpoint_dir="DIR", data=None): data.data[101] = 2 tune.report(metric=len(data.data), cp=checkpoint_dir) trial_1, trial_2 = tune.run( with_parameters(train, data=data), num_samples=2).trials self.assertEquals(data.data[101], 0) self.assertEquals(trial_1.last_result["metric"], 500_000) self.assertEquals(trial_1.last_result["cp"], "DIR") self.assertEquals(trial_2.last_result["metric"], 500_000) self.assertEquals(trial_2.last_result["cp"], "DIR") self.assertTrue(str(trial_1).startswith("train_")) def testWithParameters2(self): class Data: def __init__(self): import numpy as np self.data = np.random.rand((2 * 1024 * 1024)) def train(config, data=None): tune.report(metric=len(data.data)) trainable = tune.with_parameters(train, data=Data()) dumped = cloudpickle.dumps(trainable) assert sys.getsizeof(dumped) < 100 * 1024 def testReturnAnonymous(self): def train(config): return config["a"] trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4) self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8) def 
testReturnSpecific(self): def train(config): return {"m": config["a"]} trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result["m"], 4) self.assertEquals(trial_2.last_result["m"], 8) def testYieldAnonymous(self): def train(config): for i in range(10): yield config["a"] + i trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result[DEFAULT_METRIC], 4 + 9) self.assertEquals(trial_2.last_result[DEFAULT_METRIC], 8 + 9) def testYieldSpecific(self): def train(config): for i in range(10): yield {"m": config["a"] + i} trial_1, trial_2 = tune.run( train, config={ "a": tune.grid_search([4, 8]) }).trials self.assertEquals(trial_1.last_result["m"], 4 + 9) self.assertEquals(trial_2.last_result["m"], 8 + 9)
true
true
f7084adeb4e464e80a35a46eed5e75ed2ec77fdd
12,350
py
Python
cinder/api/v2/volumes.py
traghavendra/cinder-train
49af592c61da3216c04f5771b8ebf0927c5ce1c8
[ "Apache-2.0" ]
null
null
null
cinder/api/v2/volumes.py
traghavendra/cinder-train
49af592c61da3216c04f5771b8ebf0927c5ce1c8
[ "Apache-2.0" ]
28
2017-08-17T14:46:05.000Z
2022-03-29T12:42:12.000Z
cinder/api/v2/volumes.py
traghavendra/cinder-train
49af592c61da3216c04f5771b8ebf0927c5ce1c8
[ "Apache-2.0" ]
3
2017-04-27T16:11:40.000Z
2020-02-12T21:27:00.000Z
# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes api.""" from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import uuidutils from six.moves import http_client import webob from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api.contrib import scheduler_hints from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volumes from cinder.api.v2.views import volumes as volume_views from cinder.api import validation from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder.image import glance from cinder import objects from cinder import utils from cinder import volume as cinder_volume from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" _view_builder_class = volume_views.ViewBuilder def __init__(self, ext_mgr): self.volume_api = cinder_volume.API() self.group_api = group_api.API() self.ext_mgr = ext_mgr super(VolumeController, self).__init__() def show(self, req, id): """Return data about the given volume.""" context = req.environ['cinder.context'] # Not found exception will be handled at the wsgi level vol = self.volume_api.get(context, id, viewable_admin_meta=True) req.cache_db_volume(vol) api_utils.add_visible_admin_metadata(vol) return self._view_builder.detail(req, vol) def delete(self, req, id): """Delete a volume.""" context = req.environ['cinder.context'] cascade = utils.get_bool_param('cascade', req.params) LOG.info("Delete volume with id: %s", id) # Not found exception will be handled at the wsgi level volume = self.volume_api.get(context, id) self.volume_api.delete(context, volume, cascade=cascade) return webob.Response(status_int=http_client.ACCEPTED) def index(self, req): """Returns a summary list of volumes.""" return self._get_volumes(req, is_detail=False) def detail(self, req): """Returns a detailed list of volumes.""" return self._get_volumes(req, is_detail=True) def _get_volumes(self, req, is_detail): """Returns a list of volumes, transformed through view builder.""" context = req.environ['cinder.context'] params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params # NOTE(wanghao): Always removing glance_metadata since we support it # only in API version >= VOLUME_LIST_GLANCE_METADATA. 
filters.pop('glance_metadata', None) api_utils.remove_invalid_filter_options( context, filters, self._get_volume_filter_options()) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' if 'name' in filters: filters['display_name'] = filters.pop('name') self.volume_api.check_volume_filters(filters) volumes = self.volume_api.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, viewable_admin_meta=True, offset=offset) for volume in volumes: api_utils.add_visible_admin_metadata(volume) req.cache_db_volumes(volumes.objects) if is_detail: volumes = self._view_builder.detail_list(req, volumes) else: volumes = self._view_builder.summary_list(req, volumes) return volumes def _image_uuid_from_ref(self, image_ref, context): # If the image ref was generated by nova api, strip image_ref # down to an id. image_uuid = None try: image_uuid = image_ref.split('/').pop() except AttributeError: msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) image_service = glance.get_default_image_service() # First see if this is an actual image ID if uuidutils.is_uuid_like(image_uuid): try: image = image_service.show(context, image_uuid) if 'id' in image: return image['id'] except Exception: # Pass and see if there is a matching image name pass # Could not find by ID, check if it is an image name try: params = {'filters': {'name': image_ref}} images = list(image_service.detail(context, **params)) if len(images) > 1: msg = _("Multiple matches found for '%s', use an ID to be more" " specific.") % image_ref raise exc.HTTPConflict(explanation=msg) for img in images: return img['id'] except exc.HTTPConflict: raise except Exception: # Pass the other exception and let default not found error # handling take care of it pass msg = _("Invalid image identifier or unable to " "access requested image.") raise exc.HTTPBadRequest(explanation=msg) @wsgi.response(http_client.ACCEPTED) @validation.schema(volumes.create, mv.V2_BASE_VERSION) def create(self, req, body): """Creates a new volume.""" LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] # NOTE (pooja_jadhav) To fix bug 1774155, scheduler hints is not # loaded as a standard extension. If user passes # OS-SCH-HNT:scheduler_hints in the request body, then it will be # validated in the create method and this method will add # scheduler_hints in body['volume']. 
body = scheduler_hints.create(req, body) volume = body['volume'] kwargs = {} self.validate_name_and_description(volume, check_length=False) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in volume: volume['display_name'] = volume.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in volume: volume['display_description'] = volume.pop('description') if 'image_id' in volume: volume['imageRef'] = volume.pop('image_id') req_volume_type = volume.get('volume_type', None) if req_volume_type: # Not found exception will be handled at the wsgi level kwargs['volume_type'] = ( objects.VolumeType.get_by_name_or_id(context, req_volume_type)) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: # Not found exception will be handled at the wsgi level kwargs['snapshot'] = self.volume_api.get_snapshot(context, snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: # Not found exception will be handled at the wsgi level kwargs['source_volume'] = \ self.volume_api.get_volume(context, source_volid) else: kwargs['source_volume'] = None kwargs['group'] = None kwargs['consistencygroup'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: # Not found exception will be handled at the wsgi level kwargs['group'] = self.group_api.get(context, consistencygroup_id) size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] LOG.info("Create volume of %s GB", size) image_ref = volume.get('imageRef') if image_ref is not None: image_uuid = self._image_uuid_from_ref(image_ref, context) kwargs['image_id'] = image_uuid kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) kwargs['multiattach'] = utils.get_bool_param('multiattach', volume) if kwargs.get('multiattach', False): msg = ("The option 'multiattach' " "is deprecated and will be removed in a future " "release. 
The default behavior going forward will " "be to specify multiattach enabled volume types.") versionutils.report_deprecated_feature(LOG, msg) try: new_volume = self.volume_api.create( context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) except exception.VolumeTypeDefaultMisconfiguredError as err: raise webob.exc.HTTPInternalServerError(explanation=err.msg) retval = self._view_builder.detail(req, new_volume) return retval def _get_volume_filter_options(self): """Return volume search options allowed by non-admin.""" return common.get_enabled_resource_filters('volume')['volume'] @validation.schema(volumes.update, mv.V2_BASE_VERSION, mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES)) @validation.schema(volumes.update_volume_v353, mv.SUPPORT_VOLUME_SCHEMA_CHANGES) def update(self, req, id, body): """Update a volume.""" context = req.environ['cinder.context'] update_dict = body['volume'] self.validate_name_and_description(update_dict, check_length=False) # NOTE(thingee): v2 API allows name instead of display_name if 'name' in update_dict: update_dict['display_name'] = update_dict.pop('name') # NOTE(thingee): v2 API allows description instead of # display_description if 'description' in update_dict: update_dict['display_description'] = update_dict.pop('description') # Not found and Invalid exceptions will be handled at the wsgi level try: volume = self.volume_api.get(context, id, viewable_admin_meta=True) volume_utils.notify_about_volume_usage(context, volume, 'update.start') self.volume_api.update(context, volume, update_dict) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) volume.update(update_dict) api_utils.add_visible_admin_metadata(volume) volume_utils.notify_about_volume_usage(context, volume, 'update.end') return self._view_builder.detail(req, volume) def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr))
38.235294
79
0.629393
from oslo_config import cfg from oslo_log import log as logging from oslo_log import versionutils from oslo_utils import uuidutils from six.moves import http_client import webob from webob import exc from cinder.api import api_utils from cinder.api import common from cinder.api.contrib import scheduler_hints from cinder.api import microversions as mv from cinder.api.openstack import wsgi from cinder.api.schemas import volumes from cinder.api.v2.views import volumes as volume_views from cinder.api import validation from cinder import exception from cinder import group as group_api from cinder.i18n import _ from cinder.image import glance from cinder import objects from cinder import utils from cinder import volume as cinder_volume from cinder.volume import volume_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class VolumeController(wsgi.Controller): _view_builder_class = volume_views.ViewBuilder def __init__(self, ext_mgr): self.volume_api = cinder_volume.API() self.group_api = group_api.API() self.ext_mgr = ext_mgr super(VolumeController, self).__init__() def show(self, req, id): context = req.environ['cinder.context'] vol = self.volume_api.get(context, id, viewable_admin_meta=True) req.cache_db_volume(vol) api_utils.add_visible_admin_metadata(vol) return self._view_builder.detail(req, vol) def delete(self, req, id): context = req.environ['cinder.context'] cascade = utils.get_bool_param('cascade', req.params) LOG.info("Delete volume with id: %s", id) volume = self.volume_api.get(context, id) self.volume_api.delete(context, volume, cascade=cascade) return webob.Response(status_int=http_client.ACCEPTED) def index(self, req): return self._get_volumes(req, is_detail=False) def detail(self, req): return self._get_volumes(req, is_detail=True) def _get_volumes(self, req, is_detail): context = req.environ['cinder.context'] params = req.params.copy() marker, limit, offset = common.get_pagination_params(params) sort_keys, sort_dirs = common.get_sort_params(params) filters = params filters.pop('glance_metadata', None) api_utils.remove_invalid_filter_options( context, filters, self._get_volume_filter_options()) if 'name' in sort_keys: sort_keys[sort_keys.index('name')] = 'display_name' if 'name' in filters: filters['display_name'] = filters.pop('name') self.volume_api.check_volume_filters(filters) volumes = self.volume_api.get_all(context, marker, limit, sort_keys=sort_keys, sort_dirs=sort_dirs, filters=filters, viewable_admin_meta=True, offset=offset) for volume in volumes: api_utils.add_visible_admin_metadata(volume) req.cache_db_volumes(volumes.objects) if is_detail: volumes = self._view_builder.detail_list(req, volumes) else: volumes = self._view_builder.summary_list(req, volumes) return volumes def _image_uuid_from_ref(self, image_ref, context): image_uuid = None try: image_uuid = image_ref.split('/').pop() except AttributeError: msg = _("Invalid imageRef provided.") raise exc.HTTPBadRequest(explanation=msg) image_service = glance.get_default_image_service() if uuidutils.is_uuid_like(image_uuid): try: image = image_service.show(context, image_uuid) if 'id' in image: return image['id'] except Exception: pass try: params = {'filters': {'name': image_ref}} images = list(image_service.detail(context, **params)) if len(images) > 1: msg = _("Multiple matches found for '%s', use an ID to be more" " specific.") % image_ref raise exc.HTTPConflict(explanation=msg) for img in images: return img['id'] except exc.HTTPConflict: raise except Exception: pass msg = _("Invalid image identifier or unable 
to " "access requested image.") raise exc.HTTPBadRequest(explanation=msg) @wsgi.response(http_client.ACCEPTED) @validation.schema(volumes.create, mv.V2_BASE_VERSION) def create(self, req, body): LOG.debug('Create volume request body: %s', body) context = req.environ['cinder.context'] body = scheduler_hints.create(req, body) volume = body['volume'] kwargs = {} self.validate_name_and_description(volume, check_length=False) if 'name' in volume: volume['display_name'] = volume.pop('name') if 'description' in volume: volume['display_description'] = volume.pop('description') if 'image_id' in volume: volume['imageRef'] = volume.pop('image_id') req_volume_type = volume.get('volume_type', None) if req_volume_type: kwargs['volume_type'] = ( objects.VolumeType.get_by_name_or_id(context, req_volume_type)) kwargs['metadata'] = volume.get('metadata', None) snapshot_id = volume.get('snapshot_id') if snapshot_id is not None: kwargs['snapshot'] = self.volume_api.get_snapshot(context, snapshot_id) else: kwargs['snapshot'] = None source_volid = volume.get('source_volid') if source_volid is not None: kwargs['source_volume'] = \ self.volume_api.get_volume(context, source_volid) else: kwargs['source_volume'] = None kwargs['group'] = None kwargs['consistencygroup'] = None consistencygroup_id = volume.get('consistencygroup_id') if consistencygroup_id is not None: kwargs['group'] = self.group_api.get(context, consistencygroup_id) size = volume.get('size', None) if size is None and kwargs['snapshot'] is not None: size = kwargs['snapshot']['volume_size'] elif size is None and kwargs['source_volume'] is not None: size = kwargs['source_volume']['size'] LOG.info("Create volume of %s GB", size) image_ref = volume.get('imageRef') if image_ref is not None: image_uuid = self._image_uuid_from_ref(image_ref, context) kwargs['image_id'] = image_uuid kwargs['availability_zone'] = volume.get('availability_zone', None) kwargs['scheduler_hints'] = volume.get('scheduler_hints', None) kwargs['multiattach'] = utils.get_bool_param('multiattach', volume) if kwargs.get('multiattach', False): msg = ("The option 'multiattach' " "is deprecated and will be removed in a future " "release. 
The default behavior going forward will " "be to specify multiattach enabled volume types.") versionutils.report_deprecated_feature(LOG, msg) try: new_volume = self.volume_api.create( context, size, volume.get('display_name'), volume.get('display_description'), **kwargs) except exception.VolumeTypeDefaultMisconfiguredError as err: raise webob.exc.HTTPInternalServerError(explanation=err.msg) retval = self._view_builder.detail(req, new_volume) return retval def _get_volume_filter_options(self): return common.get_enabled_resource_filters('volume')['volume'] @validation.schema(volumes.update, mv.V2_BASE_VERSION, mv.get_prior_version(mv.SUPPORT_VOLUME_SCHEMA_CHANGES)) @validation.schema(volumes.update_volume_v353, mv.SUPPORT_VOLUME_SCHEMA_CHANGES) def update(self, req, id, body): context = req.environ['cinder.context'] update_dict = body['volume'] self.validate_name_and_description(update_dict, check_length=False) if 'name' in update_dict: update_dict['display_name'] = update_dict.pop('name') if 'description' in update_dict: update_dict['display_description'] = update_dict.pop('description') try: volume = self.volume_api.get(context, id, viewable_admin_meta=True) volume_utils.notify_about_volume_usage(context, volume, 'update.start') self.volume_api.update(context, volume, update_dict) except exception.InvalidVolumeMetadataSize as error: raise webob.exc.HTTPRequestEntityTooLarge(explanation=error.msg) volume.update(update_dict) api_utils.add_visible_admin_metadata(volume) volume_utils.notify_about_volume_usage(context, volume, 'update.end') return self._view_builder.detail(req, volume) def create_resource(ext_mgr): return wsgi.Resource(VolumeController(ext_mgr))
true
true
f7084c889dba9a7baf5467baf350f88f62040e86
3,180
py
Python
pyscript/torch/utils.py
takuto0831/Competition-utils
c738e199c6a771a0c58b9cd237660bb76b4be4fb
[ "MIT" ]
null
null
null
pyscript/torch/utils.py
takuto0831/Competition-utils
c738e199c6a771a0c58b9cd237660bb76b4be4fb
[ "MIT" ]
null
null
null
pyscript/torch/utils.py
takuto0831/Competition-utils
c738e199c6a771a0c58b9cd237660bb76b4be4fb
[ "MIT" ]
null
null
null
import os
import random
import subprocess
import numpy as np
import torch
import time

try:
    import torch_xla
    import torch_xla.core.xla_model as xm
    XLA = True
except ModuleNotFoundError:
    XLA = False


def freeze_module(module):
    for i, param in enumerate(module.parameters()):
        param.requires_grad = False


def fit_state_dict(state_dict, model):
    '''
    Ignore size mismatch when loading state_dict
    '''
    for name, param in model.named_parameters():
        new_param = state_dict[name]
        if new_param.size() != param.size():
            print(f'Size mismatch in {name}: {new_param.shape} -> {param.shape}')
            state_dict.pop(name)


def get_device(arg):
    if isinstance(arg, torch.device) or \
            (XLA and isinstance(arg, xm.xla_device)):
        device = arg
    elif arg is None or isinstance(arg, (list, tuple)):
        if XLA:
            device = xm.xla_device()
        else:
            device = torch.device(
                'cuda' if torch.cuda.is_available() else 'cpu')
    elif isinstance(arg, str):
        if arg == 'xla' and XLA:
            device = xm.xla_device()
        else:
            device = torch.device(arg)

    if isinstance(arg, (list, tuple)):
        if isinstance(arg[0], int):
            device_ids = list(arg)
        elif isinstance(arg[0], str) and arg[0].isnumeric():
            device_ids = [ int(a) for a in arg ]
        else:
            raise ValueError(f'Invalid device: {arg}')
    else:
        if device.type == 'cuda':
            assert torch.cuda.is_available()
            if device.index is None:
                device_count = torch.cuda.device_count()
                if device_count > 1:
                    device_ids = list(range(device_count))
                else:
                    device_ids = [0]
            else:
                device_ids = [device.index]
        else:
            device_ids = [device.index]

    return device, device_ids


def seed_everything(random_state=0, deterministic=False):
    random.seed(random_state)
    os.environ['PYTHONHASHSEED'] = str(random_state)
    np.random.seed(random_state)
    torch.manual_seed(random_state)
    torch.cuda.manual_seed(random_state)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.deterministic = False


def get_gpu_memory():
    """
    Code borrowed from:
    https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4

    Get the current gpu usage.

    Returns
    -------
    usage: dict
        Keys are device ids as integers.
        Values are memory usage as integers in MB.
    """
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.used',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    # Convert lines into a dictionary
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
    return gpu_memory_map


def get_time(time_format='%H:%M:%S'):
    return time.strftime(time_format, time.localtime())
28.909091
81
0.60283
import os
import random
import subprocess
import numpy as np
import torch
import time

try:
    import torch_xla
    import torch_xla.core.xla_model as xm
    XLA = True
except ModuleNotFoundError:
    XLA = False


def freeze_module(module):
    for i, param in enumerate(module.parameters()):
        param.requires_grad = False


def fit_state_dict(state_dict, model):
    for name, param in model.named_parameters():
        new_param = state_dict[name]
        if new_param.size() != param.size():
            print(f'Size mismatch in {name}: {new_param.shape} -> {param.shape}')
            state_dict.pop(name)


def get_device(arg):
    if isinstance(arg, torch.device) or \
            (XLA and isinstance(arg, xm.xla_device)):
        device = arg
    elif arg is None or isinstance(arg, (list, tuple)):
        if XLA:
            device = xm.xla_device()
        else:
            device = torch.device(
                'cuda' if torch.cuda.is_available() else 'cpu')
    elif isinstance(arg, str):
        if arg == 'xla' and XLA:
            device = xm.xla_device()
        else:
            device = torch.device(arg)

    if isinstance(arg, (list, tuple)):
        if isinstance(arg[0], int):
            device_ids = list(arg)
        elif isinstance(arg[0], str) and arg[0].isnumeric():
            device_ids = [ int(a) for a in arg ]
        else:
            raise ValueError(f'Invalid device: {arg}')
    else:
        if device.type == 'cuda':
            assert torch.cuda.is_available()
            if device.index is None:
                device_count = torch.cuda.device_count()
                if device_count > 1:
                    device_ids = list(range(device_count))
                else:
                    device_ids = [0]
            else:
                device_ids = [device.index]
        else:
            device_ids = [device.index]

    return device, device_ids


def seed_everything(random_state=0, deterministic=False):
    random.seed(random_state)
    os.environ['PYTHONHASHSEED'] = str(random_state)
    np.random.seed(random_state)
    torch.manual_seed(random_state)
    torch.cuda.manual_seed(random_state)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    else:
        torch.backends.cudnn.deterministic = False


def get_gpu_memory():
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.used',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
    return gpu_memory_map


def get_time(time_format='%H:%M:%S'):
    return time.strftime(time_format, time.localtime())
true
true
f7084ca79cb058035b4d0b6e1c6f2ddb82b38e82
1,272
py
Python
resources_portal/__init__.py
AlexsLemonade/python-flask-demo
247780aa5ecfd6ce9b876514f96b2048549857d2
[ "BSD-3-Clause" ]
null
null
null
resources_portal/__init__.py
AlexsLemonade/python-flask-demo
247780aa5ecfd6ce9b876514f96b2048549857d2
[ "BSD-3-Clause" ]
null
null
null
resources_portal/__init__.py
AlexsLemonade/python-flask-demo
247780aa5ecfd6ce9b876514f96b2048549857d2
[ "BSD-3-Clause" ]
null
null
null
import os

import resources_portal.models  # noqa
from flask import Flask
from flask_migrate import Migrate
from flask_restful import Api

from resources_portal.db import db
from resources_portal.views import user

migrate = Migrate()


def initialize_routes(api: Api):
    api.add_resource(user.UsersApi, "/users")
    api.add_resource(user.UserApi, "/users/<user_id>")


def set_database_URI(app: Flask):
    database_URI_template = "postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
    app.config["SQLALCHEMY_DATABASE_URI"] = database_URI_template.format(
        DB_USER=app.config["DB_USER"],
        DB_PASSWORD=app.config["DB_PASSWORD"],
        DB_HOST=os.environ["DB_HOST"],
        DB_PORT=app.config["DB_PORT"],
        DB_NAME=app.config["DB_NAME"],
    )


def create_app(test_config=None):
    # create and configure the app
    app = Flask(__name__)
    app.config.from_envvar("RESOURCES_PORTAL_CONFIG_FILE")

    set_database_URI(app)

    api = Api(app)
    initialize_routes(api)

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    db.init_app(app)
    migrate.init_app(app, db)

    from resources_portal.schemas import ma

    ma.init_app(app)

    return app
24.461538
96
0.706761
import os

import resources_portal.models
from flask import Flask
from flask_migrate import Migrate
from flask_restful import Api

from resources_portal.db import db
from resources_portal.views import user

migrate = Migrate()


def initialize_routes(api: Api):
    api.add_resource(user.UsersApi, "/users")
    api.add_resource(user.UserApi, "/users/<user_id>")


def set_database_URI(app: Flask):
    database_URI_template = "postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
    app.config["SQLALCHEMY_DATABASE_URI"] = database_URI_template.format(
        DB_USER=app.config["DB_USER"],
        DB_PASSWORD=app.config["DB_PASSWORD"],
        DB_HOST=os.environ["DB_HOST"],
        DB_PORT=app.config["DB_PORT"],
        DB_NAME=app.config["DB_NAME"],
    )


def create_app(test_config=None):
    app = Flask(__name__)
    app.config.from_envvar("RESOURCES_PORTAL_CONFIG_FILE")

    set_database_URI(app)

    api = Api(app)
    initialize_routes(api)

    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    db.init_app(app)
    migrate.init_app(app, db)

    from resources_portal.schemas import ma

    ma.init_app(app)

    return app
true
true
f7084d77def231fe4c7eb0e32eecd4fdc2733e9a
37,936
py
Python
Packs/IntSight/Integrations/IntSight/IntSight.py
AVAILOTECH/x2o-demisto-content
b5d4c182833fd7314592c9984d078b2df170cf77
[ "MIT" ]
null
null
null
Packs/IntSight/Integrations/IntSight/IntSight.py
AVAILOTECH/x2o-demisto-content
b5d4c182833fd7314592c9984d078b2df170cf77
[ "MIT" ]
null
null
null
Packs/IntSight/Integrations/IntSight/IntSight.py
AVAILOTECH/x2o-demisto-content
b5d4c182833fd7314592c9984d078b2df170cf77
[ "MIT" ]
null
null
null
from CommonServerPython import * reload(sys) sys.setdefaultencoding('utf-8') # pylint: disable=E1101 requests.packages.urllib3.disable_warnings() URL = demisto.getParam('server') if URL[-1] != '/': URL += '/' if not demisto.getParam('proxy'): del os.environ['HTTP_PROXY'] del os.environ['HTTPS_PROXY'] del os.environ['http_proxy'] del os.environ['https_proxy'] VALIDATE_CERT = not demisto.params().get('insecure', True) ID_AND_API_KEY = demisto.getParam('credentials')['identifier'] + ':' + demisto.getParam('credentials')['password'] ENCODED_AUTH_KEY = base64.b64encode(ID_AND_API_KEY.encode("utf-8")) MSSP_ACCOUNT_ID = demisto.getParam('mssp_sub_account_id') HEADERS = {'Authorization': 'Basic {}'.format(ENCODED_AUTH_KEY.decode()), 'Content-Type': 'application/json', 'Account-Id': demisto.getParam('credentials')['identifier']} # Change the Account-Id to the sub account id, so all actions will be on the sub account. if MSSP_ACCOUNT_ID: HEADERS['Account-Id'] = MSSP_ACCOUNT_ID IOC_TYPE_TO_DBOT_TYPE = { 'IpAddresses': 'ip', 'Urls': 'url', 'Domains': 'domain', 'Hashes': 'hash' } DEFAULT_TIME_RANGE = '1 day' SEVERITY_LEVEL = { 'All': 0, 'Low': 1, 'Medium': 2, 'High': 3 } def http_request(method, path, json_data=None, params=None, json_response=False): """ Send the request to IntSights and return the JSON response """ try: response = requests.request(method, URL + path, headers=HEADERS, json=json_data, params=params, verify=VALIDATE_CERT) except requests.exceptions.SSLError: raise Exception('Connection error in the API call to IntSights.\nCheck your not secure parameter.') except requests.ConnectionError: raise Exception('Connection error in the API call to IntSights.\nCheck your Server URL parameter.') if response.status_code < 200 or response.status_code > 299: if not (response.text == 'SeverityNotChanged' or response.text == 'TagExist' or response.text == 'IocBlocklistStatusNotChanged'): return_error('Error in API call to IntSights service %s - [%d] %s' % (path, response.status_code, response.text)) if response.status_code == 204: return [] # type: ignore if json_response: try: return response.json() except ValueError: raise Exception('Error in API call to IntSights service - check your configured URL address') return response def convert_iso_string_to_python_date(date_in_iso_format): iso_format = "%Y-%m-%dT%H:%M:%S" date_in_python_format = datetime.strptime(date_in_iso_format, iso_format) return date_in_python_format def convert_python_date_to_unix_millisecond(python_date_object): timestamp_in_unix_millisecond = date_to_timestamp(python_date_object, 'datetime.datetime') return timestamp_in_unix_millisecond def increase_iso_by_x_days(date_in_iso_format, num_of_days): date_in_python_format = convert_iso_string_to_python_date(date_in_iso_format) new_date_in_python_format = date_in_python_format + timedelta(days=int(num_of_days)) new_date_in_iso_format = new_date_in_python_format.isoformat() return new_date_in_iso_format def remove_milliseconds_from_iso(date_in_iso_format): date_parts_arr = date_in_iso_format.split('.') date_in_iso_without_milliseconds = date_parts_arr[0] return date_in_iso_without_milliseconds def increase_timestamp_by_x_days(date_in_unix_ms_timestamp, num_of_days): date_in_iso = timestamp_to_datestring(date_in_unix_ms_timestamp) date_in_iso_without_ms = remove_milliseconds_from_iso(date_in_iso) date_in_iso_plus_x_days = increase_iso_by_x_days(date_in_iso_without_ms, num_of_days) timestamp_in_unix_ms_plus_x_days = date_to_timestamp(date_in_iso_plus_x_days) return 
timestamp_in_unix_ms_plus_x_days def update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp): params['foundDateFrom'] = oldest_day_to_search_in_unix_timestamp params['foundDateTo'] = now_date_in_unix_timestamp params['sourceDateFrom'] = oldest_day_to_search_in_unix_timestamp params['sourceDateTo'] = now_date_in_unix_timestamp def update_params_with_delta_arg(params, time_delta_in_days_int): now_date_in_iso = datetime.utcnow().isoformat() now_date_in_iso_without_ms = remove_milliseconds_from_iso(now_date_in_iso) now_date_in_unix_timestamp = date_to_timestamp(now_date_in_iso_without_ms) oldest_day_to_search_in_unix_timestamp = increase_timestamp_by_x_days(now_date_in_unix_timestamp, -1 * time_delta_in_days_int) update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp) del params['time-delta'] def update_params_dict_according_to_delta_arg(params, time_delta_in_days_int): if 'foundDateFrom' in params or 'foundDateTo' in params: demisto.debug( "ERROR in get_alerts() - can't use found-date-to or found-date-from arguments with time-delta argument") return_error("Error: can't assign delta when assigned both found-date-to or found-date-from") else: update_params_with_delta_arg(params, time_delta_in_days_int) return params def handle_filters(found_date_from=None): """ Apply filters to alert list """ args_camel_case = { 'alert-type': 'alertType', 'source-type': 'sourceType', 'network-type': 'networkType', 'source-date-from': 'sourceDateFrom', 'source-date-to': 'sourceDateTo', 'found-date-from': 'foundDateFrom', 'found-date-to': 'foundDateTo', 'is-flagged': 'isFlagged', 'is-closed': 'isClosed', 'source-ID': 'sourceId', 'first-seen-from': 'firstSeenFrom', 'first-seen-to': 'firstSeenTo', 'last-seen-from': 'lastSeenFrom', 'last-seen-to': 'lastSeenTo', 'value': 'iocValue', } params = {} for key in demisto.args(): if demisto.getArg(key): params[args_camel_case.get(key) or key] = demisto.getArg(key) if demisto.getArg('time-delta'): time_delta_in_days = demisto.getArg('time-delta') update_params_dict_according_to_delta_arg(params, int(time_delta_in_days)) elif found_date_from: params['foundDateFrom'] = found_date_from return params def get_alerts_helper(params): demisto.info("Executing get_alerts with params: {}".format(params)) response = http_request('GET', 'public/v1/data/alerts/alerts-list', params=params, json_response=True) alerts_human_readable = [] alerts_context = [] for alert_id in response: alert_human_readable, alert_context = get_alert_by_id_helper(alert_id) alerts_human_readable.append(alert_human_readable) alerts_context.append(alert_context) return alerts_human_readable, alerts_context def extract_mail(replies): if not replies: return '' mails = [] for reply in replies: mails.append(reply.get('Email')) return '\n'.join(mails) def extract_remediation(remidiations): if not remidiations: return '' remedies = [] string_format = "{0} - Status: {1}" for remedy in remidiations: remedies.append(string_format.format(remedy.get('Value'), remedy.get('Status'))) return '\n'.join(remedies) def hash_identifier(hash_val): if md5Regex.match(hash_val): return 'MD5' if sha1Regex.match(hash_val): return 'SHA1' if sha256Regex.match(hash_val): return 'SHA256' return 'Unknown' def extract_tags(tags): pretty_tags = [] string_format = "ID: {0} - Name: {1}" for tag in tags: pretty_tags.append(string_format.format(tag.get('_id'), tag.get('Name'))) return pretty_tags def get_alerts(): """ Gets all alerts 
and returns as a list. """ alerts_human_readable, alerts_context = get_alerts_helper(handle_filters()) headers = ['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL', 'SourceEmail', 'SourceNetworkType', 'IsClosed', 'Closed', 'IsFlagged', 'Images', 'Tags', 'Description', 'Title', 'TakedownStatus', 'SubType'] demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alerts_context}, 'Contents': alerts_context, 'HumanReadable': tableToMarkdown('IntSights Alerts', alerts_human_readable, headers=headers, removeNull=False), 'ContentsFormat': formats['json'] }) def alert_to_readable(alert, parse_tags): """ Convert alert to readable format """ is_closed = demisto.get(alert, 'IsClosed') if is_closed is None: is_closed = demisto.get(alert, 'Closed.IsClosed') readable = { 'ID': demisto.get(alert, '_id'), 'Severity': demisto.get(alert, 'Details.Severity'), 'Type': demisto.get(alert, 'Details.Type'), 'FoundDate': demisto.get(alert, 'FoundDate'), 'SourceType': demisto.get(alert, 'Details.Source.Type'), 'SourceURL': demisto.get(alert, 'Details.Source.URL'), 'SourceEmail': demisto.get(alert, 'Details.Source.Email'), 'SourceNetworkType': demisto.get(alert, 'Details.Source.NetworkType'), 'IsClosed': is_closed, 'IsFlagged': demisto.get(alert, 'IsFlagged'), 'Assets': demisto.get(alert, 'Assets'), 'Images': demisto.get(alert, 'Details.Images'), 'Description': demisto.get(alert, 'Details.Description'), 'Title': demisto.get(alert, 'Details.Title'), 'TakedownStatus': demisto.get(alert, 'TakedownStatus'), 'SubType': demisto.get(alert, 'Details.SubType'), } tags = demisto.get(alert, 'Details.Tags') if parse_tags: readable['Tags'] = extract_tags(tags) else: readable['Tag'] = [] for tag in tags: readable['Tag'].append({'ID': tag.get('_id'), 'Name': tag.get('Name')}) return readable def get_alert_by_id_helper(alert_id): """ Helper for getting details by ID """ response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True) return alert_to_readable(response, True), alert_to_readable(response, False) def get_alert_by_id(): """ Get alert details by id """ alert_id = demisto.getArg('alert-id') activity_hr, activity_ctx = get_alert_by_id_helper(alert_id) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': activity_ctx}, 'Contents': activity_hr, 'HumanReadable': tableToMarkdown('IntSights Alert Details', [activity_hr], ['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL', 'SourceEmail', 'SourceNetworkType', 'IsClosed', 'IsFlagged', 'Images', 'Tags', 'Description', 'Title', 'TakedownStatus', 'SubType']), 'ContentsFormat': formats['json'] }) def get_alert_image(): """ Retrieves the alert image by image_id """ image_id = demisto.getArg('image-id') response = http_request('GET', 'public/v1/data/alerts/alert-image/' + image_id) demisto.results(fileResult(image_id + '-image.jpeg', response.content)) def ask_analyst(): """ Send question to an analyst about the requested alert """ alert_id = demisto.getArg('alert-id') question = demisto.getArg('question') http_request('POST', 'public/v1/data/alerts/ask-the-analyst/' + alert_id, json_data={'Question': question}) question_details = {'ID': alert_id, 'Question': question} title = 'IntSights Ask the Analyst: ' \ 'Your question has been successfully sent to an analyst about the requested alert' demisto.results( { 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': question_details}, 'Contents': 
question_details, 'HumanReadable': tableToMarkdown(title, [question_details], ['ID', 'Question']), 'ContentsFormat': formats['json'] } ) def get_alert_activity(): """ Retrieves the alert activity by alert-id """ alert_id = demisto.getArg('alert-id') response = http_request('GET', 'public/v1/data/alerts/activity-log/' + alert_id, json_response=True) alert = {'ID': alert_id, 'Activities': []} if not response: demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert}, 'Contents': response, 'HumanReadable': 'Alert {} does not have activities.'.format(alert_id), 'ContentsFormat': formats['json'] }) else: human_readable_arr = [] for activity in response: alert['Activities'].append({ 'ID': demisto.get(activity, '_id'), 'Type': demisto.get(activity, 'Type'), 'Initiator': demisto.get(activity, 'Initiator'), 'CreatedDate': demisto.get(activity, 'CreatedDate'), 'UpdateDate': demisto.get(activity, 'UpdateDate'), 'RemediationBlocklistUpdate': demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate'), 'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')}, 'Mail': {'Replies': demisto.get(activity, 'AdditionalInformation.Mail.Replies')}, 'ReadBy': demisto.get(activity, 'ReadBy') }) human_readable_arr.append({ 'ID': demisto.get(activity, '_id'), 'Type': demisto.get(activity, 'Type'), 'Initiator': demisto.get(activity, 'Initiator'), 'CreatedDate': demisto.get(activity, 'CreatedDate'), 'UpdateDate': demisto.get(activity, 'UpdateDate'), 'RemediationBlocklistUpdate': extract_remediation( demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate')) if demisto.get(activity, 'AdditionalInformation') else '', 'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')}, 'Mail': extract_mail( demisto.get(activity, 'AdditionalInformation.Mail.Replies')) if demisto.get(activity, 'AdditionalInformation.Mail') else '', 'ReadBy': demisto.get(activity, 'ReadBy') }) headers = ['ID', 'Type', 'Initiator', 'CreatedDate', 'UpdateDate', 'RemediationBlocklistUpdate', 'AskTheAnalyst', 'Mail', 'ReadBy'] human_readable = tableToMarkdown('IntSights Alert {} Activity Log'.format(alert_id), t=human_readable_arr, headers=headers), demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert}, 'Contents': response, 'HumanReadable': human_readable, 'ContentsFormat': formats['json'] }) def change_severity(): """ Change severity of an alert """ alert_id = demisto.getArg('alert-id') severity = demisto.getArg('severity') http_request('PATCH', 'public/v1/data/alerts/change-severity/' + alert_id, json_data={'Severity': severity}) severity_details = {'ID': alert_id, 'Severity': severity} demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': severity_details}, 'Contents': severity_details, 'HumanReadable': tableToMarkdown( 'IntSights Update Alert Severity: The Alert severity has been successfully updated.', [severity_details], ['ID', 'Severity']), 'ContentsFormat': formats['json'] }) def get_assignee_id(assignee_email): response = http_request('GET', 'public/v1/account/users-details', json_response=True) for user in response: if assignee_email == user.get('Email', ''): return user.get('_id') raise Exception('user not found') def assign_alert(): """ Assign alert to an Assignee ID """ alert_id = demisto.getArg('alert-id') assignee_email = demisto.getArg('assignee-email') is_mssp = 
demisto.getArg('is-mssp-optional') assignee_id = get_assignee_id(assignee_email) assign_details = {'ID': alert_id, 'Assignees.AssigneeID': assignee_id} url = 'public/v1/data/alerts/assign-alert/' + alert_id if is_mssp: url += '?IsMssp=' + is_mssp http_request('PATCH', url, json_data={'AssigneeID': assignee_id}) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': assign_details}, 'Contents': assign_details, 'HumanReadable': tableToMarkdown( 'IntSights Assign Alert: The Alert has been successfully assigned to assigneeID', [assign_details], ['ID', 'Assignees.AssigneeID']), 'ContentsFormat': formats['json'] }) def unassign_alert(): """ Unassign an alert """ alert_id = demisto.getArg('alert-id') http_request('PATCH', 'public/v1/data/alerts/unassign-alert/' + alert_id) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id}}, 'Contents': {'ID': alert_id}, 'HumanReadable': 'Alert id: ' + alert_id + ' successfully unassigned', 'ContentsFormat': formats['json'] }) def close_alert(): """ Close an alert """ alert_id = demisto.getArg('alert-id') reason = demisto.getArg('reason') free_text = demisto.getArg('free-text') is_hidden = demisto.getArg('is-hidden') == 'True' rate = demisto.getArg('rate') close_details = {'ID': alert_id, 'Close Reason': reason, 'Closed FreeText': free_text, 'Closed Rate': rate, 'IsHidden': is_hidden} close_details_context = {'ID': alert_id, 'Closed': {'Reason': reason, 'FreeText': free_text, 'Rate': rate}, 'IsHidden': is_hidden} url = 'public/v1/data/alerts/close-alert/' + alert_id json_data = {'Reason': reason} if free_text: json_data['FreeText'] = free_text if is_hidden: json_data['IsHidden'] = is_hidden if rate: json_data['Rate'] = rate http_request('PATCH', url, json_data) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': close_details}, 'Contents': close_details_context, 'HumanReadable': tableToMarkdown('IntSights Close Alert: The Alert has successfully been closed.', [close_details], ['ID', 'Close Reason', 'Closed FreeText', 'Closed Rate', 'IsHidden']), 'ContentsFormat': formats['json'] }) def send_mail(): """ Send email with the alert details and a question """ alert_id = demisto.getArg('alert-id') emails = argToList(demisto.getArg('emails')) content = demisto.getArg('content') http_request('POST', 'public/v1/data/alerts/send-mail/' + alert_id, {'Emails': emails, 'Content': content}) context = { 'ID': alert_id, 'EmailID': emails, 'Question': content } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Email with content (' + content + ') sent to emails', 'ContentsFormat': formats['json'] }) def get_tag_id(alert_id, tag_name): response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True) details = response.get('Details', {}) tags = details.get('Tags', []) for tag in tags: if tag.get('Name', '') == tag_name: return tag.get('_id', '') return 'Not found' def add_tag(): """ Adds a tag to the alert """ alert_id = demisto.getArg('alert-id') tag_name = demisto.getArg('tag-name') http_request('PATCH', 'public/v1/data/alerts/add-tag/' + alert_id, json_data={'TagName': tag_name}) tag_info = { 'TagName': tag_name, 'ID': get_tag_id(alert_id, tag_name) } context = { 'ID': alert_id, 'Tags': tag_info } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': 
{'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Tag (' + tag_name + ') added to alert id: ' + alert_id, 'ContentsFormat': formats['json'] }) def remove_tag(): """ Removes a tag from an alert """ alert_id = demisto.getArg('alert-id') tag_id = demisto.getArg('tag-id') http_request('PATCH', 'public/v1/data/alerts/remove-tag/' + alert_id, json_data={'TagID': tag_id}) context = { 'ID': alert_id, 'Tags': {'ID': tag_id} } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Tag id: ' + tag_id + ' removed from alert id: ' + alert_id, 'ContentsFormat': formats['json'] }) def add_comment(): """ Adds a comment to an alert """ alert_id = demisto.getArg('alert-id') comment = demisto.getArg('comment') http_request('PATCH', 'public/v1/data/alerts/add-comment/' + alert_id, json_data={'Comment': comment}) context = { 'ID': alert_id, 'Comment': comment } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Succesfully added comment "' + comment + '" to alert id: ' + alert_id, 'ContentsFormat': formats['json'] }) def ioc_to_readable(ioc_data): """ Convert IOC to readable format """ ioc_context = { 'ID': demisto.get(ioc_data, '_id'), 'SourceID': demisto.get(ioc_data, 'SourceID'), 'AccountID': demisto.get(ioc_data, 'AccountID'), 'Type': demisto.get(ioc_data, 'Type'), 'Value': demisto.get(ioc_data, 'Value'), 'FirstSeen': demisto.get(ioc_data, 'FirstSeen'), 'LastSeen': demisto.get(ioc_data, 'LastSeen'), 'Domain': demisto.get(ioc_data, 'Domain'), 'Status': demisto.get(ioc_data, 'Status'), 'Severity': demisto.get(ioc_data, 'Severity'), 'SourceName': demisto.get(ioc_data, 'Source.Name'), 'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'), 'Flags': {'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa')}, 'Enrichment': { 'Status': demisto.get(ioc_data, 'Enrichment.Status'), 'Data': demisto.get(ioc_data, 'Enrichment.Data'), 'Date': demisto.get(ioc_data, 'Enrichment.Data') # Backwards compatibility issue } } ioc_readable = { 'ID': demisto.get(ioc_data, '_id'), 'SourceID': demisto.get(ioc_data, 'SourceID'), 'AccountID': demisto.get(ioc_data, 'AccountID'), 'Type': demisto.get(ioc_data, 'Type'), 'Value': demisto.get(ioc_data, 'Value'), 'FirstSeen': demisto.get(ioc_data, 'FirstSeen'), 'LastSeen': demisto.get(ioc_data, 'LastSeen'), 'Domain': demisto.get(ioc_data, 'Domain'), 'Status': demisto.get(ioc_data, 'Status'), 'Severity': demisto.get(ioc_data, 'Severity').get('Value'), 'SourceName': demisto.get(ioc_data, 'Source.Name'), 'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'), 'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa'), 'Enrichment Status': demisto.get(ioc_data, 'Enrichment.Status'), 'Enrichment Data': demisto.get(ioc_data, 'Enrichment.Data') } dbot_score = { 'Indicator': ioc_context['Value'], 'Type': IOC_TYPE_TO_DBOT_TYPE[ioc_context['Type']], 'Vendor': 'IntSights', 'Score': translate_severity(ioc_readable['Severity']) } malicious_dict = { 'Vendor': 'IntSights', 'Description': 'IntSights severity level is High' } domain = {} if ioc_context['Domain']: domain['Name'] = ioc_context['Domain'] if translate_severity(ioc_readable['Severity']) == 3: domain['Malicious'] = malicious_dict ip_info = {} if ioc_context['Type'] == 'IpAddresses': ip_info['Address'] = ioc_context['Value'] if translate_severity(ioc_readable['Severity']) == 3: ip_info['Malicious'] = 
malicious_dict url_info = {} if ioc_context['Type'] == 'Urls': url_info['Data'] = ioc_context['Value'] if translate_severity(ioc_readable['Severity']) == 3: url_info['Malicious'] = malicious_dict hash_info = {} if ioc_context['Type'] == 'Hashes': hash_info['Name'] = ioc_context['Value'] hash_info[hash_identifier(ioc_context['Value'])] = ioc_context['Value'] if translate_severity(ioc_readable['Severity']) == 3: hash_info['Malicious'] = malicious_dict return ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info def search_for_ioc(): """ Search for IOC by value """ response = http_request('GET', 'public/v1/iocs/ioc-by-value', params=handle_filters(), json_response=True) if response: ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(response) demisto.results( { 'Type': entryTypes['note'], 'EntryContext': { 'IntSights.Iocs(val.ID === obj.ID)': ioc_context, 'DBotScore': dbot_score, 'Domain': domain, 'IP': ip_info, 'URL': url_info, 'File': hash_info }, 'Contents': response, 'HumanReadable': tableToMarkdown('IOC Information', [ioc_readable], ['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence', 'IsInAlexa', 'Enrichment Status', 'Enrichment Data']), 'ContentsFormat': formats['json'] } ) else: results_for_no_content('IOC Information') def results_for_no_content(cmd_name): demisto.results( { 'Type': entryTypes['note'], 'EntryContext': {'IntSights': {}}, 'Contents': {}, 'HumanReadable': '### {} \n\n Could not get any results.'.format(cmd_name), 'ContentsFormat': formats['json'] } ) def translate_severity(sev): """ Translate alert severity to demisto """ if sev in ['Medium', 'High']: return 3 if sev == 'Low': return 2 return 0 def fetch_incidents(): """ Fetch incidents for Demisto """ last_run = demisto.getLastRun() demisto.info("IntSight fetch last run time is: {}".format(str(last_run))) if not last_run or 'time' not in last_run: fetch_delta, _ = parse_date_range(demisto.params().get('fetch_delta', DEFAULT_TIME_RANGE), to_timestamp=True) else: fetch_delta = last_run.get('time') current_fetch = fetch_delta alert_type = demisto.getParam('type') min_severity_level = demisto.params().get('severity_level', 'All') if min_severity_level not in SEVERITY_LEVEL: raise Exception("Minimum Alert severity level to fetch incidents incidents from, allowed values are: All," " Low, Medium, High. 
(Setting to All will fetch all incidents)") _, alerts_context = get_alerts_helper(handle_filters(fetch_delta)) incidents = [] for alert in alerts_context: if SEVERITY_LEVEL[min_severity_level] <= SEVERITY_LEVEL[alert.get('Severity', 'Low')]: if not alert_type or alert_type.lower() == alert.get('Type', '').lower(): incidents.append({ 'name': '{type} - {id}'.format(type=alert.get('Type', 'Type not found'), id=alert.get('ID')), 'occurred': alert.get('FoundDate'), 'severity': translate_severity(alert.get('Severity')), 'rawJSON': json.dumps(alert) }) alert_timestamp = date_to_timestamp(alert.get('FoundDate'), date_format='%Y-%m-%dT%H:%M:%S.%fZ') if alert_timestamp > current_fetch: current_fetch = alert_timestamp demisto.incidents(incidents) demisto.setLastRun({'time': current_fetch + 1000}) def get_iocs(): """ Gets all IOCs with the given filters """ response = http_request('GET', 'public/v1/iocs/complete-iocs-list', params=handle_filters(), json_response=True) domains = [] ip_infos = [] url_infos = [] hash_infos = [] dbot_scores = [] iocs_context = [] iocs_readable = [] for indicator in response: ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(indicator) iocs_context.append(ioc_context) iocs_readable.append(ioc_readable) dbot_scores.append(dbot_score) domains.append(domain) ip_infos.append(ip_info) url_infos.append(url_info) hash_infos.append(hash_info) headers = ['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence', 'IsInAlexa', 'Enrichment Status', 'Enrichment Data'] demisto.results( { 'Type': entryTypes['note'], 'EntryContext': { 'IntSights.Iocs': iocs_context, 'DBotScore': dbot_scores, 'Domain': domains, 'IP': ip_infos, 'URL': url_infos, 'File': hash_infos }, 'Contents': response, 'HumanReadable': tableToMarkdown('IOC Information', t=iocs_readable, headers=headers), 'ContentsFormat': formats['json'] } ) def takedown_request(): """ Request alert takedown """ alert_id = demisto.getArg('alert-id') http_request('PATCH', 'public/v1/data/alerts/takedown-request/' + alert_id) context = { 'ID': alert_id, } human_readable = '### IntSights Alert Takedown\n' \ 'The Alert Takedown request has been sent successfully for {}'.format(str(alert_id)) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': human_readable, 'ContentsFormat': formats['json'] }) def get_alert_takedown_status(): """ Get an alert's takedown status """ alert_id = demisto.getArg('alert-id') response = http_request('GET', 'public/v1/data/alerts/takedown-status/' + alert_id) context = { 'ID': alert_id, 'TakedownStatus': response.text } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': tableToMarkdown('IntSights Alert Takedown Status', [context], ['ID', 'TakedownStatus']), 'ContentsFormat': formats['json'] }) def update_ioc_blocklist_status(): alert_id = demisto.getArg('alert-id') types = argToList(demisto.getArg('type')) values = argToList(demisto.getArg('value')) statuses = argToList(demisto.getArg('blocklist-status')) if len(types) != len(values) or len(types) != len(statuses): return_error('The lists must be of equal length. 
For each IOC, provide an entry in each list.') data = [] for count, type_ in enumerate(types): data.append({ 'Type': type_, 'Value': values[count], 'BlocklistStatus': statuses[count] }) http_request('PATCH', 'public/v1/data/alerts/change-iocs-blocklist-status/' + alert_id, json_data={'Iocs': data}) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': statuses}}, 'Contents': {'ID': alert_id, 'Status': statuses}, 'HumanReadable': tableToMarkdown('IntSights Update IOC BlockList Status for ' + alert_id, data, ['BlocklistStatus']), 'ContentsFormat': formats['json'] }) def get_ioc_blocklist_status(): alert_id = demisto.getArg('alert-id') response = http_request('GET', 'public/v1/data/alerts/blocklist-status/' + alert_id, json_response=True) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': { 'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': [ioc.get('Status') for ioc in response]}}, 'Contents': response, 'HumanReadable': tableToMarkdown('IntSights Blocklist Status for ' + alert_id, response, ['Status']), 'ContentsFormat': formats['json'] }) def get_mssp_sub_accounts(): account_id = demisto.getParam('credentials')['identifier'] accounts = http_request('GET', 'public/v1/mssp/customers', json_response=True) if not accounts: return_error("intsights-mssp-get-sub-accounts failed to return data.") # Fix accounts _id keys for account in accounts: account["ID"] = account["_id"] del account["_id"] if len(accounts) < 1: return_error('Current MSSP Account has no sub accounts.') account_ids = [i["ID"] for i in accounts] if MSSP_ACCOUNT_ID not in account_ids: demisto.log("[DEBUG] - MSSP sub accounts:" + str(accounts)) return_error('Entered sub account id ({}) is not part of this mssp account'.format(MSSP_ACCOUNT_ID)) for i, account in enumerate(account_ids): # Call account HEADERS['Account-Id'] = account account_ua = http_request('GET', 'public/v1/account/used-assets', json_response=True) if not account_ua: continue accounts[i].update(account_ua) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.MsspAccount(val.ID === obj.ID)': accounts}, 'HumanReadable': tableToMarkdown('IntSights MSSP accounts used assets ' + account_id, accounts, ["ID", 'CompanyName', "Status", "AssetsLimit", "AssetsCount"]), 'Contents': accounts, 'ContentsFormat': formats['json'] }) # Restore the header HEADERS['Account-Id'] = MSSP_ACCOUNT_ID def test_module(): http_request('GET', 'public/v1/api/version') if demisto.params().get('isFetch'): min_severity_level = demisto.params().get('severity_level', 'All') if min_severity_level not in SEVERITY_LEVEL: return_error("Minimum Alert severity level to fetch incidents incidents from, allowed values are: " "All, Low, Medium, High. 
(Setting to All will fetch all incidents)") demisto.results('ok') try: if demisto.command() == 'test-module': test_module() elif demisto.command() == 'fetch-incidents': fetch_incidents() elif demisto.command() == 'intsights-mssp-get-sub-accounts': get_mssp_sub_accounts() elif demisto.command() == 'intsights-get-alerts': get_alerts() elif demisto.command() == 'intsights-get-alert-image': get_alert_image() elif demisto.command() == 'intsights-get-alert-activities': get_alert_activity() elif demisto.command() == 'intsights-assign-alert': assign_alert() elif demisto.command() == 'intsights-unassign-alert': unassign_alert() elif demisto.command() == 'intsights-send-mail': send_mail() elif demisto.command() == 'intsights-ask-the-analyst': ask_analyst() elif demisto.command() == 'intsights-add-tag-to-alert': add_tag() elif demisto.command() == 'intsights-remove-tag-from-alert': remove_tag() elif demisto.command() == 'intsights-add-comment-to-alert': add_comment() elif demisto.command() == 'intsights-update-alert-severity': change_severity() elif demisto.command() == 'intsights-get-alert-by-id': get_alert_by_id() elif demisto.command() == 'intsights-get-ioc-by-value': search_for_ioc() elif demisto.command() == 'intsights-get-iocs': get_iocs() elif demisto.command() == 'intsights-alert-takedown-request': takedown_request() elif demisto.command() == 'intsights-get-alert-takedown-status': get_alert_takedown_status() elif demisto.command() == 'intsights-get-ioc-blocklist-status': get_ioc_blocklist_status() elif demisto.command() == 'intsights-update-ioc-blocklist-status': update_ioc_blocklist_status() elif demisto.command() == 'intsights-close-alert': close_alert() else: raise Exception('Unrecognized command: ' + demisto.command()) except Exception as err: return_error(str(err))
38.319192
120
0.62297
from CommonServerPython import * reload(sys) sys.setdefaultencoding('utf-8') requests.packages.urllib3.disable_warnings() URL = demisto.getParam('server') if URL[-1] != '/': URL += '/' if not demisto.getParam('proxy'): del os.environ['HTTP_PROXY'] del os.environ['HTTPS_PROXY'] del os.environ['http_proxy'] del os.environ['https_proxy'] VALIDATE_CERT = not demisto.params().get('insecure', True) ID_AND_API_KEY = demisto.getParam('credentials')['identifier'] + ':' + demisto.getParam('credentials')['password'] ENCODED_AUTH_KEY = base64.b64encode(ID_AND_API_KEY.encode("utf-8")) MSSP_ACCOUNT_ID = demisto.getParam('mssp_sub_account_id') HEADERS = {'Authorization': 'Basic {}'.format(ENCODED_AUTH_KEY.decode()), 'Content-Type': 'application/json', 'Account-Id': demisto.getParam('credentials')['identifier']} if MSSP_ACCOUNT_ID: HEADERS['Account-Id'] = MSSP_ACCOUNT_ID IOC_TYPE_TO_DBOT_TYPE = { 'IpAddresses': 'ip', 'Urls': 'url', 'Domains': 'domain', 'Hashes': 'hash' } DEFAULT_TIME_RANGE = '1 day' SEVERITY_LEVEL = { 'All': 0, 'Low': 1, 'Medium': 2, 'High': 3 } def http_request(method, path, json_data=None, params=None, json_response=False): try: response = requests.request(method, URL + path, headers=HEADERS, json=json_data, params=params, verify=VALIDATE_CERT) except requests.exceptions.SSLError: raise Exception('Connection error in the API call to IntSights.\nCheck your not secure parameter.') except requests.ConnectionError: raise Exception('Connection error in the API call to IntSights.\nCheck your Server URL parameter.') if response.status_code < 200 or response.status_code > 299: if not (response.text == 'SeverityNotChanged' or response.text == 'TagExist' or response.text == 'IocBlocklistStatusNotChanged'): return_error('Error in API call to IntSights service %s - [%d] %s' % (path, response.status_code, response.text)) if response.status_code == 204: return [] if json_response: try: return response.json() except ValueError: raise Exception('Error in API call to IntSights service - check your configured URL address') return response def convert_iso_string_to_python_date(date_in_iso_format): iso_format = "%Y-%m-%dT%H:%M:%S" date_in_python_format = datetime.strptime(date_in_iso_format, iso_format) return date_in_python_format def convert_python_date_to_unix_millisecond(python_date_object): timestamp_in_unix_millisecond = date_to_timestamp(python_date_object, 'datetime.datetime') return timestamp_in_unix_millisecond def increase_iso_by_x_days(date_in_iso_format, num_of_days): date_in_python_format = convert_iso_string_to_python_date(date_in_iso_format) new_date_in_python_format = date_in_python_format + timedelta(days=int(num_of_days)) new_date_in_iso_format = new_date_in_python_format.isoformat() return new_date_in_iso_format def remove_milliseconds_from_iso(date_in_iso_format): date_parts_arr = date_in_iso_format.split('.') date_in_iso_without_milliseconds = date_parts_arr[0] return date_in_iso_without_milliseconds def increase_timestamp_by_x_days(date_in_unix_ms_timestamp, num_of_days): date_in_iso = timestamp_to_datestring(date_in_unix_ms_timestamp) date_in_iso_without_ms = remove_milliseconds_from_iso(date_in_iso) date_in_iso_plus_x_days = increase_iso_by_x_days(date_in_iso_without_ms, num_of_days) timestamp_in_unix_ms_plus_x_days = date_to_timestamp(date_in_iso_plus_x_days) return timestamp_in_unix_ms_plus_x_days def update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp): params['foundDateFrom'] = oldest_day_to_search_in_unix_timestamp 
params['foundDateTo'] = now_date_in_unix_timestamp params['sourceDateFrom'] = oldest_day_to_search_in_unix_timestamp params['sourceDateTo'] = now_date_in_unix_timestamp def update_params_with_delta_arg(params, time_delta_in_days_int): now_date_in_iso = datetime.utcnow().isoformat() now_date_in_iso_without_ms = remove_milliseconds_from_iso(now_date_in_iso) now_date_in_unix_timestamp = date_to_timestamp(now_date_in_iso_without_ms) oldest_day_to_search_in_unix_timestamp = increase_timestamp_by_x_days(now_date_in_unix_timestamp, -1 * time_delta_in_days_int) update_params_with_end_and_start_date(params, oldest_day_to_search_in_unix_timestamp, now_date_in_unix_timestamp) del params['time-delta'] def update_params_dict_according_to_delta_arg(params, time_delta_in_days_int): if 'foundDateFrom' in params or 'foundDateTo' in params: demisto.debug( "ERROR in get_alerts() - can't use found-date-to or found-date-from arguments with time-delta argument") return_error("Error: can't assign delta when assigned both found-date-to or found-date-from") else: update_params_with_delta_arg(params, time_delta_in_days_int) return params def handle_filters(found_date_from=None): args_camel_case = { 'alert-type': 'alertType', 'source-type': 'sourceType', 'network-type': 'networkType', 'source-date-from': 'sourceDateFrom', 'source-date-to': 'sourceDateTo', 'found-date-from': 'foundDateFrom', 'found-date-to': 'foundDateTo', 'is-flagged': 'isFlagged', 'is-closed': 'isClosed', 'source-ID': 'sourceId', 'first-seen-from': 'firstSeenFrom', 'first-seen-to': 'firstSeenTo', 'last-seen-from': 'lastSeenFrom', 'last-seen-to': 'lastSeenTo', 'value': 'iocValue', } params = {} for key in demisto.args(): if demisto.getArg(key): params[args_camel_case.get(key) or key] = demisto.getArg(key) if demisto.getArg('time-delta'): time_delta_in_days = demisto.getArg('time-delta') update_params_dict_according_to_delta_arg(params, int(time_delta_in_days)) elif found_date_from: params['foundDateFrom'] = found_date_from return params def get_alerts_helper(params): demisto.info("Executing get_alerts with params: {}".format(params)) response = http_request('GET', 'public/v1/data/alerts/alerts-list', params=params, json_response=True) alerts_human_readable = [] alerts_context = [] for alert_id in response: alert_human_readable, alert_context = get_alert_by_id_helper(alert_id) alerts_human_readable.append(alert_human_readable) alerts_context.append(alert_context) return alerts_human_readable, alerts_context def extract_mail(replies): if not replies: return '' mails = [] for reply in replies: mails.append(reply.get('Email')) return '\n'.join(mails) def extract_remediation(remidiations): if not remidiations: return '' remedies = [] string_format = "{0} - Status: {1}" for remedy in remidiations: remedies.append(string_format.format(remedy.get('Value'), remedy.get('Status'))) return '\n'.join(remedies) def hash_identifier(hash_val): if md5Regex.match(hash_val): return 'MD5' if sha1Regex.match(hash_val): return 'SHA1' if sha256Regex.match(hash_val): return 'SHA256' return 'Unknown' def extract_tags(tags): pretty_tags = [] string_format = "ID: {0} - Name: {1}" for tag in tags: pretty_tags.append(string_format.format(tag.get('_id'), tag.get('Name'))) return pretty_tags def get_alerts(): alerts_human_readable, alerts_context = get_alerts_helper(handle_filters()) headers = ['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL', 'SourceEmail', 'SourceNetworkType', 'IsClosed', 'Closed', 'IsFlagged', 'Images', 'Tags', 'Description', 'Title', 
'TakedownStatus', 'SubType'] demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alerts_context}, 'Contents': alerts_context, 'HumanReadable': tableToMarkdown('IntSights Alerts', alerts_human_readable, headers=headers, removeNull=False), 'ContentsFormat': formats['json'] }) def alert_to_readable(alert, parse_tags): is_closed = demisto.get(alert, 'IsClosed') if is_closed is None: is_closed = demisto.get(alert, 'Closed.IsClosed') readable = { 'ID': demisto.get(alert, '_id'), 'Severity': demisto.get(alert, 'Details.Severity'), 'Type': demisto.get(alert, 'Details.Type'), 'FoundDate': demisto.get(alert, 'FoundDate'), 'SourceType': demisto.get(alert, 'Details.Source.Type'), 'SourceURL': demisto.get(alert, 'Details.Source.URL'), 'SourceEmail': demisto.get(alert, 'Details.Source.Email'), 'SourceNetworkType': demisto.get(alert, 'Details.Source.NetworkType'), 'IsClosed': is_closed, 'IsFlagged': demisto.get(alert, 'IsFlagged'), 'Assets': demisto.get(alert, 'Assets'), 'Images': demisto.get(alert, 'Details.Images'), 'Description': demisto.get(alert, 'Details.Description'), 'Title': demisto.get(alert, 'Details.Title'), 'TakedownStatus': demisto.get(alert, 'TakedownStatus'), 'SubType': demisto.get(alert, 'Details.SubType'), } tags = demisto.get(alert, 'Details.Tags') if parse_tags: readable['Tags'] = extract_tags(tags) else: readable['Tag'] = [] for tag in tags: readable['Tag'].append({'ID': tag.get('_id'), 'Name': tag.get('Name')}) return readable def get_alert_by_id_helper(alert_id): response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True) return alert_to_readable(response, True), alert_to_readable(response, False) def get_alert_by_id(): alert_id = demisto.getArg('alert-id') activity_hr, activity_ctx = get_alert_by_id_helper(alert_id) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': activity_ctx}, 'Contents': activity_hr, 'HumanReadable': tableToMarkdown('IntSights Alert Details', [activity_hr], ['ID', 'Severity', 'Type', 'FoundDate', 'SourceType', 'SourceURL', 'SourceEmail', 'SourceNetworkType', 'IsClosed', 'IsFlagged', 'Images', 'Tags', 'Description', 'Title', 'TakedownStatus', 'SubType']), 'ContentsFormat': formats['json'] }) def get_alert_image(): image_id = demisto.getArg('image-id') response = http_request('GET', 'public/v1/data/alerts/alert-image/' + image_id) demisto.results(fileResult(image_id + '-image.jpeg', response.content)) def ask_analyst(): alert_id = demisto.getArg('alert-id') question = demisto.getArg('question') http_request('POST', 'public/v1/data/alerts/ask-the-analyst/' + alert_id, json_data={'Question': question}) question_details = {'ID': alert_id, 'Question': question} title = 'IntSights Ask the Analyst: ' \ 'Your question has been successfully sent to an analyst about the requested alert' demisto.results( { 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': question_details}, 'Contents': question_details, 'HumanReadable': tableToMarkdown(title, [question_details], ['ID', 'Question']), 'ContentsFormat': formats['json'] } ) def get_alert_activity(): alert_id = demisto.getArg('alert-id') response = http_request('GET', 'public/v1/data/alerts/activity-log/' + alert_id, json_response=True) alert = {'ID': alert_id, 'Activities': []} if not response: demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert}, 'Contents': response, 'HumanReadable': 
'Alert {} does not have activities.'.format(alert_id), 'ContentsFormat': formats['json'] }) else: human_readable_arr = [] for activity in response: alert['Activities'].append({ 'ID': demisto.get(activity, '_id'), 'Type': demisto.get(activity, 'Type'), 'Initiator': demisto.get(activity, 'Initiator'), 'CreatedDate': demisto.get(activity, 'CreatedDate'), 'UpdateDate': demisto.get(activity, 'UpdateDate'), 'RemediationBlocklistUpdate': demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate'), 'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')}, 'Mail': {'Replies': demisto.get(activity, 'AdditionalInformation.Mail.Replies')}, 'ReadBy': demisto.get(activity, 'ReadBy') }) human_readable_arr.append({ 'ID': demisto.get(activity, '_id'), 'Type': demisto.get(activity, 'Type'), 'Initiator': demisto.get(activity, 'Initiator'), 'CreatedDate': demisto.get(activity, 'CreatedDate'), 'UpdateDate': demisto.get(activity, 'UpdateDate'), 'RemediationBlocklistUpdate': extract_remediation( demisto.get(activity, 'AdditionalInformation.RemediationBlocklistUpdate')) if demisto.get(activity, 'AdditionalInformation') else '', 'AskTheAnalyst': {'Replies': demisto.get(activity, 'AdditionalInformation.AskTheAnalyst.Replies')}, 'Mail': extract_mail( demisto.get(activity, 'AdditionalInformation.Mail.Replies')) if demisto.get(activity, 'AdditionalInformation.Mail') else '', 'ReadBy': demisto.get(activity, 'ReadBy') }) headers = ['ID', 'Type', 'Initiator', 'CreatedDate', 'UpdateDate', 'RemediationBlocklistUpdate', 'AskTheAnalyst', 'Mail', 'ReadBy'] human_readable = tableToMarkdown('IntSights Alert {} Activity Log'.format(alert_id), t=human_readable_arr, headers=headers), demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': alert}, 'Contents': response, 'HumanReadable': human_readable, 'ContentsFormat': formats['json'] }) def change_severity(): alert_id = demisto.getArg('alert-id') severity = demisto.getArg('severity') http_request('PATCH', 'public/v1/data/alerts/change-severity/' + alert_id, json_data={'Severity': severity}) severity_details = {'ID': alert_id, 'Severity': severity} demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': severity_details}, 'Contents': severity_details, 'HumanReadable': tableToMarkdown( 'IntSights Update Alert Severity: The Alert severity has been successfully updated.', [severity_details], ['ID', 'Severity']), 'ContentsFormat': formats['json'] }) def get_assignee_id(assignee_email): response = http_request('GET', 'public/v1/account/users-details', json_response=True) for user in response: if assignee_email == user.get('Email', ''): return user.get('_id') raise Exception('user not found') def assign_alert(): alert_id = demisto.getArg('alert-id') assignee_email = demisto.getArg('assignee-email') is_mssp = demisto.getArg('is-mssp-optional') assignee_id = get_assignee_id(assignee_email) assign_details = {'ID': alert_id, 'Assignees.AssigneeID': assignee_id} url = 'public/v1/data/alerts/assign-alert/' + alert_id if is_mssp: url += '?IsMssp=' + is_mssp http_request('PATCH', url, json_data={'AssigneeID': assignee_id}) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': assign_details}, 'Contents': assign_details, 'HumanReadable': tableToMarkdown( 'IntSights Assign Alert: The Alert has been successfully assigned to assigneeID', [assign_details], ['ID', 'Assignees.AssigneeID']), 'ContentsFormat': 
formats['json'] }) def unassign_alert(): alert_id = demisto.getArg('alert-id') http_request('PATCH', 'public/v1/data/alerts/unassign-alert/' + alert_id) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id}}, 'Contents': {'ID': alert_id}, 'HumanReadable': 'Alert id: ' + alert_id + ' successfully unassigned', 'ContentsFormat': formats['json'] }) def close_alert(): alert_id = demisto.getArg('alert-id') reason = demisto.getArg('reason') free_text = demisto.getArg('free-text') is_hidden = demisto.getArg('is-hidden') == 'True' rate = demisto.getArg('rate') close_details = {'ID': alert_id, 'Close Reason': reason, 'Closed FreeText': free_text, 'Closed Rate': rate, 'IsHidden': is_hidden} close_details_context = {'ID': alert_id, 'Closed': {'Reason': reason, 'FreeText': free_text, 'Rate': rate}, 'IsHidden': is_hidden} url = 'public/v1/data/alerts/close-alert/' + alert_id json_data = {'Reason': reason} if free_text: json_data['FreeText'] = free_text if is_hidden: json_data['IsHidden'] = is_hidden if rate: json_data['Rate'] = rate http_request('PATCH', url, json_data) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': close_details}, 'Contents': close_details_context, 'HumanReadable': tableToMarkdown('IntSights Close Alert: The Alert has successfully been closed.', [close_details], ['ID', 'Close Reason', 'Closed FreeText', 'Closed Rate', 'IsHidden']), 'ContentsFormat': formats['json'] }) def send_mail(): alert_id = demisto.getArg('alert-id') emails = argToList(demisto.getArg('emails')) content = demisto.getArg('content') http_request('POST', 'public/v1/data/alerts/send-mail/' + alert_id, {'Emails': emails, 'Content': content}) context = { 'ID': alert_id, 'EmailID': emails, 'Question': content } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Email with content (' + content + ') sent to emails', 'ContentsFormat': formats['json'] }) def get_tag_id(alert_id, tag_name): response = http_request('GET', 'public/v1/data/alerts/get-complete-alert/' + alert_id, json_response=True) details = response.get('Details', {}) tags = details.get('Tags', []) for tag in tags: if tag.get('Name', '') == tag_name: return tag.get('_id', '') return 'Not found' def add_tag(): alert_id = demisto.getArg('alert-id') tag_name = demisto.getArg('tag-name') http_request('PATCH', 'public/v1/data/alerts/add-tag/' + alert_id, json_data={'TagName': tag_name}) tag_info = { 'TagName': tag_name, 'ID': get_tag_id(alert_id, tag_name) } context = { 'ID': alert_id, 'Tags': tag_info } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Tag (' + tag_name + ') added to alert id: ' + alert_id, 'ContentsFormat': formats['json'] }) def remove_tag(): alert_id = demisto.getArg('alert-id') tag_id = demisto.getArg('tag-id') http_request('PATCH', 'public/v1/data/alerts/remove-tag/' + alert_id, json_data={'TagID': tag_id}) context = { 'ID': alert_id, 'Tags': {'ID': tag_id} } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Tag id: ' + tag_id + ' removed from alert id: ' + alert_id, 'ContentsFormat': formats['json'] }) def add_comment(): alert_id = demisto.getArg('alert-id') comment = demisto.getArg('comment') http_request('PATCH', 
'public/v1/data/alerts/add-comment/' + alert_id, json_data={'Comment': comment}) context = { 'ID': alert_id, 'Comment': comment } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': 'Succesfully added comment "' + comment + '" to alert id: ' + alert_id, 'ContentsFormat': formats['json'] }) def ioc_to_readable(ioc_data): ioc_context = { 'ID': demisto.get(ioc_data, '_id'), 'SourceID': demisto.get(ioc_data, 'SourceID'), 'AccountID': demisto.get(ioc_data, 'AccountID'), 'Type': demisto.get(ioc_data, 'Type'), 'Value': demisto.get(ioc_data, 'Value'), 'FirstSeen': demisto.get(ioc_data, 'FirstSeen'), 'LastSeen': demisto.get(ioc_data, 'LastSeen'), 'Domain': demisto.get(ioc_data, 'Domain'), 'Status': demisto.get(ioc_data, 'Status'), 'Severity': demisto.get(ioc_data, 'Severity'), 'SourceName': demisto.get(ioc_data, 'Source.Name'), 'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'), 'Flags': {'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa')}, 'Enrichment': { 'Status': demisto.get(ioc_data, 'Enrichment.Status'), 'Data': demisto.get(ioc_data, 'Enrichment.Data'), 'Date': demisto.get(ioc_data, 'Enrichment.Data') } } ioc_readable = { 'ID': demisto.get(ioc_data, '_id'), 'SourceID': demisto.get(ioc_data, 'SourceID'), 'AccountID': demisto.get(ioc_data, 'AccountID'), 'Type': demisto.get(ioc_data, 'Type'), 'Value': demisto.get(ioc_data, 'Value'), 'FirstSeen': demisto.get(ioc_data, 'FirstSeen'), 'LastSeen': demisto.get(ioc_data, 'LastSeen'), 'Domain': demisto.get(ioc_data, 'Domain'), 'Status': demisto.get(ioc_data, 'Status'), 'Severity': demisto.get(ioc_data, 'Severity').get('Value'), 'SourceName': demisto.get(ioc_data, 'Source.Name'), 'SourceConfidence': demisto.get(ioc_data, 'Source.Confidence'), 'IsInAlexa': demisto.get(ioc_data, 'Flags.IsInAlexa'), 'Enrichment Status': demisto.get(ioc_data, 'Enrichment.Status'), 'Enrichment Data': demisto.get(ioc_data, 'Enrichment.Data') } dbot_score = { 'Indicator': ioc_context['Value'], 'Type': IOC_TYPE_TO_DBOT_TYPE[ioc_context['Type']], 'Vendor': 'IntSights', 'Score': translate_severity(ioc_readable['Severity']) } malicious_dict = { 'Vendor': 'IntSights', 'Description': 'IntSights severity level is High' } domain = {} if ioc_context['Domain']: domain['Name'] = ioc_context['Domain'] if translate_severity(ioc_readable['Severity']) == 3: domain['Malicious'] = malicious_dict ip_info = {} if ioc_context['Type'] == 'IpAddresses': ip_info['Address'] = ioc_context['Value'] if translate_severity(ioc_readable['Severity']) == 3: ip_info['Malicious'] = malicious_dict url_info = {} if ioc_context['Type'] == 'Urls': url_info['Data'] = ioc_context['Value'] if translate_severity(ioc_readable['Severity']) == 3: url_info['Malicious'] = malicious_dict hash_info = {} if ioc_context['Type'] == 'Hashes': hash_info['Name'] = ioc_context['Value'] hash_info[hash_identifier(ioc_context['Value'])] = ioc_context['Value'] if translate_severity(ioc_readable['Severity']) == 3: hash_info['Malicious'] = malicious_dict return ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info def search_for_ioc(): response = http_request('GET', 'public/v1/iocs/ioc-by-value', params=handle_filters(), json_response=True) if response: ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(response) demisto.results( { 'Type': entryTypes['note'], 'EntryContext': { 'IntSights.Iocs(val.ID === obj.ID)': ioc_context, 'DBotScore': dbot_score, 'Domain': 
domain, 'IP': ip_info, 'URL': url_info, 'File': hash_info }, 'Contents': response, 'HumanReadable': tableToMarkdown('IOC Information', [ioc_readable], ['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence', 'IsInAlexa', 'Enrichment Status', 'Enrichment Data']), 'ContentsFormat': formats['json'] } ) else: results_for_no_content('IOC Information') def results_for_no_content(cmd_name): demisto.results( { 'Type': entryTypes['note'], 'EntryContext': {'IntSights': {}}, 'Contents': {}, 'HumanReadable': '### {} \n\n Could not get any results.'.format(cmd_name), 'ContentsFormat': formats['json'] } ) def translate_severity(sev): if sev in ['Medium', 'High']: return 3 if sev == 'Low': return 2 return 0 def fetch_incidents(): last_run = demisto.getLastRun() demisto.info("IntSight fetch last run time is: {}".format(str(last_run))) if not last_run or 'time' not in last_run: fetch_delta, _ = parse_date_range(demisto.params().get('fetch_delta', DEFAULT_TIME_RANGE), to_timestamp=True) else: fetch_delta = last_run.get('time') current_fetch = fetch_delta alert_type = demisto.getParam('type') min_severity_level = demisto.params().get('severity_level', 'All') if min_severity_level not in SEVERITY_LEVEL: raise Exception("Minimum Alert severity level to fetch incidents incidents from, allowed values are: All," " Low, Medium, High. (Setting to All will fetch all incidents)") _, alerts_context = get_alerts_helper(handle_filters(fetch_delta)) incidents = [] for alert in alerts_context: if SEVERITY_LEVEL[min_severity_level] <= SEVERITY_LEVEL[alert.get('Severity', 'Low')]: if not alert_type or alert_type.lower() == alert.get('Type', '').lower(): incidents.append({ 'name': '{type} - {id}'.format(type=alert.get('Type', 'Type not found'), id=alert.get('ID')), 'occurred': alert.get('FoundDate'), 'severity': translate_severity(alert.get('Severity')), 'rawJSON': json.dumps(alert) }) alert_timestamp = date_to_timestamp(alert.get('FoundDate'), date_format='%Y-%m-%dT%H:%M:%S.%fZ') if alert_timestamp > current_fetch: current_fetch = alert_timestamp demisto.incidents(incidents) demisto.setLastRun({'time': current_fetch + 1000}) def get_iocs(): response = http_request('GET', 'public/v1/iocs/complete-iocs-list', params=handle_filters(), json_response=True) domains = [] ip_infos = [] url_infos = [] hash_infos = [] dbot_scores = [] iocs_context = [] iocs_readable = [] for indicator in response: ioc_context, ioc_readable, dbot_score, domain, ip_info, url_info, hash_info = ioc_to_readable(indicator) iocs_context.append(ioc_context) iocs_readable.append(ioc_readable) dbot_scores.append(dbot_score) domains.append(domain) ip_infos.append(ip_info) url_infos.append(url_info) hash_infos.append(hash_info) headers = ['ID', 'SourceID', 'AccountID', 'Type', 'Value', 'FirstSeen', 'LastSeen', 'Domain', 'Status', 'Severity', 'SourceName', 'SourceConfidence', 'IsInAlexa', 'Enrichment Status', 'Enrichment Data'] demisto.results( { 'Type': entryTypes['note'], 'EntryContext': { 'IntSights.Iocs': iocs_context, 'DBotScore': dbot_scores, 'Domain': domains, 'IP': ip_infos, 'URL': url_infos, 'File': hash_infos }, 'Contents': response, 'HumanReadable': tableToMarkdown('IOC Information', t=iocs_readable, headers=headers), 'ContentsFormat': formats['json'] } ) def takedown_request(): alert_id = demisto.getArg('alert-id') http_request('PATCH', 'public/v1/data/alerts/takedown-request/' + alert_id) context = { 'ID': alert_id, } human_readable = '### IntSights Alert Takedown\n' \ 
'The Alert Takedown request has been sent successfully for {}'.format(str(alert_id)) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': human_readable, 'ContentsFormat': formats['json'] }) def get_alert_takedown_status(): alert_id = demisto.getArg('alert-id') response = http_request('GET', 'public/v1/data/alerts/takedown-status/' + alert_id) context = { 'ID': alert_id, 'TakedownStatus': response.text } demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': context}, 'Contents': context, 'HumanReadable': tableToMarkdown('IntSights Alert Takedown Status', [context], ['ID', 'TakedownStatus']), 'ContentsFormat': formats['json'] }) def update_ioc_blocklist_status(): alert_id = demisto.getArg('alert-id') types = argToList(demisto.getArg('type')) values = argToList(demisto.getArg('value')) statuses = argToList(demisto.getArg('blocklist-status')) if len(types) != len(values) or len(types) != len(statuses): return_error('The lists must be of equal length. For each IOC, provide an entry in each list.') data = [] for count, type_ in enumerate(types): data.append({ 'Type': type_, 'Value': values[count], 'BlocklistStatus': statuses[count] }) http_request('PATCH', 'public/v1/data/alerts/change-iocs-blocklist-status/' + alert_id, json_data={'Iocs': data}) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': statuses}}, 'Contents': {'ID': alert_id, 'Status': statuses}, 'HumanReadable': tableToMarkdown('IntSights Update IOC BlockList Status for ' + alert_id, data, ['BlocklistStatus']), 'ContentsFormat': formats['json'] }) def get_ioc_blocklist_status(): alert_id = demisto.getArg('alert-id') response = http_request('GET', 'public/v1/data/alerts/blocklist-status/' + alert_id, json_response=True) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': { 'IntSights.Alerts(val.ID === obj.ID)': {'ID': alert_id, 'Status': [ioc.get('Status') for ioc in response]}}, 'Contents': response, 'HumanReadable': tableToMarkdown('IntSights Blocklist Status for ' + alert_id, response, ['Status']), 'ContentsFormat': formats['json'] }) def get_mssp_sub_accounts(): account_id = demisto.getParam('credentials')['identifier'] accounts = http_request('GET', 'public/v1/mssp/customers', json_response=True) if not accounts: return_error("intsights-mssp-get-sub-accounts failed to return data.") for account in accounts: account["ID"] = account["_id"] del account["_id"] if len(accounts) < 1: return_error('Current MSSP Account has no sub accounts.') account_ids = [i["ID"] for i in accounts] if MSSP_ACCOUNT_ID not in account_ids: demisto.log("[DEBUG] - MSSP sub accounts:" + str(accounts)) return_error('Entered sub account id ({}) is not part of this mssp account'.format(MSSP_ACCOUNT_ID)) for i, account in enumerate(account_ids): HEADERS['Account-Id'] = account account_ua = http_request('GET', 'public/v1/account/used-assets', json_response=True) if not account_ua: continue accounts[i].update(account_ua) demisto.results({ 'Type': entryTypes['note'], 'EntryContext': {'IntSights.MsspAccount(val.ID === obj.ID)': accounts}, 'HumanReadable': tableToMarkdown('IntSights MSSP accounts used assets ' + account_id, accounts, ["ID", 'CompanyName', "Status", "AssetsLimit", "AssetsCount"]), 'Contents': accounts, 'ContentsFormat': formats['json'] }) HEADERS['Account-Id'] = MSSP_ACCOUNT_ID def test_module(): http_request('GET', 
'public/v1/api/version') if demisto.params().get('isFetch'): min_severity_level = demisto.params().get('severity_level', 'All') if min_severity_level not in SEVERITY_LEVEL: return_error("Minimum Alert severity level to fetch incidents incidents from, allowed values are: " "All, Low, Medium, High. (Setting to All will fetch all incidents)") demisto.results('ok') try: if demisto.command() == 'test-module': test_module() elif demisto.command() == 'fetch-incidents': fetch_incidents() elif demisto.command() == 'intsights-mssp-get-sub-accounts': get_mssp_sub_accounts() elif demisto.command() == 'intsights-get-alerts': get_alerts() elif demisto.command() == 'intsights-get-alert-image': get_alert_image() elif demisto.command() == 'intsights-get-alert-activities': get_alert_activity() elif demisto.command() == 'intsights-assign-alert': assign_alert() elif demisto.command() == 'intsights-unassign-alert': unassign_alert() elif demisto.command() == 'intsights-send-mail': send_mail() elif demisto.command() == 'intsights-ask-the-analyst': ask_analyst() elif demisto.command() == 'intsights-add-tag-to-alert': add_tag() elif demisto.command() == 'intsights-remove-tag-from-alert': remove_tag() elif demisto.command() == 'intsights-add-comment-to-alert': add_comment() elif demisto.command() == 'intsights-update-alert-severity': change_severity() elif demisto.command() == 'intsights-get-alert-by-id': get_alert_by_id() elif demisto.command() == 'intsights-get-ioc-by-value': search_for_ioc() elif demisto.command() == 'intsights-get-iocs': get_iocs() elif demisto.command() == 'intsights-alert-takedown-request': takedown_request() elif demisto.command() == 'intsights-get-alert-takedown-status': get_alert_takedown_status() elif demisto.command() == 'intsights-get-ioc-blocklist-status': get_ioc_blocklist_status() elif demisto.command() == 'intsights-update-ioc-blocklist-status': update_ioc_blocklist_status() elif demisto.command() == 'intsights-close-alert': close_alert() else: raise Exception('Unrecognized command: ' + demisto.command()) except Exception as err: return_error(str(err))
true
true
f7084dda96df5fbdb4c6f4b4ce402acbfe593566
851
py
Python
SupportLibraries/base_helpers.py
Abhilash04/SeleniumPythonHybridFramework
277db41afed6b24ec2c8e9b579925ad3da75e937
[ "MIT" ]
9
2018-11-14T09:19:22.000Z
2021-05-18T15:18:45.000Z
SupportLibraries/base_helpers.py
Abhilash04/SeleniumPythonHybridFramework
277db41afed6b24ec2c8e9b579925ad3da75e937
[ "MIT" ]
null
null
null
SupportLibraries/base_helpers.py
Abhilash04/SeleniumPythonHybridFramework
277db41afed6b24ec2c8e9b579925ad3da75e937
[ "MIT" ]
4
2018-09-18T16:56:37.000Z
2020-07-02T11:57:21.000Z
""" This module contains common reusable functions. """ from traceback import print_stack from configparser import ConfigParser from SupportLibraries.ui_helpers import UIHelpers class BaseHelpers(UIHelpers): """ This class includes basic reusable base_helpers. """ def __init__(self, driver): super().__init__(driver) self.driver = driver def load_properties_file(self): """ This method loads the properties/ini file :return: this method returns config reader instance. """ config = None try: # noinspection PyBroadException config = ConfigParser() config.read('test.ini') except Exception as ex: self.log.error("Failed to load ini/properties file.", ex) print_stack() return config
23.638889
69
0.633373
from traceback import print_stack
from configparser import ConfigParser
from SupportLibraries.ui_helpers import UIHelpers


class BaseHelpers(UIHelpers):

    def __init__(self, driver):
        super().__init__(driver)
        self.driver = driver

    def load_properties_file(self):
        config = None
        try:
            config = ConfigParser()
            config.read('test.ini')
        except Exception as ex:
            self.log.error("Failed to load ini/properties file.", ex)
            print_stack()

        return config
true
true
f7084ecbce9cfc66335bac95d0a736f1ac69da7d
6,856
py
Python
airflow-service-mpack/common-services/AIRFLOW/1.10.0/package/scripts/airflow_setup.py
hapadong/ambari-airflow-mpack
92d0345c18e0a345cc9e9493d926ec0e4cc98980
[ "Apache-2.0" ]
null
null
null
airflow-service-mpack/common-services/AIRFLOW/1.10.0/package/scripts/airflow_setup.py
hapadong/ambari-airflow-mpack
92d0345c18e0a345cc9e9493d926ec0e4cc98980
[ "Apache-2.0" ]
null
null
null
airflow-service-mpack/common-services/AIRFLOW/1.10.0/package/scripts/airflow_setup.py
hapadong/ambari-airflow-mpack
92d0345c18e0a345cc9e9493d926ec0e4cc98980
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python import sys, os, pwd, grp, signal, time, base64 from resource_management import * from resource_management.core.exceptions import Fail from resource_management.core.logger import Logger from resource_management.core.resources.system import Execute, Directory, File from resource_management.core.shell import call from resource_management.core.system import System from resource_management.libraries.functions.default import default def airflow_make_systemd_scripts_webserver(env): import params env.set_params(params) confFileText = format("""[Unit] Description=Airflow webserver daemon After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service Wants=postgresql.service mysql.service redis.service rabbitmq-server.service [Service] EnvironmentFile=/etc/sysconfig/airflow User={airflow_user} Group={airflow_group} Type=simple ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow webserver -D --pid /usr/local/airflow/airflow-webserver.pid --stderr /var/log/airflow/webserver.err --stdout /var/log/airflow/webserver.out -l /var/log/airflow/webserver.log PIDFile=/usr/local/airflow/airflow-webserver.pid Restart=always RestartSec=5s SyslogIdentifier=airflow-scheduler [Install] WantedBy=multi-user.target """) with open("/etc/systemd/system/multi-user.target.wants/airflow-webserver.service", 'w') as configFile: configFile.write(confFileText) configFile.close() confFileText = format("""AIRFLOW_HOME={airflow_home} AIRFLOW_CONFIG={airflow_home}/airflow.cfg PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin """) with open("/etc/sysconfig/airflow", 'w') as configFile: configFile.write(confFileText) configFile.close() Execute("systemctl daemon-reload") def airflow_make_systemd_scripts_scheduler(env): import params env.set_params(params) confFileText = format("""[Unit] Description=Airflow scheduler daemon After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service Wants=postgresql.service mysql.service redis.service rabbitmq-server.service [Service] EnvironmentFile=/etc/sysconfig/airflow User={airflow_user} Group={airflow_group} Type=simple ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow scheduler -D --pid /usr/local/airflow/airflow-scheduler.pid --stderr /var/log/airflow/scheduler.err --stdout /var/log/airflow/scheduler.out -l /var/log/airflow/scheduler.log PIDFile=/usr/local/airflow/airflow-scheduler.pid Restart=always RestartSec=5s SyslogIdentifier=airflow-scheduler [Install] WantedBy=multi-user.target """) with open("/etc/systemd/system/multi-user.target.wants/airflow-scheduler.service", 'w') as configFile: configFile.write(confFileText) configFile.close() confFileText = format("""AIRFLOW_HOME={airflow_home} AIRFLOW_CONFIG={airflow_home}/airflow.cfg PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin """) with open("/etc/sysconfig/airflow", 'w') as configFile: configFile.write(confFileText) configFile.close() Execute("systemctl daemon-reload") def airflow_make_systemd_scripts_worker(env): import params env.set_params(params) confFileText = format("""[Unit] Description=Airflow worker daemon After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service Wants=postgresql.service mysql.service redis.service rabbitmq-server.service [Service] EnvironmentFile=/etc/sysconfig/airflow User={airflow_user} Group={airflow_group} Type=simple 
ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow worker -D --pid /usr/local/airflow/airflow-worker.pid --stderr /var/log/airflow/worker.err --stdout /var/log/airflow/worker.out -l /var/log/airflow/worker.log PIDFile=/usr/local/airflow/airflow-worker.pid Restart=always RestartSec=5s SyslogIdentifier=airflow-worker [Install] WantedBy=multi-user.target """) with open("/etc/systemd/system/multi-user.target.wants/airflow-worker.service", 'w') as configFile: configFile.write(confFileText) configFile.close() confFileText = format("""AIRFLOW_HOME={airflow_home} AIRFLOW_CONFIG={airflow_home}/airflow.cfg PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin """) with open("/etc/sysconfig/airflow", 'w') as configFile: configFile.write(confFileText) configFile.close() Execute("systemctl daemon-reload") def airflow_generate_config_for_section(sections): """ Generating values for airflow.cfg for each section. This allows to add custom-site configuration from ambari to cfg file. """ result = {} for section, data in sections.items(): section_config = "" for key, value in data.items(): section_config += format("{key} = {value}\n") result[section] = section_config return result def airflow_configure(env): import params env.set_params(params) airflow_config_file = "" airflow_config = airflow_generate_config_for_section({ "core" : params.config['configurations']['airflow-core-site'], "cli" : params.config['configurations']['airflow-cli-site'], "api" : params.config['configurations']['airflow-api-site'], "operators" : params.config['configurations']['airflow-operators-site'], "webserver" : params.config['configurations']['airflow-webserver-site'], "email" : params.config['configurations']['airflow-email-site'], "smtp" : params.config['configurations']['airflow-smtp-site'], "celery" : params.config['configurations']['airflow-celery-site'], "dask" : params.config['configurations']['airflow-dask-site'], "scheduler" : params.config['configurations']['airflow-scheduler-site'], "ldap" : params.config['configurations']['airflow-ldap-site'], "mesos" : params.config['configurations']['airflow-mesos-site'], "kerberos" : params.config['configurations']['airflow-kerberos-site'], "github_enterprise" : params.config['configurations']['airflow-githubenterprise-site'], "admin" : params.config['configurations']['airflow-admin-site'], "lineage" : params.config['configurations']['airflow-lineage-site'], "atlas" : params.config['configurations']['airflow-atlas-site'], "hive" : params.config['configurations']['airflow-hive-site'], "celery_broker_transport_options" : params.config['configurations']['airflow-celerybrokertransportoptions-site'], "elasticsearch" : params.config['configurations']['airflow-elasticsearch-site'], "kubernetes" : params.config['configurations']['airflow-kubernetes-site'], "kubernetes_secrets" : params.config['configurations']['airflow-kubernetessecrets-site'] }) for section, value in airflow_config.items(): airflow_config_file += format("[{section}]\n{value}\n") with open(params.airflow_home + "/airflow.cfg", 'w') as configFile: configFile.write(airflow_config_file) configFile.close()
37.67033
240
0.777275
import sys, os, pwd, grp, signal, time, base64 from resource_management import * from resource_management.core.exceptions import Fail from resource_management.core.logger import Logger from resource_management.core.resources.system import Execute, Directory, File from resource_management.core.shell import call from resource_management.core.system import System from resource_management.libraries.functions.default import default def airflow_make_systemd_scripts_webserver(env): import params env.set_params(params) confFileText = format("""[Unit] Description=Airflow webserver daemon After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service Wants=postgresql.service mysql.service redis.service rabbitmq-server.service [Service] EnvironmentFile=/etc/sysconfig/airflow User={airflow_user} Group={airflow_group} Type=simple ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow webserver -D --pid /usr/local/airflow/airflow-webserver.pid --stderr /var/log/airflow/webserver.err --stdout /var/log/airflow/webserver.out -l /var/log/airflow/webserver.log PIDFile=/usr/local/airflow/airflow-webserver.pid Restart=always RestartSec=5s SyslogIdentifier=airflow-scheduler [Install] WantedBy=multi-user.target """) with open("/etc/systemd/system/multi-user.target.wants/airflow-webserver.service", 'w') as configFile: configFile.write(confFileText) configFile.close() confFileText = format("""AIRFLOW_HOME={airflow_home} AIRFLOW_CONFIG={airflow_home}/airflow.cfg PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin """) with open("/etc/sysconfig/airflow", 'w') as configFile: configFile.write(confFileText) configFile.close() Execute("systemctl daemon-reload") def airflow_make_systemd_scripts_scheduler(env): import params env.set_params(params) confFileText = format("""[Unit] Description=Airflow scheduler daemon After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service Wants=postgresql.service mysql.service redis.service rabbitmq-server.service [Service] EnvironmentFile=/etc/sysconfig/airflow User={airflow_user} Group={airflow_group} Type=simple ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow scheduler -D --pid /usr/local/airflow/airflow-scheduler.pid --stderr /var/log/airflow/scheduler.err --stdout /var/log/airflow/scheduler.out -l /var/log/airflow/scheduler.log PIDFile=/usr/local/airflow/airflow-scheduler.pid Restart=always RestartSec=5s SyslogIdentifier=airflow-scheduler [Install] WantedBy=multi-user.target """) with open("/etc/systemd/system/multi-user.target.wants/airflow-scheduler.service", 'w') as configFile: configFile.write(confFileText) configFile.close() confFileText = format("""AIRFLOW_HOME={airflow_home} AIRFLOW_CONFIG={airflow_home}/airflow.cfg PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin """) with open("/etc/sysconfig/airflow", 'w') as configFile: configFile.write(confFileText) configFile.close() Execute("systemctl daemon-reload") def airflow_make_systemd_scripts_worker(env): import params env.set_params(params) confFileText = format("""[Unit] Description=Airflow worker daemon After=network.target postgresql.service mysql.service redis.service rabbitmq-server.service Wants=postgresql.service mysql.service redis.service rabbitmq-server.service [Service] EnvironmentFile=/etc/sysconfig/airflow User={airflow_user} Group={airflow_group} Type=simple 
ExecStart={conda_root}/envs/{conda_airflow_virtualenv}/bin/airflow worker -D --pid /usr/local/airflow/airflow-worker.pid --stderr /var/log/airflow/worker.err --stdout /var/log/airflow/worker.out -l /var/log/airflow/worker.log PIDFile=/usr/local/airflow/airflow-worker.pid Restart=always RestartSec=5s SyslogIdentifier=airflow-worker [Install] WantedBy=multi-user.target """) with open("/etc/systemd/system/multi-user.target.wants/airflow-worker.service", 'w') as configFile: configFile.write(confFileText) configFile.close() confFileText = format("""AIRFLOW_HOME={airflow_home} AIRFLOW_CONFIG={airflow_home}/airflow.cfg PATH={conda_root}/envs/{conda_airflow_virtualenv}/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin """) with open("/etc/sysconfig/airflow", 'w') as configFile: configFile.write(confFileText) configFile.close() Execute("systemctl daemon-reload") def airflow_generate_config_for_section(sections): result = {} for section, data in sections.items(): section_config = "" for key, value in data.items(): section_config += format("{key} = {value}\n") result[section] = section_config return result def airflow_configure(env): import params env.set_params(params) airflow_config_file = "" airflow_config = airflow_generate_config_for_section({ "core" : params.config['configurations']['airflow-core-site'], "cli" : params.config['configurations']['airflow-cli-site'], "api" : params.config['configurations']['airflow-api-site'], "operators" : params.config['configurations']['airflow-operators-site'], "webserver" : params.config['configurations']['airflow-webserver-site'], "email" : params.config['configurations']['airflow-email-site'], "smtp" : params.config['configurations']['airflow-smtp-site'], "celery" : params.config['configurations']['airflow-celery-site'], "dask" : params.config['configurations']['airflow-dask-site'], "scheduler" : params.config['configurations']['airflow-scheduler-site'], "ldap" : params.config['configurations']['airflow-ldap-site'], "mesos" : params.config['configurations']['airflow-mesos-site'], "kerberos" : params.config['configurations']['airflow-kerberos-site'], "github_enterprise" : params.config['configurations']['airflow-githubenterprise-site'], "admin" : params.config['configurations']['airflow-admin-site'], "lineage" : params.config['configurations']['airflow-lineage-site'], "atlas" : params.config['configurations']['airflow-atlas-site'], "hive" : params.config['configurations']['airflow-hive-site'], "celery_broker_transport_options" : params.config['configurations']['airflow-celerybrokertransportoptions-site'], "elasticsearch" : params.config['configurations']['airflow-elasticsearch-site'], "kubernetes" : params.config['configurations']['airflow-kubernetes-site'], "kubernetes_secrets" : params.config['configurations']['airflow-kubernetessecrets-site'] }) for section, value in airflow_config.items(): airflow_config_file += format("[{section}]\n{value}\n") with open(params.airflow_home + "/airflow.cfg", 'w') as configFile: configFile.write(airflow_config_file) configFile.close()
true
true
f7084ee37b0683bda4a3495022074260d110fd35
2,715
py
Python
closure_linter/testutil.py
Announcement/closure-linter
de66a7ca6ca0ca248d55d567aad96b7e0657be85
[ "Apache-2.0" ]
123
2015-07-03T08:39:12.000Z
2021-11-14T09:28:14.000Z
tools/closure_linter/closure_linter/testutil.py
flyover/node
8e743a77e48256444c3637f8282e4cad35da508c
[ "Artistic-2.0" ]
25
2015-06-22T10:40:55.000Z
2019-05-06T12:01:53.000Z
tools/closure_linter/closure_linter/testutil.py
flyover/node
8e743a77e48256444c3637f8282e4cad35da508c
[ "Artistic-2.0" ]
36
2015-07-03T15:17:13.000Z
2020-10-13T16:34:28.000Z
#!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility functions for testing gjslint components."""

# Allow non-Google copyright
# pylint: disable=g-bad-file-header

__author__ = ('nnaze@google.com (Nathan Naze)')

import StringIO

from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer


def TokenizeSource(source):
  """Convert a source into a string of tokens.

  Args:
    source: A source file as a string or file-like object (iterates lines).

  Returns:
    The first token of the resulting token stream.
  """
  if isinstance(source, basestring):
    source = StringIO.StringIO(source)

  tokenizer = javascripttokenizer.JavaScriptTokenizer()
  return tokenizer.TokenizeFile(source)


def TokenizeSourceAndRunEcmaPass(source):
  """Tokenize a source and run the EcmaMetaDataPass on it.

  Args:
    source: A source file as a string or file-like object (iterates lines).

  Returns:
    The first token of the resulting token stream.
  """
  start_token = TokenizeSource(source)
  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
  ecma_pass.Process(start_token)
  return start_token


def ParseFunctionsAndComments(source, error_handler=None):
  """Run the tokenizer and tracker and return comments and functions found.

  Args:
    source: A source file as a string or file-like object (iterates lines).
    error_handler: An error handler.

  Returns:
    The functions and comments as a tuple.
  """
  start_token = TokenizeSourceAndRunEcmaPass(source)

  tracker = javascriptstatetracker.JavaScriptStateTracker()
  if error_handler is not None:
    tracker.DocFlagPass(start_token, error_handler)

  functions = []
  comments = []
  for token in start_token:
    tracker.HandleToken(token, tracker.GetLastNonSpaceToken())

    function = tracker.GetFunction()
    if function and function not in functions:
      functions.append(function)

    comment = tracker.GetDocComment()
    if comment and comment not in comments:
      comments.append(comment)

    tracker.HandleAfterToken(token)

  return functions, comments
28.578947
75
0.755064
__author__ = ('nnaze@google.com (Nathan Naze)')

import StringIO

from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer


def TokenizeSource(source):
  if isinstance(source, basestring):
    source = StringIO.StringIO(source)

  tokenizer = javascripttokenizer.JavaScriptTokenizer()
  return tokenizer.TokenizeFile(source)


def TokenizeSourceAndRunEcmaPass(source):
  start_token = TokenizeSource(source)
  ecma_pass = ecmametadatapass.EcmaMetaDataPass()
  ecma_pass.Process(start_token)
  return start_token


def ParseFunctionsAndComments(source, error_handler=None):
  start_token = TokenizeSourceAndRunEcmaPass(source)

  tracker = javascriptstatetracker.JavaScriptStateTracker()
  if error_handler is not None:
    tracker.DocFlagPass(start_token, error_handler)

  functions = []
  comments = []
  for token in start_token:
    tracker.HandleToken(token, tracker.GetLastNonSpaceToken())

    function = tracker.GetFunction()
    if function and function not in functions:
      functions.append(function)

    comment = tracker.GetDocComment()
    if comment and comment not in comments:
      comments.append(comment)

    tracker.HandleAfterToken(token)

  return functions, comments
true
true
f7084f20a1b8eee1a82a652ae308751180897d7a
55
py
Python
ma_gym/envs/traffic_junction_hard/__init__.py
joosephook/ma-gym
535df389326a5b7b389d652e775b7b8bd2e5d778
[ "Apache-2.0" ]
null
null
null
ma_gym/envs/traffic_junction_hard/__init__.py
joosephook/ma-gym
535df389326a5b7b389d652e775b7b8bd2e5d778
[ "Apache-2.0" ]
null
null
null
ma_gym/envs/traffic_junction_hard/__init__.py
joosephook/ma-gym
535df389326a5b7b389d652e775b7b8bd2e5d778
[ "Apache-2.0" ]
null
null
null
from .traffic_junction_hard import TrafficJunctionHard
27.5
54
0.909091
from .traffic_junction_hard import TrafficJunctionHard
true
true
f708501fdbd0f30992b089b0874da1cba328ec3c
3,596
py
Python
server/settings/environments/development.py
Islast/AutSPACEs
2392e4ee731c44661059b35cc205613697c782d6
[ "MIT" ]
null
null
null
server/settings/environments/development.py
Islast/AutSPACEs
2392e4ee731c44661059b35cc205613697c782d6
[ "MIT" ]
null
null
null
server/settings/environments/development.py
Islast/AutSPACEs
2392e4ee731c44661059b35cc205613697c782d6
[ "MIT" ]
null
null
null
""" This file contains all the settings that defines the development server. SECURITY WARNING: don't run with debug turned on in production! """ import logging from typing import List from server.settings.components import config from server.settings.components.common import INSTALLED_APPS, MIDDLEWARE # Setting the development status: DEBUG = True ALLOWED_HOSTS = [ config('DOMAIN_NAME'), 'localhost', '0.0.0.0', # noqa: S104 '127.0.0.1', '[::1]', ] # Installed apps for development only: INSTALLED_APPS += ( 'debug_toolbar', 'nplusone.ext.django', 'django_migration_linter', 'django_test_migrations.contrib.django_checks.AutoNames', 'django_test_migrations.contrib.django_checks.DatabaseConfiguration', 'extra_checks', ) # Static files: # https://docs.djangoproject.com/en/2.2/ref/settings/#std:setting-STATICFILES_DIRS STATICFILES_DIRS: List[str] = [] # Django debug toolbar: # https://django-debug-toolbar.readthedocs.io MIDDLEWARE += ( 'debug_toolbar.middleware.DebugToolbarMiddleware', # https://github.com/bradmontgomery/django-querycount # Prints how many queries were executed, useful for the APIs. 'querycount.middleware.QueryCountMiddleware', ) def _custom_show_toolbar(request): """Only show the debug toolbar to users with the superuser flag.""" return DEBUG and request.user.is_superuser DEBUG_TOOLBAR_CONFIG = { 'SHOW_TOOLBAR_CALLBACK': 'server.settings.environments.development._custom_show_toolbar', } # This will make debug toolbar to work with django-csp, # since `ddt` loads some scripts from `ajax.googleapis.com`: CSP_SCRIPT_SRC = ("'self'", 'ajax.googleapis.com') CSP_IMG_SRC = ("'self'", 'data:') CSP_CONNECT_SRC = ("'self'",) # nplusone # https://github.com/jmcarp/nplusone # Should be the first in line: MIDDLEWARE = ( # noqa: WPS440 'nplusone.ext.django.NPlusOneMiddleware', ) + MIDDLEWARE # Logging N+1 requests: NPLUSONE_RAISE = True # comment out if you want to allow N+1 requests NPLUSONE_LOGGER = logging.getLogger('django') NPLUSONE_LOG_LEVEL = logging.WARN NPLUSONE_WHITELIST = [ {'model': 'admin.*'}, ] # django-test-migrations # https://github.com/wemake-services/django-test-migrations # Set of badly named migrations to ignore: DTM_IGNORED_MIGRATIONS = frozenset(( ('axes', '*'), )) # django-extra-checks # https://github.com/kalekseev/django-extra-checks EXTRA_CHECKS = { 'checks': [ # Forbid `unique_together`: 'no-unique-together', # Require non empty `upload_to` argument: 'field-file-upload-to', # Use the indexes option instead: 'no-index-together', # Each model must be registered in admin: 'model-admin', # FileField/ImageField must have non empty `upload_to` argument: 'field-file-upload-to', # Text fields shouldn't use `null=True`: 'field-text-null', # Prefer using BooleanField(null=True) instead of NullBooleanField: 'field-boolean-null', # Don't pass `null=False` to model fields (this is django default) 'field-null', # ForeignKey fields must specify db_index explicitly if used in # other indexes: {'id': 'field-foreign-key-db-index', 'when': 'indexes'}, # If field nullable `(null=True)`, # then default=None argument is redundant and should be removed: 'field-default-null', # Fields with choices must have companion CheckConstraint # to enforce choices on database level 'field-choices-constraint', ], }
27.450382
82
0.691324
import logging
from typing import List

from server.settings.components import config
from server.settings.components.common import INSTALLED_APPS, MIDDLEWARE

DEBUG = True

ALLOWED_HOSTS = [
    config('DOMAIN_NAME'),
    'localhost',
    '0.0.0.0',
    '127.0.0.1',
    '[::1]',
]

INSTALLED_APPS += (
    'debug_toolbar',
    'nplusone.ext.django',
    'django_migration_linter',
    'django_test_migrations.contrib.django_checks.AutoNames',
    'django_test_migrations.contrib.django_checks.DatabaseConfiguration',
    'extra_checks',
)

STATICFILES_DIRS: List[str] = []

MIDDLEWARE += (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'querycount.middleware.QueryCountMiddleware',
)


def _custom_show_toolbar(request):
    return DEBUG and request.user.is_superuser


DEBUG_TOOLBAR_CONFIG = {
    'SHOW_TOOLBAR_CALLBACK':
        'server.settings.environments.development._custom_show_toolbar',
}

CSP_SCRIPT_SRC = ("'self'", 'ajax.googleapis.com')
CSP_IMG_SRC = ("'self'", 'data:')
CSP_CONNECT_SRC = ("'self'",)

MIDDLEWARE = (
    'nplusone.ext.django.NPlusOneMiddleware',
) + MIDDLEWARE

NPLUSONE_RAISE = True
NPLUSONE_LOGGER = logging.getLogger('django')
NPLUSONE_LOG_LEVEL = logging.WARN
NPLUSONE_WHITELIST = [
    {'model': 'admin.*'},
]

DTM_IGNORED_MIGRATIONS = frozenset((
    ('axes', '*'),
))

EXTRA_CHECKS = {
    'checks': [
        'no-unique-together',
        'field-file-upload-to',
        'no-index-together',
        'model-admin',
        'field-file-upload-to',
        'field-text-null',
        # Prefer using BooleanField(null=True) instead of NullBooleanField:
        'field-boolean-null',
        # Don't pass `null=False` to model fields (this is django default)
        'field-null',
        {'id': 'field-foreign-key-db-index', 'when': 'indexes'},
        'field-default-null',
        'field-choices-constraint',
    ],
}
true
true
f7085056f732870808a5fc11c2c6e7523990d5b1
594
py
Python
tools/generate_taint_models/parameter.py
sthagen/facebook-pyre-check
cea188088c9632b10e0d0a658a8f1954f19413cd
[ "MIT" ]
null
null
null
tools/generate_taint_models/parameter.py
sthagen/facebook-pyre-check
cea188088c9632b10e0d0a658a8f1954f19413cd
[ "MIT" ]
null
null
null
tools/generate_taint_models/parameter.py
sthagen/facebook-pyre-check
cea188088c9632b10e0d0a658a8f1954f19413cd
[ "MIT" ]
null
null
null
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from enum import auto, Enum
from typing import NamedTuple, Optional


class Parameter(NamedTuple):
    class Kind(Enum):
        ARG = auto()
        VARARG = auto()
        KWARG = auto()

    name: str
    annotation: Optional[str]
    kind: Kind

    def __eq__(self, other: "Parameter") -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.name == other.name
24.75
65
0.654882
from enum import auto, Enum
from typing import NamedTuple, Optional


class Parameter(NamedTuple):
    class Kind(Enum):
        ARG = auto()
        VARARG = auto()
        KWARG = auto()

    name: str
    annotation: Optional[str]
    kind: Kind

    def __eq__(self, other: "Parameter") -> bool:
        if not isinstance(other, self.__class__):
            return False
        return self.name == other.name
true
true
f70850b2252c5eeef4ef80bf3767acdfeb2562eb
481
py
Python
tests/test_app/models.py
enderlabs/django-encrypted-json
148dae03001e1731b40e12bb9cac749c7edf1caa
[ "MIT" ]
null
null
null
tests/test_app/models.py
enderlabs/django-encrypted-json
148dae03001e1731b40e12bb9cac749c7edf1caa
[ "MIT" ]
null
null
null
tests/test_app/models.py
enderlabs/django-encrypted-json
148dae03001e1731b40e12bb9cac749c7edf1caa
[ "MIT" ]
1
2019-03-07T10:01:47.000Z
2019-03-07T10:01:47.000Z
from django.db import models

from django_encrypted_json.fields import EncryptedValueJsonField


# Create your models here.
class TestModel(models.Model):
    json = EncryptedValueJsonField(default={})
    optional_json = EncryptedValueJsonField(blank=True, null=True)
    partial_encrypt = EncryptedValueJsonField(
        blank=True, null=True, skip_keys=('test', ))
    partial_encrypt_w_default = EncryptedValueJsonField(
        blank=True, skip_keys=('test', ), default=[])
32.066667
66
0.75052
from django.db import models

from django_encrypted_json.fields import EncryptedValueJsonField


class TestModel(models.Model):
    json = EncryptedValueJsonField(default={})
    optional_json = EncryptedValueJsonField(blank=True, null=True)
    partial_encrypt = EncryptedValueJsonField(
        blank=True, null=True, skip_keys=('test', ))
    partial_encrypt_w_default = EncryptedValueJsonField(
        blank=True, skip_keys=('test', ), default=[])
true
true
f70850ca483a2cf7d2e8eca3f7141764aebd44b9
6,705
py
Python
demo/runners/aerlingusold.py
MySupersuit/DecentralisedIdentityVaxPassport
9dd2168c56335f821a38048559b56726718660ce
[ "Apache-2.0" ]
null
null
null
demo/runners/aerlingusold.py
MySupersuit/DecentralisedIdentityVaxPassport
9dd2168c56335f821a38048559b56726718660ce
[ "Apache-2.0" ]
null
null
null
demo/runners/aerlingusold.py
MySupersuit/DecentralisedIdentityVaxPassport
9dd2168c56335f821a38048559b56726718660ce
[ "Apache-2.0" ]
null
null
null
import asyncio import json import logging import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # noqa from runners.support.agent import DemoAgent, default_genesis_txns from runners.support.utils import ( log_json, log_msg, log_status, log_timer, prompt, prompt_loop, require_indy, ) LOGGER = logging.getLogger(__name__) class AerLingusAgent(DemoAgent): def __init__(self, http_port: int, admin_port: int, **kwargs): super().__init__( "Aer Lingus Agent", http_port, admin_port, prefix="AerLingus", extra_args=[ "--auto-accept-invites", "--auto-accept-requests" ], **kwargs, ) self.connection_id = None self._connection_ready = asyncio.Future() self.cred_state = {} # TODO define a dict to hold credential attributes based on # the credential_definition_id self.cred_attrs = {} async def detect_connection(self): await self._connection_ready @property def connection_ready(self): return self._connection_ready.done() and self._connection_ready.result() async def handle_connections(self, message): if message["connection_id"] == self.connection_id: if message["state"] == "active" and not self._connection_ready.done(): self.log("Connected") self._connection_ready.set_result(True) async def handle_issue_credential(self, message): state = message["state"] credential_exchange_id = message["credential_exchange_id"] prev_state = self.cred_state.get(credential_exchange_id) if prev_state == state: return # ignore self.cred_state[credential_exchange_id] = state self.log( "Credential: state =", state, ", credential_exchange_id =", credential_exchange_id, ) if state == "request_received": # TODO issue credentials based on the credential_definition_id pass async def handle_present_proof(self, message): state = message["state"] presentation_exchange_id = message["presentation_exchange_id"] self.log( "Presentation: state =", state, ", presentation_exchange_id =", presentation_exchange_id, ) if state == "presentation_received": # TODO handle received presentations pass async def handle_basicmessages(self, message): self.log("Received message:", message["content"]) async def main(start_port: int, show_timing: bool = False): genesis = await default_genesis_txns() if not genesis: print("Error retrieving ledger genesis transactions") sys.exit(1) agent = None try: log_status("#1 Provision an agent and wallet, get back configuration details") agent = AerLingusAgent( start_port, start_port + 1, genesis_data=genesis, timing=show_timing ) await agent.listen_webhooks(start_port + 2) await agent.register_did() with log_timer("Startup duration:"): await agent.start_process() log_msg("Admin url is at:", agent.admin_url) log_msg("Endpoint url is at:", agent.endpoint) # Create a schema log_status("#3 Create a new schema on the ledger") with log_timer("Publish schema duration:"): pass # TODO define schema # version = format( # "%d.%d.%d" # % ( # random.randint(1, 101), # random.randint(1, 101), # random.randint(1, 101), # ) # ) # ( # schema_id, # credential_definition_id, # ) = await agent.register_schema_and_creddef( # "employee id schema", # version, # ["employee_id", "name", "date", "position"], # ) # with log_timer("Generate invitation duration:"): # # Generate an invitation # log_status( # "#5 Create a connection to alice and print out the invite details" # ) # connection = await agent.admin_POST("/connections/create-invitation") # agent.connection_id = connection["connection_id"] # log_json(connection, label="Invitation response:") # log_msg("*****************") # 
log_msg(json.dumps(connection["invitation"]), label="Invitation:", color=None) # log_msg("*****************") # log_msg("Waiting for connection...") # await agent.detect_connection() async for option in prompt_loop( "(1) Issue Credential, (2) Send Proof Request, " + "(3) Send Message (X) Exit? [1/2/3/X] " ): if option in "xX": break elif option == "1": log_status("#13 Issue credential offer to X") # TODO credential offers elif option == "2": log_status("#20 Request proof of degree from alice") # TODO presentation requests elif option == "3": msg = await prompt("Enter message: ") await agent.admin_POST( f"/connections/{agent.connection_id}/send-message", {"content": msg} ) if show_timing: timing = await agent.fetch_timing() if timing: for line in agent.format_timing(timing): log_msg(line) finally: terminated = True try: if agent: await agent.terminate() except Exception: LOGGER.exception("Error terminating agent:") terminated = False await asyncio.sleep(0.1) if not terminated: os._exit(1) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Runs an Aer Lingus demo agent.") parser.add_argument( "-p", "--port", type=int, default=8040, metavar=("<port>"), help="Choose the starting port number to listen on", ) parser.add_argument( "--timing", action="store_true", help="Enable timing information" ) args = parser.parse_args() require_indy() try: asyncio.get_event_loop().run_until_complete(main(args.port, args.timing)) except KeyboardInterrupt: os._exit(1)
30.616438
88
0.567338
import asyncio import json import logging import os import sys sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) from runners.support.agent import DemoAgent, default_genesis_txns from runners.support.utils import ( log_json, log_msg, log_status, log_timer, prompt, prompt_loop, require_indy, ) LOGGER = logging.getLogger(__name__) class AerLingusAgent(DemoAgent): def __init__(self, http_port: int, admin_port: int, **kwargs): super().__init__( "Aer Lingus Agent", http_port, admin_port, prefix="AerLingus", extra_args=[ "--auto-accept-invites", "--auto-accept-requests" ], **kwargs, ) self.connection_id = None self._connection_ready = asyncio.Future() self.cred_state = {} self.cred_attrs = {} async def detect_connection(self): await self._connection_ready @property def connection_ready(self): return self._connection_ready.done() and self._connection_ready.result() async def handle_connections(self, message): if message["connection_id"] == self.connection_id: if message["state"] == "active" and not self._connection_ready.done(): self.log("Connected") self._connection_ready.set_result(True) async def handle_issue_credential(self, message): state = message["state"] credential_exchange_id = message["credential_exchange_id"] prev_state = self.cred_state.get(credential_exchange_id) if prev_state == state: return self.cred_state[credential_exchange_id] = state self.log( "Credential: state =", state, ", credential_exchange_id =", credential_exchange_id, ) if state == "request_received": pass async def handle_present_proof(self, message): state = message["state"] presentation_exchange_id = message["presentation_exchange_id"] self.log( "Presentation: state =", state, ", presentation_exchange_id =", presentation_exchange_id, ) if state == "presentation_received": pass async def handle_basicmessages(self, message): self.log("Received message:", message["content"]) async def main(start_port: int, show_timing: bool = False): genesis = await default_genesis_txns() if not genesis: print("Error retrieving ledger genesis transactions") sys.exit(1) agent = None try: log_status("#1 Provision an agent and wallet, get back configuration details") agent = AerLingusAgent( start_port, start_port + 1, genesis_data=genesis, timing=show_timing ) await agent.listen_webhooks(start_port + 2) await agent.register_did() with log_timer("Startup duration:"): await agent.start_process() log_msg("Admin url is at:", agent.admin_url) log_msg("Endpoint url is at:", agent.endpoint) log_status("#3 Create a new schema on the ledger") with log_timer("Publish schema duration:"): pass async for option in prompt_loop( "(1) Issue Credential, (2) Send Proof Request, " + "(3) Send Message (X) Exit? 
[1/2/3/X] " ): if option in "xX": break elif option == "1": log_status("#13 Issue credential offer to X") elif option == "2": log_status("#20 Request proof of degree from alice") elif option == "3": msg = await prompt("Enter message: ") await agent.admin_POST( f"/connections/{agent.connection_id}/send-message", {"content": msg} ) if show_timing: timing = await agent.fetch_timing() if timing: for line in agent.format_timing(timing): log_msg(line) finally: terminated = True try: if agent: await agent.terminate() except Exception: LOGGER.exception("Error terminating agent:") terminated = False await asyncio.sleep(0.1) if not terminated: os._exit(1) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Runs an Aer Lingus demo agent.") parser.add_argument( "-p", "--port", type=int, default=8040, metavar=("<port>"), help="Choose the starting port number to listen on", ) parser.add_argument( "--timing", action="store_true", help="Enable timing information" ) args = parser.parse_args() require_indy() try: asyncio.get_event_loop().run_until_complete(main(args.port, args.timing)) except KeyboardInterrupt: os._exit(1)
true
true
f70850e2708d1f5805cad2a1aec3a5796e69da47
1,636
py
Python
src/moduels/gui/Combo_EngineList.py
HaujetZhao/Caps_Writer
f2b2038a2c0984a1d356f024cbac421fe594601a
[ "MIT" ]
234
2020-07-10T11:23:09.000Z
2022-03-31T09:41:40.000Z
src/moduels/gui/Combo_EngineList.py
HaujetZhao/Caps_Writer
f2b2038a2c0984a1d356f024cbac421fe594601a
[ "MIT" ]
9
2020-07-11T08:31:11.000Z
2022-03-01T04:30:08.000Z
src/moduels/gui/Combo_EngineList.py
HaujetZhao/Caps_Writer
f2b2038a2c0984a1d356f024cbac421fe594601a
[ "MIT" ]
23
2020-07-14T08:58:44.000Z
2022-03-17T06:38:10.000Z
# -*- coding: UTF-8 -*-
import os, sqlite3

from PySide2.QtWidgets import QComboBox

from moduels.component.NormalValue import 常量


# 添加预设对话框
class Combo_EngineList(QComboBox):
    def __init__(self):
        super().__init__()
        self.initElements()  # 先初始化各个控件
        self.initSlots()  # 再将各个控件连接到信号槽
        self.initLayouts()  # 然后布局
        self.initValues()  # 再定义各个控件的值

    def initElements(self):
        pass

    def initSlots(self):
        pass

    def initLayouts(self):
        pass

    def initValues(self):
        self.初始化列表()

    def mousePressEvent(self, e):
        self.列表更新()
        self.showPopup()

    def 初始化列表(self):
        self.列表项 = []
        数据库连接 = 常量.数据库连接
        cursor = 数据库连接.cursor()
        result = cursor.execute(f'''select 引擎名称 from {常量.语音引擎表单名} order by id;''').fetchall()
        if len(result) != 0:
            for item in result:
                self.列表项.append(item[0])
        self.addItems(self.列表项)
        # if not os.path.exists(常量.音效文件路径): os.makedirs(常量.音效文件路径)
        # with os.scandir(常量.音效文件路径) as 目录条目:
        #     for entry in 目录条目:
        #         if not entry.name.startswith('.') and entry.is_dir():
        #             self.列表项.append(entry.name)

    def 列表更新(self):
        新列表 = []
        数据库连接 = 常量.数据库连接
        cursor = 数据库连接.cursor()
        result = cursor.execute(f'''select 引擎名称 from {常量.语音引擎表单名} order by id;''').fetchall()
        if len(result) != 0:
            for item in result:
                新列表.append(item[0])
        if self.列表项 == 新列表:
            return True
        self.clear()
        self.列表项 = 新列表
        self.addItems(self.列表项)
26.819672
93
0.556235
import os, sqlite3

from PySide2.QtWidgets import QComboBox

from moduels.component.NormalValue import 常量


class Combo_EngineList(QComboBox):
    def __init__(self):
        super().__init__()
        self.initElements()
        self.initSlots()
        self.initLayouts()
        self.initValues()

    def initElements(self):
        pass

    def initSlots(self):
        pass

    def initLayouts(self):
        pass

    def initValues(self):
        self.初始化列表()

    def mousePressEvent(self, e):
        self.列表更新()
        self.showPopup()

    def 初始化列表(self):
        self.列表项 = []
        数据库连接 = 常量.数据库连接
        cursor = 数据库连接.cursor()
        result = cursor.execute(f'''select 引擎名称 from {常量.语音引擎表单名} order by id;''').fetchall()
        if len(result) != 0:
            for item in result:
                self.列表项.append(item[0])
        self.addItems(self.列表项)

    def 列表更新(self):
        新列表 = []
        数据库连接 = 常量.数据库连接
        cursor = 数据库连接.cursor()
        result = cursor.execute(f'''select 引擎名称 from {常量.语音引擎表单名} order by id;''').fetchall()
        if len(result) != 0:
            for item in result:
                新列表.append(item[0])
        if self.列表项 == 新列表:
            return True
        self.clear()
        self.列表项 = 新列表
        self.addItems(self.列表项)
true
true
f7085159f268f42e1dab4ca0c38dc0a46286f7cd
4,538
py
Python
app/eg003_list_envelopes.py
AaronWDS/eg-03-python-auth-code-grant
6cb1694cbbb8cdbf166fda282e81886fd8380e5e
[ "MIT" ]
null
null
null
app/eg003_list_envelopes.py
AaronWDS/eg-03-python-auth-code-grant
6cb1694cbbb8cdbf166fda282e81886fd8380e5e
[ "MIT" ]
null
null
null
app/eg003_list_envelopes.py
AaronWDS/eg-03-python-auth-code-grant
6cb1694cbbb8cdbf166fda282e81886fd8380e5e
[ "MIT" ]
null
null
null
"""Example 003: List envelopes in the user's account""" from flask import render_template, url_for, redirect, session, flash, request from os import path import json from app import app, ds_config, views from datetime import datetime, timedelta from docusign_esign import * from docusign_esign.rest import ApiException eg = "eg003" # reference (and url) for this example def controller(): """Controller router using the HTTP method""" if request.method == 'GET': return get_controller() elif request.method == 'POST': return create_controller() else: return render_template('404.html'), 404 def create_controller(): """ 1. Check the token 2. Call the worker method 3. Show results """ minimum_buffer_min = 3 if views.ds_token_ok(minimum_buffer_min): # 2. Call the worker method args = { 'account_id': session['ds_account_id'], 'base_path': session['ds_base_path'], 'ds_access_token': session['ds_access_token'], } try: results = worker(args) except ApiException as err: error_body_json = err and hasattr(err, 'body') and err.body # we can pull the DocuSign error code and message from the response body error_body = json.loads(error_body_json) error_code = error_body and 'errorCode' in error_body and error_body['errorCode'] error_message = error_body and 'message' in error_body and error_body['message'] # In production, may want to provide customized error messages and # remediation advice to the user. return render_template('error.html', err=err, error_code=error_code, error_message=error_message ) return render_template("example_done.html", title="List envelopes results", h1="List envelopes results", message="Results from the Envelopes::listStatusChanges method:", json=json.dumps(json.dumps(results.to_dict())) ) else: flash('Sorry, you need to re-authenticate.') # We could store the parameters of the requested operation # so it could be restarted automatically. # But since it should be rare to have a token issue here, # we'll make the user re-enter the form data after # authentication. session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate')) # ***DS.snippet.0.start def worker(args): """ 1. Call the envelope status change method to list the envelopes that have changed in the last 10 days """ # Exceptions will be caught by the calling function api_client = ApiClient() api_client.host = args['base_path'] api_client.set_default_header("Authorization", "Bearer " + args['ds_access_token']) envelope_api = EnvelopesApi(api_client) # The Envelopes::listStatusChanges method has many options # See https://developers.docusign.com/esign-rest-api/reference/Envelopes/Envelopes/listStatusChanges # The list status changes call requires at least a from_date OR # a set of envelopeIds. Here we filter using a from_date. 
# Here we set the from_date to filter envelopes for the last month # Use ISO 8601 date format from_date = (datetime.utcnow() - timedelta(days=10)).isoformat() results = envelope_api.list_status_changes(args['account_id'], from_date = from_date) return results # ***DS.snippet.0.end def get_controller(): """responds with the form for the example""" if views.ds_token_ok(): return render_template("eg003_list_envelopes.html", title="List changed envelopes", source_file=path.basename(__file__), source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__), documentation=ds_config.DS_CONFIG['documentation'] + eg, show_doc=ds_config.DS_CONFIG['documentation'], ) else: # Save the current operation so it will be resumed after authentication session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate'))
40.159292
110
0.617232
from flask import render_template, url_for, redirect, session, flash, request from os import path import json from app import app, ds_config, views from datetime import datetime, timedelta from docusign_esign import * from docusign_esign.rest import ApiException eg = "eg003" def controller(): if request.method == 'GET': return get_controller() elif request.method == 'POST': return create_controller() else: return render_template('404.html'), 404 def create_controller(): minimum_buffer_min = 3 if views.ds_token_ok(minimum_buffer_min): args = { 'account_id': session['ds_account_id'], 'base_path': session['ds_base_path'], 'ds_access_token': session['ds_access_token'], } try: results = worker(args) except ApiException as err: error_body_json = err and hasattr(err, 'body') and err.body error_body = json.loads(error_body_json) error_code = error_body and 'errorCode' in error_body and error_body['errorCode'] error_message = error_body and 'message' in error_body and error_body['message'] return render_template('error.html', err=err, error_code=error_code, error_message=error_message ) return render_template("example_done.html", title="List envelopes results", h1="List envelopes results", message="Results from the Envelopes::listStatusChanges method:", json=json.dumps(json.dumps(results.to_dict())) ) else: flash('Sorry, you need to re-authenticate.') # authentication. session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate')) # ***DS.snippet.0.start def worker(args): # Exceptions will be caught by the calling function api_client = ApiClient() api_client.host = args['base_path'] api_client.set_default_header("Authorization", "Bearer " + args['ds_access_token']) envelope_api = EnvelopesApi(api_client) # The Envelopes::listStatusChanges method has many options # See https://developers.docusign.com/esign-rest-api/reference/Envelopes/Envelopes/listStatusChanges # The list status changes call requires at least a from_date OR # a set of envelopeIds. Here we filter using a from_date. # Here we set the from_date to filter envelopes for the last month # Use ISO 8601 date format from_date = (datetime.utcnow() - timedelta(days=10)).isoformat() results = envelope_api.list_status_changes(args['account_id'], from_date = from_date) return results # ***DS.snippet.0.end def get_controller(): if views.ds_token_ok(): return render_template("eg003_list_envelopes.html", title="List changed envelopes", source_file=path.basename(__file__), source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__), documentation=ds_config.DS_CONFIG['documentation'] + eg, show_doc=ds_config.DS_CONFIG['documentation'], ) else: # Save the current operation so it will be resumed after authentication session['eg'] = url_for(eg) return redirect(url_for('ds_must_authenticate'))
true
true
f70851cea744e28815e15729e639c2a5608b56a3
6,283
py
Python
prov_vo/migrations/0006_add_useddescription_wasgeneratedbydescription.py
kristinriebe/django-prov-vo
5bd86eb58833fe591004e6ef431b2b3deae7a62c
[ "Apache-2.0" ]
1
2018-12-11T05:53:55.000Z
2018-12-11T05:53:55.000Z
prov_vo/migrations/0006_add_useddescription_wasgeneratedbydescription.py
kristinriebe/django-prov-vo
5bd86eb58833fe591004e6ef431b2b3deae7a62c
[ "Apache-2.0" ]
null
null
null
prov_vo/migrations/0006_add_useddescription_wasgeneratedbydescription.py
kristinriebe/django-prov-vo
5bd86eb58833fe591004e6ef431b2b3deae7a62c
[ "Apache-2.0" ]
1
2021-06-23T13:09:05.000Z
2021-06-23T13:09:05.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2018-01-22 12:07 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('prov_vo', '0005_add_activitydescription_entitydescription'), ] operations = [ migrations.CreateModel( name='UsedDescription', fields=[ ('id', models.AutoField(primary_key=True, serialize=False)), ('role', models.CharField(blank=True, max_length=128, null=True)), ('activityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityDescription')), ('entityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.EntityDescription')), ], ), migrations.CreateModel( name='WasGeneratedByDescription', fields=[ ('id', models.AutoField(primary_key=True, serialize=False)), ('role', models.CharField(blank=True, max_length=128, null=True)), ('activityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityDescription')), ('entityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.EntityDescription')), ], ), migrations.AlterField( model_name='entity', name='rights', field=models.CharField(blank=True, choices=[('voprov:public', 'voprov:public'), ('voprov:secure', 'voprov:secure'), ('voprov:proprietary', 'voprov:proprietary')], max_length=128, null=True), ), migrations.AlterField( model_name='hadmember', name='collection', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Collection'), ), migrations.AlterField( model_name='hadmember', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ecollection', to='prov_vo.Entity'), ), migrations.AlterField( model_name='hadstep', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='activityFlow', to='prov_vo.Activity'), ), migrations.AlterField( model_name='hadstep', name='activityFlow', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityFlow'), ), migrations.AlterField( model_name='used', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AlterField( model_name='used', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasassociatedwith', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AlterField( model_name='wasassociatedwith', name='agent', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Agent'), ), migrations.AlterField( model_name='wasattributedto', name='agent', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Agent'), ), migrations.AlterField( model_name='wasattributedto', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasderivedfrom', name='generatedEntity', 
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasderivedfrom', name='usedEntity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='generatedEntity', to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasgeneratedby', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AlterField( model_name='wasgeneratedby', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasinformedby', name='informant', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='informed', to='prov_vo.Activity'), ), migrations.AlterField( model_name='wasinformedby', name='informed', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AddField( model_name='used', name='description', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.UsedDescription'), ), migrations.AddField( model_name='wasgeneratedby', name='description', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.WasGeneratedByDescription'), ), ]
48.330769
202
0.629954
from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('prov_vo', '0005_add_activitydescription_entitydescription'), ] operations = [ migrations.CreateModel( name='UsedDescription', fields=[ ('id', models.AutoField(primary_key=True, serialize=False)), ('role', models.CharField(blank=True, max_length=128, null=True)), ('activityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityDescription')), ('entityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.EntityDescription')), ], ), migrations.CreateModel( name='WasGeneratedByDescription', fields=[ ('id', models.AutoField(primary_key=True, serialize=False)), ('role', models.CharField(blank=True, max_length=128, null=True)), ('activityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityDescription')), ('entityDescription', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.EntityDescription')), ], ), migrations.AlterField( model_name='entity', name='rights', field=models.CharField(blank=True, choices=[('voprov:public', 'voprov:public'), ('voprov:secure', 'voprov:secure'), ('voprov:proprietary', 'voprov:proprietary')], max_length=128, null=True), ), migrations.AlterField( model_name='hadmember', name='collection', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Collection'), ), migrations.AlterField( model_name='hadmember', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ecollection', to='prov_vo.Entity'), ), migrations.AlterField( model_name='hadstep', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='activityFlow', to='prov_vo.Activity'), ), migrations.AlterField( model_name='hadstep', name='activityFlow', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.ActivityFlow'), ), migrations.AlterField( model_name='used', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AlterField( model_name='used', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasassociatedwith', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AlterField( model_name='wasassociatedwith', name='agent', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Agent'), ), migrations.AlterField( model_name='wasattributedto', name='agent', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Agent'), ), migrations.AlterField( model_name='wasattributedto', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasderivedfrom', name='generatedEntity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, 
to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasderivedfrom', name='usedEntity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='generatedEntity', to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasgeneratedby', name='activity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AlterField( model_name='wasgeneratedby', name='entity', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Entity'), ), migrations.AlterField( model_name='wasinformedby', name='informant', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='informed', to='prov_vo.Activity'), ), migrations.AlterField( model_name='wasinformedby', name='informed', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.Activity'), ), migrations.AddField( model_name='used', name='description', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.UsedDescription'), ), migrations.AddField( model_name='wasgeneratedby', name='description', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='prov_vo.WasGeneratedByDescription'), ), ]
true
true
f7085209ff1d42f87982b89f116312cf7f730323
3,399
py
Python
findfine/trip/models.py
muchu1983/findfine
b2255db6327324e89b914fd93a81f7ea5eac6f64
[ "BSD-3-Clause" ]
1
2017-05-25T20:25:20.000Z
2017-05-25T20:25:20.000Z
findfine/trip/models.py
muchu1983/104_findfine
b2255db6327324e89b914fd93a81f7ea5eac6f64
[ "BSD-3-Clause" ]
null
null
null
findfine/trip/models.py
muchu1983/104_findfine
b2255db6327324e89b914fd93a81f7ea5eac6f64
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ Copyright (C) 2016, MuChu Hsu Contributed by Muchu Hsu (muchu1983@gmail.com) This file is part of BSD license <https://opensource.org/licenses/BSD-3-Clause> """ from django.db import models from account.models import UserAccount # 行程資料 class Trip(models.Model): #來源網站 strSource = models.CharField(max_length=255, null=False) #原始 URL strOriginUrl = models.CharField(db_index=True, max_length=255, null=False) #主要圖片 url strImageUrl = models.TextField(null=False) #更新狀態 (out-of-date, up-to-date) strUpdateStatus = models.CharField(max_length=255, null=True) #更新日期 dtUpdateTime = models.DateTimeField(null=True) #標題 strTitle = models.CharField(max_length=255, null=True) #地點 strLocation = models.TextField(null=True) #金額 (USD) intUsdCost = models.IntegerField(null=True) #導覽語言 strGuideLanguage = models.CharField(max_length=255, null=True) #評價星數 (1-5) intReviewStar = models.IntegerField(null=True) #評價訪客數 intReviewVisitor = models.IntegerField(null=True) #主要景點 strAttrations = models.CharField(max_length=255, null=True) #摘要 strIntroduction = models.TextField(null=True) #行程開始日期 dtDatetimeFrom = models.DateTimeField(null=True) #行程結束日期 dtDatetimeTo = models.DateTimeField(null=True) #行程總時數 (Hour) intDurationHour = models.IntegerField(null=True) #行程類型 strStyle = models.CharField(max_length=255, null=True) #特殊選項編號 intOption = models.IntegerField(null=True) #使用者偏好的行程 class FavoriteTrip(models.Model): #使用者帳號 ForeignKey fkUserAccount = models.ForeignKey(UserAccount, null=False, on_delete=models.CASCADE) #行程 ForeignKey fkTrip = models.ForeignKey(Trip, null=False, on_delete=models.CASCADE) #設定 (json 格式) strJsonSetting = models.TextField(null=True) #使用者自訂 行程規劃 class CustomizedTripPlan(models.Model): #使用者帳號 ForeignKey fkUserAccount = models.ForeignKey(UserAccount, null=False, on_delete=models.CASCADE) #行程規劃名稱 strName = models.CharField(max_length=255, null=True) #封面圖 strImageUrl = models.TextField(null=True) #完整行程規劃 開始日期 dtDatetimeFrom = models.DateTimeField(null=True) #完整行程規劃 結束日期 dtDatetimeTo = models.DateTimeField(null=True) #使用者自訂 行程規劃項目 class CustomizedTripPlanItem(models.Model): #行程規劃 ForeignKey fkCustomizedTripPlan = models.ForeignKey(CustomizedTripPlan, null=False, on_delete=models.CASCADE) #項目標題 strTitle = models.CharField(max_length=255, null=True) #原始 URL strOriginUrl = models.TextField(null=True) #主要圖片 url strImageUrl = models.TextField(null=True) #地點 strLocation = models.TextField(null=True) #金額 (USD) intUsdCost = models.IntegerField(null=True) #行程總時數 (Hour) intDurationHour = models.IntegerField(null=True) #註解 strComment = models.TextField(null=True) #經度 strLongitude = models.CharField(max_length=255, null=True) #緯度 strLatitude = models.CharField(max_length=255, null=True) #規劃開始日期 dtDatetimeFrom = models.DateTimeField(null=True) #規劃結束日期 dtDatetimeTo = models.DateTimeField(null=True) #匯率資料 class ExRate(models.Model): #貨幣名稱 strCurrencyName = models.CharField(max_length=255, null=False) #美金匯率 fUSDollar = models.FloatField(null=False) #更新日期 dtUpdateTime = models.DateTimeField(null=False)
31.766355
102
0.714034
from django.db import models from account.models import UserAccount class Trip(models.Model): strSource = models.CharField(max_length=255, null=False) strOriginUrl = models.CharField(db_index=True, max_length=255, null=False) strImageUrl = models.TextField(null=False) strUpdateStatus = models.CharField(max_length=255, null=True) dtUpdateTime = models.DateTimeField(null=True) strTitle = models.CharField(max_length=255, null=True) strLocation = models.TextField(null=True) intUsdCost = models.IntegerField(null=True) strGuideLanguage = models.CharField(max_length=255, null=True) intReviewStar = models.IntegerField(null=True) intReviewVisitor = models.IntegerField(null=True) strAttrations = models.CharField(max_length=255, null=True) strIntroduction = models.TextField(null=True) dtDatetimeFrom = models.DateTimeField(null=True) dtDatetimeTo = models.DateTimeField(null=True) intDurationHour = models.IntegerField(null=True) strStyle = models.CharField(max_length=255, null=True) intOption = models.IntegerField(null=True) class FavoriteTrip(models.Model): fkUserAccount = models.ForeignKey(UserAccount, null=False, on_delete=models.CASCADE) fkTrip = models.ForeignKey(Trip, null=False, on_delete=models.CASCADE) strJsonSetting = models.TextField(null=True) class CustomizedTripPlan(models.Model): fkUserAccount = models.ForeignKey(UserAccount, null=False, on_delete=models.CASCADE) strName = models.CharField(max_length=255, null=True) strImageUrl = models.TextField(null=True) dtDatetimeFrom = models.DateTimeField(null=True) dtDatetimeTo = models.DateTimeField(null=True) class CustomizedTripPlanItem(models.Model): fkCustomizedTripPlan = models.ForeignKey(CustomizedTripPlan, null=False, on_delete=models.CASCADE) strTitle = models.CharField(max_length=255, null=True) strOriginUrl = models.TextField(null=True) strImageUrl = models.TextField(null=True) strLocation = models.TextField(null=True) intUsdCost = models.IntegerField(null=True) intDurationHour = models.IntegerField(null=True) strComment = models.TextField(null=True) strLongitude = models.CharField(max_length=255, null=True) strLatitude = models.CharField(max_length=255, null=True) dtDatetimeFrom = models.DateTimeField(null=True) dtDatetimeTo = models.DateTimeField(null=True) class ExRate(models.Model): strCurrencyName = models.CharField(max_length=255, null=False) fUSDollar = models.FloatField(null=False) dtUpdateTime = models.DateTimeField(null=False)
true
true
f70852dd2aefe681a6e4b9449b993aa22610a832
5,612
py
Python
test/modules/http2/env.py
tititiou36/httpd
1348607c00ba58ce371f2f8ecb08abf610227043
[ "Apache-2.0" ]
2,529
2015-01-02T11:52:53.000Z
2022-03-30T19:54:27.000Z
test/modules/http2/env.py
tititiou36/httpd
1348607c00ba58ce371f2f8ecb08abf610227043
[ "Apache-2.0" ]
133
2015-04-21T05:50:45.000Z
2022-03-30T14:23:40.000Z
test/modules/http2/env.py
tititiou36/httpd
1348607c00ba58ce371f2f8ecb08abf610227043
[ "Apache-2.0" ]
1,113
2015-01-01T14:47:02.000Z
2022-03-29T16:47:18.000Z
import inspect import logging import os import re import subprocess from typing import Dict, Any from pyhttpd.certs import CertificateSpec from pyhttpd.conf import HttpdConf from pyhttpd.env import HttpdTestEnv, HttpdTestSetup log = logging.getLogger(__name__) class H2TestSetup(HttpdTestSetup): def __init__(self, env: 'HttpdTestEnv'): super().__init__(env=env) self.add_source_dir(os.path.dirname(inspect.getfile(H2TestSetup))) self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl"]) def make(self): super().make() self._add_h2test() self._setup_data_1k_1m() def _add_h2test(self): local_dir = os.path.dirname(inspect.getfile(H2TestSetup)) p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'], capture_output=True, cwd=os.path.join(local_dir, 'mod_h2test')) rv = p.returncode if rv != 0: log.error(f"compiling md_h2test failed: {p.stderr}") raise Exception(f"compiling md_h2test failed: {p.stderr}") modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf') with open(modules_conf, 'a') as fd: # load our test module which is not installed fd.write(f"LoadModule h2test_module \"{local_dir}/mod_h2test/.libs/mod_h2test.so\"\n") def _setup_data_1k_1m(self): s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n" with open(os.path.join(self.env.gen_dir, "data-1k"), 'w') as f: for i in range(10): f.write(f"{i:09d}-{s90}") with open(os.path.join(self.env.gen_dir, "data-10k"), 'w') as f: for i in range(100): f.write(f"{i:09d}-{s90}") with open(os.path.join(self.env.gen_dir, "data-100k"), 'w') as f: for i in range(1000): f.write(f"{i:09d}-{s90}") with open(os.path.join(self.env.gen_dir, "data-1m"), 'w') as f: for i in range(10000): f.write(f"{i:09d}-{s90}") class H2TestEnv(HttpdTestEnv): def __init__(self, pytestconfig=None): super().__init__(pytestconfig=pytestconfig) self.add_httpd_conf([ "H2MinWorkers 1", "H2MaxWorkers 64", "Protocols h2 http/1.1 h2c", ]) self.add_httpd_log_modules(["http2", "proxy_http2", "h2test"]) self.add_cert_specs([ CertificateSpec(domains=[ f"push.{self._http_tld}", f"hints.{self._http_tld}", f"ssl.{self._http_tld}", f"pad0.{self._http_tld}", f"pad1.{self._http_tld}", f"pad2.{self._http_tld}", f"pad3.{self._http_tld}", f"pad8.{self._http_tld}", ]), CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'), ]) self.httpd_error_log.set_ignored_lognos([ 'AH02032', 'AH01276', 'AH01630', 'AH00135', 'AH02261', # Re-negotiation handshake failed (our test_101) 'AH03490', # scoreboard full, happens on limit tests ]) self.httpd_error_log.add_ignored_patterns([ re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'), re.compile(r'.*:tls_post_process_client_hello:.*'), re.compile(r'.*:tls_process_client_certificate:.*'), re.compile(r'.*have incompatible TLS configurations.'), ]) def setup_httpd(self, setup: HttpdTestSetup = None): super().setup_httpd(setup=H2TestSetup(env=self)) class H2Conf(HttpdConf): def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None): super().__init__(env=env, extras=HttpdConf.merge_extras(extras, { f"cgi.{env.http_tld}": [ "SSLOptions +StdEnvVars", "AddHandler cgi-script .py", ] })) def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=None, ssl_module=None, with_certificates=None): super().start_vhost(domains=domains, port=port, doc_root=doc_root, with_ssl=with_ssl, ssl_module=ssl_module, with_certificates=with_certificates) if f"noh2.{self.env.http_tld}" in domains: protos = ["http/1.1"] elif port == self.env.https_port or with_ssl is 
True: protos = ["h2", "http/1.1"] else: protos = ["h2c", "http/1.1"] if f"test2.{self.env.http_tld}" in domains: protos = reversed(protos) self.add(f"Protocols {' '.join(protos)}") return self def add_vhost_noh2(self): domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"] self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2") self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"]) self.end_vhost() self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2") self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"]) self.end_vhost() return self def add_vhost_test1(self, proxy_self=False, h2proxy_self=False): return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self) def add_vhost_test2(self): return super().add_vhost_test2()
39.521127
107
0.586066
import inspect import logging import os import re import subprocess from typing import Dict, Any from pyhttpd.certs import CertificateSpec from pyhttpd.conf import HttpdConf from pyhttpd.env import HttpdTestEnv, HttpdTestSetup log = logging.getLogger(__name__) class H2TestSetup(HttpdTestSetup): def __init__(self, env: 'HttpdTestEnv'): super().__init__(env=env) self.add_source_dir(os.path.dirname(inspect.getfile(H2TestSetup))) self.add_modules(["http2", "proxy_http2", "cgid", "autoindex", "ssl"]) def make(self): super().make() self._add_h2test() self._setup_data_1k_1m() def _add_h2test(self): local_dir = os.path.dirname(inspect.getfile(H2TestSetup)) p = subprocess.run([self.env.apxs, '-c', 'mod_h2test.c'], capture_output=True, cwd=os.path.join(local_dir, 'mod_h2test')) rv = p.returncode if rv != 0: log.error(f"compiling md_h2test failed: {p.stderr}") raise Exception(f"compiling md_h2test failed: {p.stderr}") modules_conf = os.path.join(self.env.server_dir, 'conf/modules.conf') with open(modules_conf, 'a') as fd: fd.write(f"LoadModule h2test_module \"{local_dir}/mod_h2test/.libs/mod_h2test.so\"\n") def _setup_data_1k_1m(self): s90 = "01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678\n" with open(os.path.join(self.env.gen_dir, "data-1k"), 'w') as f: for i in range(10): f.write(f"{i:09d}-{s90}") with open(os.path.join(self.env.gen_dir, "data-10k"), 'w') as f: for i in range(100): f.write(f"{i:09d}-{s90}") with open(os.path.join(self.env.gen_dir, "data-100k"), 'w') as f: for i in range(1000): f.write(f"{i:09d}-{s90}") with open(os.path.join(self.env.gen_dir, "data-1m"), 'w') as f: for i in range(10000): f.write(f"{i:09d}-{s90}") class H2TestEnv(HttpdTestEnv): def __init__(self, pytestconfig=None): super().__init__(pytestconfig=pytestconfig) self.add_httpd_conf([ "H2MinWorkers 1", "H2MaxWorkers 64", "Protocols h2 http/1.1 h2c", ]) self.add_httpd_log_modules(["http2", "proxy_http2", "h2test"]) self.add_cert_specs([ CertificateSpec(domains=[ f"push.{self._http_tld}", f"hints.{self._http_tld}", f"ssl.{self._http_tld}", f"pad0.{self._http_tld}", f"pad1.{self._http_tld}", f"pad2.{self._http_tld}", f"pad3.{self._http_tld}", f"pad8.{self._http_tld}", ]), CertificateSpec(domains=[f"noh2.{self.http_tld}"], key_type='rsa2048'), ]) self.httpd_error_log.set_ignored_lognos([ 'AH02032', 'AH01276', 'AH01630', 'AH00135', 'AH02261', 'AH03490', ]) self.httpd_error_log.add_ignored_patterns([ re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'), re.compile(r'.*:tls_post_process_client_hello:.*'), re.compile(r'.*:tls_process_client_certificate:.*'), re.compile(r'.*have incompatible TLS configurations.'), ]) def setup_httpd(self, setup: HttpdTestSetup = None): super().setup_httpd(setup=H2TestSetup(env=self)) class H2Conf(HttpdConf): def __init__(self, env: HttpdTestEnv, extras: Dict[str, Any] = None): super().__init__(env=env, extras=HttpdConf.merge_extras(extras, { f"cgi.{env.http_tld}": [ "SSLOptions +StdEnvVars", "AddHandler cgi-script .py", ] })) def start_vhost(self, domains, port=None, doc_root="htdocs", with_ssl=None, ssl_module=None, with_certificates=None): super().start_vhost(domains=domains, port=port, doc_root=doc_root, with_ssl=with_ssl, ssl_module=ssl_module, with_certificates=with_certificates) if f"noh2.{self.env.http_tld}" in domains: protos = ["http/1.1"] elif port == self.env.https_port or with_ssl is True: protos = ["h2", "http/1.1"] else: protos = ["h2c", "http/1.1"] if f"test2.{self.env.http_tld}" in domains: protos = 
reversed(protos) self.add(f"Protocols {' '.join(protos)}") return self def add_vhost_noh2(self): domains = [f"noh2.{self.env.http_tld}", f"noh2-alias.{self.env.http_tld}"] self.start_vhost(domains=domains, port=self.env.https_port, doc_root="htdocs/noh2") self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"]) self.end_vhost() self.start_vhost(domains=domains, port=self.env.http_port, doc_root="htdocs/noh2") self.add(["Protocols http/1.1", "SSLOptions +StdEnvVars"]) self.end_vhost() return self def add_vhost_test1(self, proxy_self=False, h2proxy_self=False): return super().add_vhost_test1(proxy_self=proxy_self, h2proxy_self=h2proxy_self) def add_vhost_test2(self): return super().add_vhost_test2()
true
true
f70858bd78db6d504be4468aaef1b4b5b49cd875
6,392
py
Python
models/GNN/GIN.py
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs
57195ccab62d23dcbcac1a317f8a9811a9fd6cb5
[ "MIT" ]
44
2020-02-07T12:44:25.000Z
2022-03-31T21:57:08.000Z
models/GNN/GIN.py
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs
57195ccab62d23dcbcac1a317f8a9811a9fd6cb5
[ "MIT" ]
2
2020-02-07T03:54:15.000Z
2020-05-07T13:21:29.000Z
models/GNN/GIN.py
mwcvitkovic/Supervised-Learning-on-Relational-Databases-with-GNNs
57195ccab62d23dcbcac1a317f8a9811a9fd6cb5
[ "MIT" ]
10
2020-02-23T07:34:55.000Z
2021-07-25T18:34:40.000Z
from dgl import BatchedDGLGraph from dgl.nn.pytorch.conv import GINConv from torch import nn from models.GNN.GNNModelBase import GNNModelBase from models.utils import TypeConditionalLinear class GIN(GNNModelBase): """ Graph Isomorphism Network as described in https://arxiv.org/pdf/1810.00826.pdf """ def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs): super().__init__(**kwargs) self.layers = nn.ModuleList() for _ in range(self.n_layers): apply_func_layers = sum( [[nn.Linear(self.hidden_dim, self.hidden_dim), self.get_act(), self.get_norm(self.hidden_dim), nn.Dropout(self.p_dropout)] for _ in range(n_apply_func_layers)], []) apply_func = nn.Sequential(*apply_func_layers) self.layers.append(GINConv(apply_func=apply_func, aggregator_type=aggregator_type, init_eps=init_eps, learn_eps=learn_eps)) def gnn_forward(self, g: BatchedDGLGraph): feats = g.ndata['h'] for layer in self.layers: feats = layer(graph=g, feat=feats) readout = self.readout(g, feats) out = self.fcout(readout) return out class RelationalGIN(GNNModelBase): """ Version of GIN that passes edge-type-conditional messages """ def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs): super().__init__(**kwargs) self.n_relations = 2 * len( self.db_info['edge_type_to_int']) - 1 # there are negative edge types for the reverse edges self.layers = nn.ModuleList() for _ in range(self.n_layers): apply_func_layers = sum( [[nn.Linear(self.hidden_dim, self.hidden_dim), self.get_act(), self.get_norm(self.hidden_dim), nn.Dropout(self.p_dropout)] for _ in range(n_apply_func_layers)], []) apply_func = nn.Sequential(*apply_func_layers) self.layers.append(RelationalGINConv(apply_func=apply_func, activation=self.get_act(), aggregator_type=aggregator_type, hidden_dim=self.hidden_dim, init_eps=init_eps, learn_eps=learn_eps, num_rels=self.n_relations)) def gnn_forward(self, g: BatchedDGLGraph): feats = g.ndata['h'] etypes = g.edata['edge_types'] + self.n_relations // 2 for layer in self.layers: feats = layer(graph=g, feat=feats, etypes=etypes) readout = self.readout(g, feats) out = self.fcout(readout) return out class RelationalGINConv(GINConv): def __init__(self, apply_func, activation, aggregator_type, hidden_dim, init_eps=0, learn_eps=False, num_rels=0): super().__init__(apply_func, aggregator_type, init_eps, learn_eps) self.num_rels = num_rels self.act = activation self.edge_message_layer = TypeConditionalLinear(hidden_dim, hidden_dim, num_rels) def message_func(self, edges): msg = edges.src['h'] msg = self.edge_message_layer(msg, edges.data['type']) msg = self.act(msg) return {'msg': msg} def forward(self, graph, feat, etypes): graph = graph.local_var() graph.ndata['h'] = feat graph.edata['type'] = etypes graph.update_all(self.message_func, self._reducer('msg', 'neigh')) rst = (1 + self.eps) * feat + graph.ndata['neigh'] if self.apply_func is not None: rst = self.apply_func(rst) return rst class ERGIN(RelationalGIN): """ GIN using different linear mappings for each node and edge type """ def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs): super().__init__(n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs) self.n_node_types = len(self.db_info['node_type_to_int']) self.act = self.get_act() self.layers = nn.ModuleList() self.apply_func_blocks = nn.ModuleList() for _ in range(self.n_layers): self.layers.append(RelationalGINConv(apply_func=None, activation=self.get_act(), aggregator_type=aggregator_type, hidden_dim=self.hidden_dim, 
init_eps=init_eps, learn_eps=learn_eps, num_rels=self.n_relations)) self.apply_func_blocks.append( nn.ModuleList([nn.ModuleDict({'tcl': TypeConditionalLinear(self.hidden_dim, self.hidden_dim, self.n_node_types), 'act': self.get_act(), 'norm': self.get_norm(self.hidden_dim), 'do': nn.Dropout(self.p_dropout) }) for _ in range(n_apply_func_layers)]) ) def gnn_forward(self, g: BatchedDGLGraph): feats = g.ndata['h'] ntypes = g.ndata['node_types'] etypes = g.edata['edge_types'] + self.n_relations // 2 for layer, apply_func_blocks in zip(self.layers, self.apply_func_blocks): feats = layer(graph=g, feat=feats, etypes=etypes) for block in apply_func_blocks: feats = block['tcl'](feats, ntypes) feats = block['act'](feats) feats = block['norm'](feats) feats = block['do'](feats) readout = self.readout(g, feats) out = self.fcout(readout) return out
44.082759
117
0.532697
from dgl import BatchedDGLGraph from dgl.nn.pytorch.conv import GINConv from torch import nn from models.GNN.GNNModelBase import GNNModelBase from models.utils import TypeConditionalLinear class GIN(GNNModelBase): def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs): super().__init__(**kwargs) self.layers = nn.ModuleList() for _ in range(self.n_layers): apply_func_layers = sum( [[nn.Linear(self.hidden_dim, self.hidden_dim), self.get_act(), self.get_norm(self.hidden_dim), nn.Dropout(self.p_dropout)] for _ in range(n_apply_func_layers)], []) apply_func = nn.Sequential(*apply_func_layers) self.layers.append(GINConv(apply_func=apply_func, aggregator_type=aggregator_type, init_eps=init_eps, learn_eps=learn_eps)) def gnn_forward(self, g: BatchedDGLGraph): feats = g.ndata['h'] for layer in self.layers: feats = layer(graph=g, feat=feats) readout = self.readout(g, feats) out = self.fcout(readout) return out class RelationalGIN(GNNModelBase): def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs): super().__init__(**kwargs) self.n_relations = 2 * len( self.db_info['edge_type_to_int']) - 1 self.layers = nn.ModuleList() for _ in range(self.n_layers): apply_func_layers = sum( [[nn.Linear(self.hidden_dim, self.hidden_dim), self.get_act(), self.get_norm(self.hidden_dim), nn.Dropout(self.p_dropout)] for _ in range(n_apply_func_layers)], []) apply_func = nn.Sequential(*apply_func_layers) self.layers.append(RelationalGINConv(apply_func=apply_func, activation=self.get_act(), aggregator_type=aggregator_type, hidden_dim=self.hidden_dim, init_eps=init_eps, learn_eps=learn_eps, num_rels=self.n_relations)) def gnn_forward(self, g: BatchedDGLGraph): feats = g.ndata['h'] etypes = g.edata['edge_types'] + self.n_relations // 2 for layer in self.layers: feats = layer(graph=g, feat=feats, etypes=etypes) readout = self.readout(g, feats) out = self.fcout(readout) return out class RelationalGINConv(GINConv): def __init__(self, apply_func, activation, aggregator_type, hidden_dim, init_eps=0, learn_eps=False, num_rels=0): super().__init__(apply_func, aggregator_type, init_eps, learn_eps) self.num_rels = num_rels self.act = activation self.edge_message_layer = TypeConditionalLinear(hidden_dim, hidden_dim, num_rels) def message_func(self, edges): msg = edges.src['h'] msg = self.edge_message_layer(msg, edges.data['type']) msg = self.act(msg) return {'msg': msg} def forward(self, graph, feat, etypes): graph = graph.local_var() graph.ndata['h'] = feat graph.edata['type'] = etypes graph.update_all(self.message_func, self._reducer('msg', 'neigh')) rst = (1 + self.eps) * feat + graph.ndata['neigh'] if self.apply_func is not None: rst = self.apply_func(rst) return rst class ERGIN(RelationalGIN): def __init__(self, n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs): super().__init__(n_apply_func_layers, aggregator_type, init_eps, learn_eps, **kwargs) self.n_node_types = len(self.db_info['node_type_to_int']) self.act = self.get_act() self.layers = nn.ModuleList() self.apply_func_blocks = nn.ModuleList() for _ in range(self.n_layers): self.layers.append(RelationalGINConv(apply_func=None, activation=self.get_act(), aggregator_type=aggregator_type, hidden_dim=self.hidden_dim, init_eps=init_eps, learn_eps=learn_eps, num_rels=self.n_relations)) self.apply_func_blocks.append( nn.ModuleList([nn.ModuleDict({'tcl': TypeConditionalLinear(self.hidden_dim, self.hidden_dim, self.n_node_types), 'act': self.get_act(), 'norm': self.get_norm(self.hidden_dim), 'do': 
nn.Dropout(self.p_dropout) }) for _ in range(n_apply_func_layers)]) ) def gnn_forward(self, g: BatchedDGLGraph): feats = g.ndata['h'] ntypes = g.ndata['node_types'] etypes = g.edata['edge_types'] + self.n_relations // 2 for layer, apply_func_blocks in zip(self.layers, self.apply_func_blocks): feats = layer(graph=g, feat=feats, etypes=etypes) for block in apply_func_blocks: feats = block['tcl'](feats, ntypes) feats = block['act'](feats) feats = block['norm'](feats) feats = block['do'](feats) readout = self.readout(g, feats) out = self.fcout(readout) return out
true
true
f7085a1fe5b988f68095a3af873e89d664908629
1,553
py
Python
chainerui/views/log.py
chainer/chainerui
91c5c26d9154a008079dbb0bcbf69b5590d105f7
[ "MIT" ]
185
2017-12-15T09:24:07.000Z
2022-01-20T11:20:13.000Z
chainerui/views/log.py
chainer/chainerui
91c5c26d9154a008079dbb0bcbf69b5590d105f7
[ "MIT" ]
191
2017-12-15T09:14:52.000Z
2022-02-17T14:09:19.000Z
chainerui/views/log.py
chainer/chainerui
91c5c26d9154a008079dbb0bcbf69b5590d105f7
[ "MIT" ]
29
2017-12-15T09:40:45.000Z
2022-03-13T11:21:11.000Z
import datetime from flask import jsonify from flask import request from flask.views import MethodView from chainerui.database import db from chainerui.models.log import Log from chainerui.models.project import Project from chainerui.models.result import Result class LogAPI(MethodView): def post(self, project_id=None, result_id=None): project = db.session.query(Project).filter_by(id=project_id).first() if project is None: return jsonify({ 'project': None, 'message': 'No interface defined for URL.' }), 404 result = db.session.query(Result).filter_by(id=result_id).first() if result is None: return jsonify({ 'result': None, 'message': 'No interface defined for URL.' }), 404 data = request.get_json() log_json = data.get('log') modified_at = log_json.get('modifiedAt', None) if modified_at is not None: result.log_modified_at = datetime.datetime.fromtimestamp( modified_at) log_values = log_json.get('values', []) reset = log_json.get('reset', False) if reset: result.logs = [] for value in log_values: result.logs.append(Log(value)) db.session.commit() return jsonify({ 'logs': { 'resultId': result.id, 'insertedLogCount': len(log_values), 'totalLogCount': len(result.logs) } })
30.45098
76
0.582743
import datetime from flask import jsonify from flask import request from flask.views import MethodView from chainerui.database import db from chainerui.models.log import Log from chainerui.models.project import Project from chainerui.models.result import Result class LogAPI(MethodView): def post(self, project_id=None, result_id=None): project = db.session.query(Project).filter_by(id=project_id).first() if project is None: return jsonify({ 'project': None, 'message': 'No interface defined for URL.' }), 404 result = db.session.query(Result).filter_by(id=result_id).first() if result is None: return jsonify({ 'result': None, 'message': 'No interface defined for URL.' }), 404 data = request.get_json() log_json = data.get('log') modified_at = log_json.get('modifiedAt', None) if modified_at is not None: result.log_modified_at = datetime.datetime.fromtimestamp( modified_at) log_values = log_json.get('values', []) reset = log_json.get('reset', False) if reset: result.logs = [] for value in log_values: result.logs.append(Log(value)) db.session.commit() return jsonify({ 'logs': { 'resultId': result.id, 'insertedLogCount': len(log_values), 'totalLogCount': len(result.logs) } })
true
true
f7085abad3d73b13460022c34a237ccc00a67785
5,529
py
Python
unblob/handlers/compression/lz4.py
IoT-Inspector/unblob
4a6c871dae6805a922e55d30a7925910dc6a4eda
[ "MIT" ]
17
2021-11-23T10:05:24.000Z
2022-03-10T15:40:41.000Z
unblob/handlers/compression/lz4.py
IoT-Inspector/unblob
4a6c871dae6805a922e55d30a7925910dc6a4eda
[ "MIT" ]
184
2021-11-22T12:25:05.000Z
2022-03-31T16:27:41.000Z
unblob/handlers/compression/lz4.py
IoT-Inspector/unblob
4a6c871dae6805a922e55d30a7925910dc6a4eda
[ "MIT" ]
2
2021-11-25T09:34:01.000Z
2022-02-18T00:14:23.000Z
""" LZ4 frame format definition: https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md """ import io from typing import Optional from lz4.block import decompress from structlog import get_logger from unblob.extractors import Command from ...file_utils import Endian, convert_int8, convert_int32 from ...models import File, Handler, HexString, ValidChunk logger = get_logger() SKIPPABLE_FRAMES_MAGIC = [0x184D2A50 + i for i in range(0, 16)] FRAME_MAGIC = 0x184D2204 LEGACY_FRAME_MAGIC = 0x184C2102 FRAME_MAGICS = SKIPPABLE_FRAMES_MAGIC + [FRAME_MAGIC] + [LEGACY_FRAME_MAGIC] _1BIT = 0x01 _2BITS = 0x03 END_MARK = 0x00000000 CONTENT_SIZE_LEN = 8 BLOCK_SIZE_LEN = ( FRAME_SIZE_LEN ) = BLOCK_CHECKSUM_LEN = CONTENT_CHECKSUM_LEN = MAGIC_LEN = DICTID_LEN = 4 FLG_LEN = BD_LEN = HC_LEN = 1 MAX_LEGACY_BLOCK_SIZE = 8 * 1024 * 1024 # 8 MB class FLG: """Represents the FLG field""" version: int = 0 block_independence: int = 0 block_checksum: int = 0 content_size: int = 0 content_checksum: int = 0 dictid: int = 0 def __init__(self, raw_flg: int): self.version = (raw_flg >> 6) & _2BITS self.block_independence = (raw_flg >> 5) & _1BIT self.block_checksum = (raw_flg >> 4) & _1BIT self.content_size = (raw_flg >> 3) & _1BIT self.content_checksum = (raw_flg >> 2) & _1BIT self.dictid = raw_flg & _1BIT def as_dict(self) -> dict: return { "version": self.version, "block_independence": self.block_independence, "block_checksum": self.block_checksum, "content_size": self.content_size, "content_checksum": self.content_checksum, "dictid": self.dictid, } class _LZ4HandlerBase(Handler): """A common base for all LZ4 formats.""" def _skip_magic_bytes(self, file: File): file.seek(MAGIC_LEN, io.SEEK_CUR) EXTRACTOR = Command("lz4", "--decompress", "{inpath}", "{outdir}/{infile}") class LegacyFrameHandler(_LZ4HandlerBase): NAME = "lz4_legacy" PATTERNS = [HexString("02 21 4C 18")] def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]: self._skip_magic_bytes(file) while True: # The last block is detected either because it is followed by the “EOF” (End of File) mark, # or because it is followed by a known Frame Magic Number. raw_bsize = file.read(BLOCK_SIZE_LEN) if raw_bsize == b"": # EOF break block_compressed_size = convert_int32(raw_bsize, Endian.LITTLE) if block_compressed_size in FRAME_MAGICS: # next magic, read too far file.seek(-4, io.SEEK_CUR) break compressed_block = file.read(block_compressed_size) uncompressed_block = decompress(compressed_block, MAX_LEGACY_BLOCK_SIZE) # See 'fixed block size' in https://android.googlesource.com/platform/external/lz4/+/HEAD/doc/lz4_Frame_format.md#legacy-frame if len(uncompressed_block) < MAX_LEGACY_BLOCK_SIZE: break end_offset = file.tell() return ValidChunk(start_offset=start_offset, end_offset=end_offset) class SkippableFrameHandler(_LZ4HandlerBase): """This can be anything, basically uncompressed data.""" NAME = "lz4_skippable" PATTERNS = [HexString("5? 2A 4D 18")] def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]: self._skip_magic_bytes(file) frame_size = convert_int32(file.read(FRAME_SIZE_LEN), Endian.LITTLE) file.seek(frame_size, io.SEEK_CUR) end_offset = file.tell() return ValidChunk(start_offset=start_offset, end_offset=end_offset) class DefaultFrameHandler(_LZ4HandlerBase): """This is the modern version, most frequently used.""" NAME = "lz4_default" PATTERNS = [HexString("04 22 4D 18")] def calculate_chunk( # noqa: C901 self, file: File, start_offset: int ) -> Optional[ValidChunk]: self._skip_magic_bytes(file) # 2. 
we parse the frame descriptor of dynamic size flg_bytes = file.read(FLG_LEN) raw_flg = convert_int8(flg_bytes, Endian.LITTLE) flg = FLG(raw_flg) logger.debug("Parsed FLG", **flg.as_dict()) # skip BD (max blocksize), only useful for decoders that needs to allocate memory file.seek(BD_LEN, io.SEEK_CUR) if flg.content_size: file.seek(CONTENT_SIZE_LEN, io.SEEK_CUR) if flg.dictid: file.seek(DICTID_LEN, io.SEEK_CUR) header_checksum = convert_int8(file.read(HC_LEN), Endian.LITTLE) logger.debug("Header checksum (HC) read", header_checksum=header_checksum) # 3. we read block by block until we hit the endmarker while True: block_size = convert_int32(file.read(BLOCK_SIZE_LEN), Endian.LITTLE) logger.debug("block_size", block_size=block_size) if block_size == END_MARK: break file.seek(block_size, io.SEEK_CUR) if flg.block_checksum: file.seek(BLOCK_CHECKSUM_LEN, io.SEEK_CUR) # 4. we reached the endmark (0x00000000) # 5. if frame descriptor mentions CRC, we add CRC if flg.content_checksum: file.seek(CONTENT_CHECKSUM_LEN, io.SEEK_CUR) end_offset = file.tell() return ValidChunk(start_offset=start_offset, end_offset=end_offset)
32.910714
138
0.657985
import io from typing import Optional from lz4.block import decompress from structlog import get_logger from unblob.extractors import Command from ...file_utils import Endian, convert_int8, convert_int32 from ...models import File, Handler, HexString, ValidChunk logger = get_logger() SKIPPABLE_FRAMES_MAGIC = [0x184D2A50 + i for i in range(0, 16)] FRAME_MAGIC = 0x184D2204 LEGACY_FRAME_MAGIC = 0x184C2102 FRAME_MAGICS = SKIPPABLE_FRAMES_MAGIC + [FRAME_MAGIC] + [LEGACY_FRAME_MAGIC] _1BIT = 0x01 _2BITS = 0x03 END_MARK = 0x00000000 CONTENT_SIZE_LEN = 8 BLOCK_SIZE_LEN = ( FRAME_SIZE_LEN ) = BLOCK_CHECKSUM_LEN = CONTENT_CHECKSUM_LEN = MAGIC_LEN = DICTID_LEN = 4 FLG_LEN = BD_LEN = HC_LEN = 1 MAX_LEGACY_BLOCK_SIZE = 8 * 1024 * 1024 class FLG: version: int = 0 block_independence: int = 0 block_checksum: int = 0 content_size: int = 0 content_checksum: int = 0 dictid: int = 0 def __init__(self, raw_flg: int): self.version = (raw_flg >> 6) & _2BITS self.block_independence = (raw_flg >> 5) & _1BIT self.block_checksum = (raw_flg >> 4) & _1BIT self.content_size = (raw_flg >> 3) & _1BIT self.content_checksum = (raw_flg >> 2) & _1BIT self.dictid = raw_flg & _1BIT def as_dict(self) -> dict: return { "version": self.version, "block_independence": self.block_independence, "block_checksum": self.block_checksum, "content_size": self.content_size, "content_checksum": self.content_checksum, "dictid": self.dictid, } class _LZ4HandlerBase(Handler): def _skip_magic_bytes(self, file: File): file.seek(MAGIC_LEN, io.SEEK_CUR) EXTRACTOR = Command("lz4", "--decompress", "{inpath}", "{outdir}/{infile}") class LegacyFrameHandler(_LZ4HandlerBase): NAME = "lz4_legacy" PATTERNS = [HexString("02 21 4C 18")] def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]: self._skip_magic_bytes(file) while True: raw_bsize = file.read(BLOCK_SIZE_LEN) if raw_bsize == b"": break block_compressed_size = convert_int32(raw_bsize, Endian.LITTLE) if block_compressed_size in FRAME_MAGICS: file.seek(-4, io.SEEK_CUR) break compressed_block = file.read(block_compressed_size) uncompressed_block = decompress(compressed_block, MAX_LEGACY_BLOCK_SIZE) if len(uncompressed_block) < MAX_LEGACY_BLOCK_SIZE: break end_offset = file.tell() return ValidChunk(start_offset=start_offset, end_offset=end_offset) class SkippableFrameHandler(_LZ4HandlerBase): NAME = "lz4_skippable" PATTERNS = [HexString("5? 
2A 4D 18")] def calculate_chunk(self, file: File, start_offset: int) -> Optional[ValidChunk]: self._skip_magic_bytes(file) frame_size = convert_int32(file.read(FRAME_SIZE_LEN), Endian.LITTLE) file.seek(frame_size, io.SEEK_CUR) end_offset = file.tell() return ValidChunk(start_offset=start_offset, end_offset=end_offset) class DefaultFrameHandler(_LZ4HandlerBase): NAME = "lz4_default" PATTERNS = [HexString("04 22 4D 18")] def calculate_chunk( self, file: File, start_offset: int ) -> Optional[ValidChunk]: self._skip_magic_bytes(file) flg_bytes = file.read(FLG_LEN) raw_flg = convert_int8(flg_bytes, Endian.LITTLE) flg = FLG(raw_flg) logger.debug("Parsed FLG", **flg.as_dict()) file.seek(BD_LEN, io.SEEK_CUR) if flg.content_size: file.seek(CONTENT_SIZE_LEN, io.SEEK_CUR) if flg.dictid: file.seek(DICTID_LEN, io.SEEK_CUR) header_checksum = convert_int8(file.read(HC_LEN), Endian.LITTLE) logger.debug("Header checksum (HC) read", header_checksum=header_checksum) while True: block_size = convert_int32(file.read(BLOCK_SIZE_LEN), Endian.LITTLE) logger.debug("block_size", block_size=block_size) if block_size == END_MARK: break file.seek(block_size, io.SEEK_CUR) if flg.block_checksum: file.seek(BLOCK_CHECKSUM_LEN, io.SEEK_CUR) if flg.content_checksum: file.seek(CONTENT_CHECKSUM_LEN, io.SEEK_CUR) end_offset = file.tell() return ValidChunk(start_offset=start_offset, end_offset=end_offset)
true
true
f7085c13fbffd806d01c4d4dbad4ef478c69338d
901
py
Python
utils/image_converter.py
ml-research/Learning-to-Break-Deep-Perceptual-Hashing
12148e8ecd47faa1f816f52f56662c47cd240cc1
[ "MIT" ]
3
2021-11-28T06:18:58.000Z
2022-01-27T08:14:43.000Z
utils/image_converter.py
ml-research/Learning-to-Break-Deep-Perceptual-Hashing
12148e8ecd47faa1f816f52f56662c47cd240cc1
[ "MIT" ]
null
null
null
utils/image_converter.py
ml-research/Learning-to-Break-Deep-Perceptual-Hashing
12148e8ecd47faa1f816f52f56662c47cd240cc1
[ "MIT" ]
1
2021-12-25T10:51:21.000Z
2021-12-25T10:51:21.000Z
from PIL import Image import os import sys from os import listdir from os.path import isfile, join def main(): folder_path = sys.argv[1] output_folder_path = folder_path + '_png' try: os.mkdir(output_folder_path) except: if not os.listdir(output_folder_path): print(f'Folder {output_folder_path} already exists and is empty.') else: print( f'Folder {output_folder_path} already exists and is not empty.') files = [f for f in listdir(folder_path) if isfile(join(folder_path, f))] for file in files: file_path = join(folder_path, file) img = Image.open(file_path) output_filename = file.rsplit(sep='.', maxsplit=1)[0] + '.png' output_path = join(output_folder_path, output_filename) img.save(output_path, format='png', quality=100) if __name__ == "__main__": main()
27.30303
79
0.644839
from PIL import Image import os import sys from os import listdir from os.path import isfile, join def main(): folder_path = sys.argv[1] output_folder_path = folder_path + '_png' try: os.mkdir(output_folder_path) except: if not os.listdir(output_folder_path): print(f'Folder {output_folder_path} already exists and is empty.') else: print( f'Folder {output_folder_path} already exists and is not empty.') files = [f for f in listdir(folder_path) if isfile(join(folder_path, f))] for file in files: file_path = join(folder_path, file) img = Image.open(file_path) output_filename = file.rsplit(sep='.', maxsplit=1)[0] + '.png' output_path = join(output_folder_path, output_filename) img.save(output_path, format='png', quality=100) if __name__ == "__main__": main()
true
true
f7085e10c8d2e0b60f50b9c127218e92539ccc62
6,456
py
Python
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinn_front_end_common/interface/interface_functions/front_end_common_edge_to_n_keys_mapper.py
Roboy/LSM_SpiNNaker_MyoArm
04fa1eaf78778edea3ba3afa4c527d20c491718e
[ "BSD-3-Clause" ]
2
2020-11-01T13:22:11.000Z
2020-11-01T13:22:20.000Z
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinn_front_end_common/interface/interface_functions/front_end_common_edge_to_n_keys_mapper.py
Roboy/LSM_SpiNNaker_MyoArm
04fa1eaf78778edea3ba3afa4c527d20c491718e
[ "BSD-3-Clause" ]
null
null
null
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinn_front_end_common/interface/interface_functions/front_end_common_edge_to_n_keys_mapper.py
Roboy/LSM_SpiNNaker_MyoArm
04fa1eaf78778edea3ba3afa4c527d20c491718e
[ "BSD-3-Clause" ]
null
null
null
# pacman imports
from pacman.model.routing_info.\
    dict_based_partitioned_partition_n_keys_map import \
    DictBasedPartitionedPartitionNKeysMap

# spinnMachine imports
from spinn_machine.utilities.progress_bar import ProgressBar

# front end common imports
from spinn_front_end_common.abstract_models.\
    abstract_provides_incoming_partition_constraints import \
    AbstractProvidesIncomingPartitionConstraints
from spinn_front_end_common.abstract_models.\
    abstract_provides_n_keys_for_partition import \
    AbstractProvidesNKeysForPartition
from spinn_front_end_common.abstract_models.\
    abstract_provides_outgoing_partition_constraints import \
    AbstractProvidesOutgoingPartitionConstraints
from spinn_front_end_common.utilities import exceptions


class FrontEndCommonEdgeToNKeysMapper(object):
    """ Works out the number of keys needed for each edge
    """

    def __call__(self, partitioned_graph, partitionable_graph=None,
                 graph_mapper=None):

        # Generate an n_keys map for the graph and add constraints
        n_keys_map = DictBasedPartitionedPartitionNKeysMap()

        # generate progress bar
        progress_bar = ProgressBar(
            len(partitioned_graph.subvertices),
            "Deducing edge to number of keys map")

        # contains a partitionable vertex
        if partitionable_graph is not None and graph_mapper is not None:

            # iterate over each partition in the partitioned graph
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitionable_partition(
                        partition, n_keys_map, partition_id, graph_mapper,
                        partitionable_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()
        else:
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitioned_partition(
                        partition, n_keys_map, partition_id,
                        partitioned_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()

        return {'n_keys_map': n_keys_map}

    @staticmethod
    def _check_constraints_equal(constraints, stored_constraints):
        """
        :param constraints:
        :param stored_constraints:
        :return:
        """
        for constraint in constraints:
            if constraint not in stored_constraints:
                raise exceptions.ConfigurationException(
                    "Two edges within the same partition have different "
                    "constraints. This is deemed an error. Please fix and "
                    "try again")

    @staticmethod
    def _process_partitionable_partition(
            partition, n_keys_map, partition_id, graph_mapper,
            partitionable_graph):
        partitioned_edge = partition.edges[0]
        vertex_slice = graph_mapper.get_subvertex_slice(
            partitioned_edge.pre_subvertex)
        edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
            partitioned_edge)
        if not isinstance(edge.pre_vertex, AbstractProvidesNKeysForPartition):
            n_keys_map.set_n_keys_for_partition(
                partition, vertex_slice.n_atoms)
        else:
            n_keys_map.set_n_keys_for_partition(
                partition,
                edge.pre_vertex.get_n_keys_for_partition(
                    partition, graph_mapper))
        constraints = list()
        if isinstance(edge.pre_vertex,
                      AbstractProvidesOutgoingPartitionConstraints):
            constraints.extend(
                edge.pre_vertex.get_outgoing_partition_constraints(
                    partition, graph_mapper))
        if isinstance(edge.post_vertex,
                      AbstractProvidesIncomingPartitionConstraints):
            constraints.extend(
                edge.post_vertex.get_incoming_partition_constraints(
                    partition, graph_mapper))
        constraints.extend(
            partitionable_graph.partition_from_vertex(
                edge.pre_vertex, partition_id).constraints)
        return constraints

    @staticmethod
    def _process_partitioned_partition(
            partition, n_keys_map, partition_id, partitioned_graph):
        edge = partition.edges[0]
        if not isinstance(edge.pre_subvertex,
                          AbstractProvidesNKeysForPartition):
            n_keys_map.set_n_keys_for_partition(partition, 1)
        else:
            n_keys_map.set_n_keys_for_partition(
                partition,
                edge.pre_subvertex.get_n_keys_for_partition(
                    partition, None))
        constraints = list()
        if isinstance(edge.pre_subvertex,
                      AbstractProvidesOutgoingPartitionConstraints):
            constraints.extend(
                edge.pre_subvertex.get_outgoing_partition_constraints(
                    partition, None))
        if isinstance(edge.post_subvertex,
                      AbstractProvidesIncomingPartitionConstraints):
            constraints.extend(
                edge.post_subvertex.get_incoming_partition_constraints(
                    partition, None))
        constraints.extend(
            partitioned_graph.partition_from_vertex(
                edge.pre_subvertex, partition_id).constraints)
        return constraints
41.121019
79
0.632125
from pacman.model.routing_info.\
    dict_based_partitioned_partition_n_keys_map import \
    DictBasedPartitionedPartitionNKeysMap
from spinn_machine.utilities.progress_bar import ProgressBar
from spinn_front_end_common.abstract_models.\
    abstract_provides_incoming_partition_constraints import \
    AbstractProvidesIncomingPartitionConstraints
from spinn_front_end_common.abstract_models.\
    abstract_provides_n_keys_for_partition import \
    AbstractProvidesNKeysForPartition
from spinn_front_end_common.abstract_models.\
    abstract_provides_outgoing_partition_constraints import \
    AbstractProvidesOutgoingPartitionConstraints
from spinn_front_end_common.utilities import exceptions


class FrontEndCommonEdgeToNKeysMapper(object):

    def __call__(self, partitioned_graph, partitionable_graph=None,
                 graph_mapper=None):
        n_keys_map = DictBasedPartitionedPartitionNKeysMap()
        progress_bar = ProgressBar(
            len(partitioned_graph.subvertices),
            "Deducing edge to number of keys map")
        if partitionable_graph is not None and graph_mapper is not None:
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitionable_partition(
                        partition, n_keys_map, partition_id, graph_mapper,
                        partitionable_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()
        else:
            for vertex in partitioned_graph.subvertices:
                partitions = \
                    partitioned_graph.outgoing_edges_partitions_from_vertex(
                        vertex)
                for partition_id in partitions:
                    partition = partitions[partition_id]
                    added_constraints = False
                    constraints = self._process_partitioned_partition(
                        partition, n_keys_map, partition_id,
                        partitioned_graph)
                    if not added_constraints:
                        partition.add_constraints(constraints)
                    else:
                        self._check_constraints_equal(
                            constraints, partition.constraints)
                progress_bar.update()
            progress_bar.end()

        return {'n_keys_map': n_keys_map}

    @staticmethod
    def _check_constraints_equal(constraints, stored_constraints):
        for constraint in constraints:
            if constraint not in stored_constraints:
                raise exceptions.ConfigurationException(
                    "Two edges within the same partition have different "
                    "constraints. This is deemed an error. Please fix and "
                    "try again")

    @staticmethod
    def _process_partitionable_partition(
            partition, n_keys_map, partition_id, graph_mapper,
            partitionable_graph):
        partitioned_edge = partition.edges[0]
        vertex_slice = graph_mapper.get_subvertex_slice(
            partitioned_edge.pre_subvertex)
        edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
            partitioned_edge)
        if not isinstance(edge.pre_vertex, AbstractProvidesNKeysForPartition):
            n_keys_map.set_n_keys_for_partition(
                partition, vertex_slice.n_atoms)
        else:
            n_keys_map.set_n_keys_for_partition(
                partition,
                edge.pre_vertex.get_n_keys_for_partition(
                    partition, graph_mapper))
        constraints = list()
        if isinstance(edge.pre_vertex,
                      AbstractProvidesOutgoingPartitionConstraints):
            constraints.extend(
                edge.pre_vertex.get_outgoing_partition_constraints(
                    partition, graph_mapper))
        if isinstance(edge.post_vertex,
                      AbstractProvidesIncomingPartitionConstraints):
            constraints.extend(
                edge.post_vertex.get_incoming_partition_constraints(
                    partition, graph_mapper))
        constraints.extend(
            partitionable_graph.partition_from_vertex(
                edge.pre_vertex, partition_id).constraints)
        return constraints

    @staticmethod
    def _process_partitioned_partition(
            partition, n_keys_map, partition_id, partitioned_graph):
        edge = partition.edges[0]
        if not isinstance(edge.pre_subvertex,
                          AbstractProvidesNKeysForPartition):
            n_keys_map.set_n_keys_for_partition(partition, 1)
        else:
            n_keys_map.set_n_keys_for_partition(
                partition,
                edge.pre_subvertex.get_n_keys_for_partition(
                    partition, None))
        constraints = list()
        if isinstance(edge.pre_subvertex,
                      AbstractProvidesOutgoingPartitionConstraints):
            constraints.extend(
                edge.pre_subvertex.get_outgoing_partition_constraints(
                    partition, None))
        if isinstance(edge.post_subvertex,
                      AbstractProvidesIncomingPartitionConstraints):
            constraints.extend(
                edge.post_subvertex.get_incoming_partition_constraints(
                    partition, None))
        constraints.extend(
            partitioned_graph.partition_from_vertex(
                edge.pre_subvertex, partition_id).constraints)
        return constraints
true
true
f7086129b7bd3d12551eac468949f7252bcec980
6,286
py
Python
pdf_maker.py
jlvdb/the-wizz
21e88888472d2598a0db861aef31076078628b8e
[ "Apache-2.0" ]
null
null
null
pdf_maker.py
jlvdb/the-wizz
21e88888472d2598a0db861aef31076078628b8e
[ "Apache-2.0" ]
null
null
null
pdf_maker.py
jlvdb/the-wizz
21e88888472d2598a0db861aef31076078628b8e
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3
"""This code is the main access point for the majority of users of The-wiZZ.
It takes an input subselection of a survey catalog, a The-wiZZ HDF5 data
file, and matches the two together to create a resultant clustering redshift
estimate that can then be turned into a redshift PDF. This code also takes
care of any weighting of the objects with unknown redshift, redshift binning,
bootstrapping errors, and output. See input_flags.py for a list of options or
use --help from the command line.
"""

import numpy as np

from the_wizz import core_utils
from the_wizz import pdf_maker_utils
from the_wizz import input_flags

if __name__ == "__main__":

    print("")
    print("The-wiZZ has begun conjuring: running pdf maker...")
    # First we parse the command line for arguments as usual. See
    # input_flags.py for a full list of input arguments.
    args = input_flags.parse_input_pdf_args()
    input_flags.print_args(args)
    # Load the file containing all matched pairs of spectroscopic and
    # photometric objects.
    print("Loading unknown data...")
    unknown_data = core_utils.file_checker_loader(args.unknown_sample_file)
    # Now we figure out what kind of redshift binning we would like to have.
    # This will be one of the largest impacts on the signal to noise of the
    # measurement. Some rules of thumb are:
    # The narrower bins are in redshift the better. You are measuring a
    # correlation, the narrower the bin size in comoving distance the more
    # correlated things will be and thus increase the amplitude. Aka use
    # Groth/Pebbles[sic] scaling to your advantage.
    # For a spectroscopic sample that is selected for a specific redshift
    # range with few galaxies outside that range (eg DEEP2), adaptive binning
    # is recommended. This will keep a equal number spectra per redshift bin.
    # A good rule is to try to have about 100 spectra per redshift bin for max
    # signal to noise.
    # Linear binning is provided as a curtesy and is not nesassarly
    # recommended. It will not give the best signal to noise compared to
    # adaptive and has the same draw backs as adaptive is that the bias could
    # be changing oddly from bin to bin. It is recommended that the user try
    # adaptive and comoving spaced bins for the best results. Comoving returns
    # bins that are of equal comoving distance from the line of sight. We also
    # provide binning in equal ln(1 + z). This is for people who want a
    # comoving like binning but without the dependece on cosmology. It also
    # has the convienent property of giving errors that can be more easlily
    # compared the usual simga/(1 + z) error.
    print("Creating bins...")
    if args.z_binning_type[0] == 'linear':
        z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(
            args.z_min, args.z_max, args.z_n_bins)
    elif args.z_binning_type[0] == 'adaptive':
        z_bin_edge_array = pdf_maker_utils._create_adaptive_redshift_bin_edges(
            args.z_min, args.z_max, args.z_n_bins,
            pdf_maker.reference_redshift_array)
    elif args.z_binning_type[0] == 'comoving':
        z_bin_edge_array = pdf_maker_utils._create_comoving_redshift_bin_edges(
            args.z_min, args.z_max, args.z_n_bins)
    elif args.z_binning_type[0] == 'logspace':
        z_bin_edge_array = pdf_maker_utils._create_logspace_redshift_bin_edges(
            args.z_min, args.z_max, args.z_n_bins)
    elif args.z_binning_type[0] == 'file':
        z_bin_edge_array = np.loadtxt(args.z_binning_type[1])[:-1]
    else:
        print("Requested binning name invalid. Valid types are:")
        print("\tlinear: linear binning in redshift")
        print("\tadaptive: constant reference objects per redshift bin")
        print("\tcomoving: linear binning in comoving distance")
        print("\tfile: file providing the bin edges")
        print("Returning linear binning...")
        z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges(
            args.z_min, args.z_max, args.z_n_bins)
    # This is where the heavy lifting happens. We create our PDF maker object
    # which will hold the pair file for use, calculate the over density per
    # redshift bin, and also store intermediary results for later use.
    # Before we can estimate the PDF, we must mask for the objects we want
    # to estimate the redshit of. These objects can be color selected,
    # photo-z selected, or any other object selection you would like. The code
    # line below turns the array of indices in the hdf5 pair file, into a
    # single density estimate around the reference object.
    print("Starting indices matcher...")
    pdf_maker = pdf_maker_utils.collapse_ids_to_single_estimate(
        args.input_pair_hdf5_file, args.pair_scale_name, unknown_data, args)
    # Before we calculated the pdfs, we want to know what the over densities
    # are in each of the regions calculated on the area we consider.
    print("Calculating region densities...")
    pdf_maker.compute_region_densities(z_bin_edge_array, args.z_max)
    if args.output_region_pickle_file is not None:
        pdf_maker.write_region_densities(args.output_region_pickle_file, args)
    # Now that we've "collapsed" the estimate around the reference object we
    # need to bin up the results in redshift and create our final PDF.
    print("Calculating pdf...")
    if args.bootstrap_samples is None:
        pdf_maker.compute_pdf_bootstrap(args.n_bootstrap)
    else:
        bootstrap_region_array = np.loadtxt(args.bootstrap_samples,
                                            dtype=np.int_)
        pdf_maker._compute_pdf_bootstrap(bootstrap_region_array)
    # Write individual bootstraps to file.
    if args.output_bootstraps_file is not None:
        pdf_maker.write_bootstrap_samples_to_ascii(args.output_bootstraps_file,
                                                   args)
    # Now that we have the results. We just need to write them to file and we
    # are done.
    print("Writing...")
    output_file = core_utils.create_ascii_file(args.output_pdf_file_name,
                                               args)
    pdf_maker.write_pdf_to_ascii(output_file)
    output_file.close()
    print("Done!")
54.66087
79
0.715399
import numpy as np from the_wizz import core_utils from the_wizz import pdf_maker_utils from the_wizz import input_flags if __name__ == "__main__": print("") print("The-wiZZ has begun conjuring: running pdf maker...") args = input_flags.parse_input_pdf_args() input_flags.print_args(args) print("Loading unknown data...") unknown_data = core_utils.file_checker_loader(args.unknown_sample_file) print("Creating bins...") if args.z_binning_type[0] == 'linear': z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges( args.z_min, args.z_max, args.z_n_bins) elif args.z_binning_type[0] == 'adaptive': z_bin_edge_array = pdf_maker_utils._create_adaptive_redshift_bin_edges( args.z_min, args.z_max, args.z_n_bins, pdf_maker.reference_redshift_array) elif args.z_binning_type[0] == 'comoving': z_bin_edge_array = pdf_maker_utils._create_comoving_redshift_bin_edges( args.z_min, args.z_max, args.z_n_bins) elif args.z_binning_type[0] == 'logspace': z_bin_edge_array = pdf_maker_utils._create_logspace_redshift_bin_edges( args.z_min, args.z_max, args.z_n_bins) elif args.z_binning_type[0] == 'file': z_bin_edge_array = np.loadtxt(args.z_binning_type[1])[:-1] else: print("Requested binning name invalid. Valid types are:") print("\tlinear: linear binning in redshift") print("\tadaptive: constant reference objects per redshift bin") print("\tcomoving: linear binning in comoving distance") print("\tfile: file providing the bin edges") print("Returning linear binning...") z_bin_edge_array = pdf_maker_utils._create_linear_redshift_bin_edges( args.z_min, args.z_max, args.z_n_bins) print("Starting indices matcher...") pdf_maker = pdf_maker_utils.collapse_ids_to_single_estimate( args.input_pair_hdf5_file, args.pair_scale_name, unknown_data, args) print("Calculating region densities...") pdf_maker.compute_region_densities(z_bin_edge_array, args.z_max) if args.output_region_pickle_file is not None: pdf_maker.write_region_densities(args.output_region_pickle_file, args) # need to bin up the results in redshift and create our final PDF. print("Calculating pdf...") if args.bootstrap_samples is None: pdf_maker.compute_pdf_bootstrap(args.n_bootstrap) else: bootstrap_region_array = np.loadtxt(args.bootstrap_samples, dtype=np.int_) pdf_maker._compute_pdf_bootstrap(bootstrap_region_array) # Write individual bootstraps to file. if args.output_bootstraps_file is not None: pdf_maker.write_bootstrap_samples_to_ascii(args.output_bootstraps_file, args) # Now that we have the results. We just need to write them to file and we # are done. print("Writing...") output_file = core_utils.create_ascii_file(args.output_pdf_file_name, args) pdf_maker.write_pdf_to_ascii(output_file) output_file.close() print("Done!")
true
true
f708620cad5142402febfbc989ae3b8a7258b902
2,648
py
Python
apps/shop/models.py
Nicolaad/onlineweb4
5942eaf907d6824d5384147627def9edefdb9946
[ "MIT" ]
null
null
null
apps/shop/models.py
Nicolaad/onlineweb4
5942eaf907d6824d5384147627def9edefdb9946
[ "MIT" ]
null
null
null
apps/shop/models.py
Nicolaad/onlineweb4
5942eaf907d6824d5384147627def9edefdb9946
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import uuid from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.core.validators import MinValueValidator from django.db import models from rest_framework.exceptions import NotAcceptable from apps.authentication.models import OnlineUser as User class Order(models.Model): content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey("content_type", "object_id") order_line = models.ForeignKey( "OrderLine", related_name="orders", on_delete=models.CASCADE ) # Price of product when paid price = models.DecimalField(max_digits=10, decimal_places=2, blank=True) # Quantity of products ordered quantity = models.PositiveIntegerField(validators=[MinValueValidator(1)]) def total_price(self): return self.content_object.price * self.quantity def reduce_stock(self): self.content_object.reduce_stock(self.quantity) def __str__(self): return str(self.content_object) class Meta: default_permissions = ("add", "change", "delete") class OrderLine(models.Model): user = models.ForeignKey(User, related_name="u", on_delete=models.CASCADE) datetime = models.DateTimeField(auto_now_add=True) paid = models.BooleanField(default=False) def count_orders(self): return sum((order.quantity for order in self.orders.all())) def subtotal(self): return sum((order.total_price() for order in self.orders.all())) def pay(self): if self.paid: return if self.subtotal() > self.user.saldo: self.delete() raise NotAcceptable("Insufficient funds") # Setting price for orders in case product price changes later for order in self.orders.all(): order.price = order.total_price() order.save() order.reduce_stock() self.user.saldo = self.user.saldo - self.subtotal() self.user.save() self.paid = True self.save() def __str__(self): return str(self.pk) class Meta: default_permissions = ("add", "change", "delete") class MagicToken(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) token = models.CharField("token", default=uuid.uuid4, max_length=36) data = models.TextField("data") created = models.DateTimeField("created", editable=False, auto_now_add=True) class Meta: default_permissions = ("add", "change", "delete")
31.152941
80
0.687689
import uuid from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.core.validators import MinValueValidator from django.db import models from rest_framework.exceptions import NotAcceptable from apps.authentication.models import OnlineUser as User class Order(models.Model): content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) object_id = models.PositiveIntegerField() content_object = GenericForeignKey("content_type", "object_id") order_line = models.ForeignKey( "OrderLine", related_name="orders", on_delete=models.CASCADE ) price = models.DecimalField(max_digits=10, decimal_places=2, blank=True) quantity = models.PositiveIntegerField(validators=[MinValueValidator(1)]) def total_price(self): return self.content_object.price * self.quantity def reduce_stock(self): self.content_object.reduce_stock(self.quantity) def __str__(self): return str(self.content_object) class Meta: default_permissions = ("add", "change", "delete") class OrderLine(models.Model): user = models.ForeignKey(User, related_name="u", on_delete=models.CASCADE) datetime = models.DateTimeField(auto_now_add=True) paid = models.BooleanField(default=False) def count_orders(self): return sum((order.quantity for order in self.orders.all())) def subtotal(self): return sum((order.total_price() for order in self.orders.all())) def pay(self): if self.paid: return if self.subtotal() > self.user.saldo: self.delete() raise NotAcceptable("Insufficient funds") for order in self.orders.all(): order.price = order.total_price() order.save() order.reduce_stock() self.user.saldo = self.user.saldo - self.subtotal() self.user.save() self.paid = True self.save() def __str__(self): return str(self.pk) class Meta: default_permissions = ("add", "change", "delete") class MagicToken(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) token = models.CharField("token", default=uuid.uuid4, max_length=36) data = models.TextField("data") created = models.DateTimeField("created", editable=False, auto_now_add=True) class Meta: default_permissions = ("add", "change", "delete")
true
true
f7086304767140feaa75fdfd87ef314fb7e3e72f
2,300
py
Python
isotope/run_tests.py
daixiang0/tools
06ec9d04b426e9620eebfe04d265b0ba16c16f2f
[ "Apache-2.0" ]
264
2018-02-19T05:29:09.000Z
2022-03-31T18:25:15.000Z
isotope/run_tests.py
daixiang0/tools
06ec9d04b426e9620eebfe04d265b0ba16c16f2f
[ "Apache-2.0" ]
976
2018-04-26T18:44:43.000Z
2022-03-31T21:46:37.000Z
isotope/run_tests.py
daixiang0/tools
06ec9d04b426e9620eebfe04d265b0ba16c16f2f
[ "Apache-2.0" ]
243
2018-01-22T21:06:17.000Z
2022-03-28T13:14:26.000Z
#!/usr/bin/env python3 # Copyright Istio Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging from runner import cluster, config as cfg, consts, entrypoint, mesh, pipeline def main(args: argparse.Namespace) -> None: log_level = getattr(logging, args.log_level) logging.basicConfig(level=log_level, format='%(levelname)s\t> %(message)s') config = cfg.from_toml_file(args.config_path) cluster.set_up_if_not_exists( config.cluster_project_id, config.cluster_name, config.cluster_zones, config.cluster_version, config.server_machine_type, config.server_disk_size_gb, config.server_num_nodes, config.client_machine_type, config.client_disk_size_gb) for topology_path in config.topology_paths: for env_name in config.environments: entrypoint_service_name = entrypoint.extract_name(topology_path) mesh_environment = mesh.for_state( env_name, entrypoint_service_name, consts.SERVICE_GRAPH_NAMESPACE, config, args.helm_values) pipeline.run(topology_path, mesh_environment, config.server_image, config.client_image, config.istio_archive_url, config.client_qps, config.client_duration, config.client_num_conc_conns, config.labels()) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument('config_path', type=str) parser.add_argument('helm_values', type=str) parser.add_argument( '--log_level', type=str, choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], default='DEBUG') return parser.parse_args() if __name__ == '__main__': args = parse_args() main(args)
37.096774
79
0.705217
import argparse import logging from runner import cluster, config as cfg, consts, entrypoint, mesh, pipeline def main(args: argparse.Namespace) -> None: log_level = getattr(logging, args.log_level) logging.basicConfig(level=log_level, format='%(levelname)s\t> %(message)s') config = cfg.from_toml_file(args.config_path) cluster.set_up_if_not_exists( config.cluster_project_id, config.cluster_name, config.cluster_zones, config.cluster_version, config.server_machine_type, config.server_disk_size_gb, config.server_num_nodes, config.client_machine_type, config.client_disk_size_gb) for topology_path in config.topology_paths: for env_name in config.environments: entrypoint_service_name = entrypoint.extract_name(topology_path) mesh_environment = mesh.for_state( env_name, entrypoint_service_name, consts.SERVICE_GRAPH_NAMESPACE, config, args.helm_values) pipeline.run(topology_path, mesh_environment, config.server_image, config.client_image, config.istio_archive_url, config.client_qps, config.client_duration, config.client_num_conc_conns, config.labels()) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument('config_path', type=str) parser.add_argument('helm_values', type=str) parser.add_argument( '--log_level', type=str, choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], default='DEBUG') return parser.parse_args() if __name__ == '__main__': args = parse_args() main(args)
true
true
f70863342fef175da268d31958ec978288c8b856
4,119
py
Python
django/contrib/sitemaps/__init__.py
egenerat/gae-django
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
[ "MIT" ]
3
2016-07-08T23:49:32.000Z
2018-04-15T22:55:01.000Z
django/contrib/sitemaps/__init__.py
egenerat/gae-django
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
[ "MIT" ]
27
2017-02-05T15:57:04.000Z
2018-04-15T22:57:26.000Z
django/contrib/sitemaps/__init__.py
egenerat/gae-django
f12379483cf3917ed3cb46ca5ff0b94daf89fc50
[ "MIT" ]
null
null
null
from django.contrib.sites.models import Site, get_current_site from django.core import urlresolvers, paginator from django.core.exceptions import ImproperlyConfigured import urllib PING_URL = "http://www.google.com/webmasters/tools/ping" class SitemapNotFound(Exception): pass def ping_google(sitemap_url=None, ping_url=PING_URL): """ Alerts Google that the sitemap for the current site has been updated. If sitemap_url is provided, it should be an absolute path to the sitemap for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this function will attempt to deduce it by using urlresolvers.reverse(). """ if sitemap_url is None: try: # First, try to get the "index" sitemap URL. sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index') except urlresolvers.NoReverseMatch: try: # Next, try for the "global" sitemap URL. sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap') except urlresolvers.NoReverseMatch: pass if sitemap_url is None: raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.") from django.contrib.sites.models import Site current_site = Site.objects.get_current() url = "http://%s%s" % (current_site.domain, sitemap_url) params = urllib.urlencode({'sitemap':url}) urllib.urlopen("%s?%s" % (ping_url, params)) class Sitemap(object): # This limit is defined by Google. See the index documentation at # http://sitemaps.org/protocol.php#index. limit = 50000 def __get(self, name, obj, default=None): try: attr = getattr(self, name) except AttributeError: return default if callable(attr): return attr(obj) return attr def items(self): return [] def location(self, obj): return obj.get_absolute_url() def _get_paginator(self): if not hasattr(self, "_paginator"): self._paginator = paginator.Paginator(self.items(), self.limit) return self._paginator paginator = property(_get_paginator) def get_urls(self, page=1, site=None): if site is None: if Site._meta.installed: try: site = Site.objects.get_current() except Site.DoesNotExist: pass if site is None: raise ImproperlyConfigured("In order to use Sitemaps you must either use the sites framework or pass in a Site or RequestSite object in your view code.") urls = [] for item in self.paginator.page(page).object_list: loc = "http://%s%s" % (site.domain, self.__get('location', item)) priority = self.__get('priority', item, None) url_info = { 'location': loc, 'lastmod': self.__get('lastmod', item, None), 'changefreq': self.__get('changefreq', item, None), 'priority': str(priority is not None and priority or '') } urls.append(url_info) return urls class FlatPageSitemap(Sitemap): def items(self): current_site = Site.objects.get_current() return current_site.flatpage_set.filter(registration_required=False) class GenericSitemap(Sitemap): priority = None changefreq = None def __init__(self, info_dict, priority=None, changefreq=None): self.queryset = info_dict['queryset'] self.date_field = info_dict.get('date_field', None) self.priority = priority self.changefreq = changefreq def items(self): # Make sure to return a clone; we don't want premature evaluation. return self.queryset.filter() def lastmod(self, item): if self.date_field is not None: return getattr(item, self.date_field) return None
37.445455
170
0.616655
from django.contrib.sites.models import Site, get_current_site from django.core import urlresolvers, paginator from django.core.exceptions import ImproperlyConfigured import urllib PING_URL = "http://www.google.com/webmasters/tools/ping" class SitemapNotFound(Exception): pass def ping_google(sitemap_url=None, ping_url=PING_URL): if sitemap_url is None: try: sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.index') except urlresolvers.NoReverseMatch: try: sitemap_url = urlresolvers.reverse('django.contrib.sitemaps.views.sitemap') except urlresolvers.NoReverseMatch: pass if sitemap_url is None: raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.") from django.contrib.sites.models import Site current_site = Site.objects.get_current() url = "http://%s%s" % (current_site.domain, sitemap_url) params = urllib.urlencode({'sitemap':url}) urllib.urlopen("%s?%s" % (ping_url, params)) class Sitemap(object): limit = 50000 def __get(self, name, obj, default=None): try: attr = getattr(self, name) except AttributeError: return default if callable(attr): return attr(obj) return attr def items(self): return [] def location(self, obj): return obj.get_absolute_url() def _get_paginator(self): if not hasattr(self, "_paginator"): self._paginator = paginator.Paginator(self.items(), self.limit) return self._paginator paginator = property(_get_paginator) def get_urls(self, page=1, site=None): if site is None: if Site._meta.installed: try: site = Site.objects.get_current() except Site.DoesNotExist: pass if site is None: raise ImproperlyConfigured("In order to use Sitemaps you must either use the sites framework or pass in a Site or RequestSite object in your view code.") urls = [] for item in self.paginator.page(page).object_list: loc = "http://%s%s" % (site.domain, self.__get('location', item)) priority = self.__get('priority', item, None) url_info = { 'location': loc, 'lastmod': self.__get('lastmod', item, None), 'changefreq': self.__get('changefreq', item, None), 'priority': str(priority is not None and priority or '') } urls.append(url_info) return urls class FlatPageSitemap(Sitemap): def items(self): current_site = Site.objects.get_current() return current_site.flatpage_set.filter(registration_required=False) class GenericSitemap(Sitemap): priority = None changefreq = None def __init__(self, info_dict, priority=None, changefreq=None): self.queryset = info_dict['queryset'] self.date_field = info_dict.get('date_field', None) self.priority = priority self.changefreq = changefreq def items(self): return self.queryset.filter() def lastmod(self, item): if self.date_field is not None: return getattr(item, self.date_field) return None
true
true
f70863889847fb1a163c7bc0a443fdebd98b7b02
10,304
py
Python
src/tools/autoware_auto_avp_demo/launch/ms3_core.launch.py
rubis-lab/autoware_rubis
498ec5ff4c448d456fa0c6fe2f17e02fbd13ddb9
[ "Apache-2.0" ]
null
null
null
src/tools/autoware_auto_avp_demo/launch/ms3_core.launch.py
rubis-lab/autoware_rubis
498ec5ff4c448d456fa0c6fe2f17e02fbd13ddb9
[ "Apache-2.0" ]
null
null
null
src/tools/autoware_auto_avp_demo/launch/ms3_core.launch.py
rubis-lab/autoware_rubis
498ec5ff4c448d456fa0c6fe2f17e02fbd13ddb9
[ "Apache-2.0" ]
null
null
null
# Copyright 2020-2021, The Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Launch Modules for Milestone 3 of the AVP 2020 Demo."""

from ament_index_python import get_package_share_directory
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.actions import IncludeLaunchDescription
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node

import os


def generate_launch_description():
    """
    Launch all nodes defined in the architecture for Milestone 3 of the AVP
    2020 Demo.

    More details about what is included can be found at
    https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/-/milestones/25.
    """
    avp_demo_pkg_prefix = get_package_share_directory('autoware_auto_avp_demo')
    euclidean_cluster_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/euclidean_cluster.param.yaml')
    ray_ground_classifier_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/ray_ground_classifier.param.yaml')
    scan_downsampler_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/scan_downsampler_ms3.param.yaml')
    lanelet2_map_provider_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/lanelet2_map_provider.param.yaml')
    lane_planner_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/lane_planner.param.yaml')
    parking_planner_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/parking_planner.param.yaml')
    object_collision_estimator_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/object_collision_estimator.param.yaml')
    behavior_planner_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/behavior_planner.param.yaml')
    off_map_obstacles_filter_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/off_map_obstacles_filter.param.yaml')

    point_cloud_fusion_node_pkg_prefix = get_package_share_directory(
        'point_cloud_fusion_nodes')

    # Arguments
    euclidean_cluster_param = DeclareLaunchArgument(
        'euclidean_cluster_param_file',
        default_value=euclidean_cluster_param_file,
        description='Path to config file for Euclidean Clustering'
    )
    ray_ground_classifier_param = DeclareLaunchArgument(
        'ray_ground_classifier_param_file',
        default_value=ray_ground_classifier_param_file,
        description='Path to config file for Ray Ground Classifier'
    )
    with_obstacles_param = DeclareLaunchArgument(
        'with_obstacles',
        default_value='True',
        description='Enable obstacle detection'
    )
    scan_downsampler_param = DeclareLaunchArgument(
        'scan_downsampler_param_file',
        default_value=scan_downsampler_param_file,
        description='Path to config file for lidar scan downsampler'
    )
    lanelet2_map_provider_param = DeclareLaunchArgument(
        'lanelet2_map_provider_param_file',
        default_value=lanelet2_map_provider_param_file,
        description='Path to parameter file for Lanelet2 Map Provider'
    )
    lane_planner_param = DeclareLaunchArgument(
        'lane_planner_param_file',
        default_value=lane_planner_param_file,
        description='Path to parameter file for lane planner'
    )
    parking_planner_param = DeclareLaunchArgument(
        'parking_planner_param_file',
        default_value=parking_planner_param_file,
        description='Path to parameter file for parking planner'
    )
    object_collision_estimator_param = DeclareLaunchArgument(
        'object_collision_estimator_param_file',
        default_value=object_collision_estimator_param_file,
        description='Path to parameter file for object collision estimator'
    )
    behavior_planner_param = DeclareLaunchArgument(
        'behavior_planner_param_file',
        default_value=behavior_planner_param_file,
        description='Path to parameter file for behavior planner'
    )
    off_map_obstacles_filter_param = DeclareLaunchArgument(
        'off_map_obstacles_filter_param_file',
        default_value=off_map_obstacles_filter_param_file,
        description='Path to parameter file for off-map obstacle filter'
    )

    # Nodes
    euclidean_clustering = Node(
        package='euclidean_cluster_nodes',
        executable='euclidean_cluster_node_exe',
        namespace='perception',
        condition=IfCondition(LaunchConfiguration('with_obstacles')),
        parameters=[LaunchConfiguration('euclidean_cluster_param_file')],
        remappings=[
            ("points_in", "points_nonground")
        ]
    )
    # point cloud fusion runner to fuse front and rear lidar
    point_cloud_fusion_node = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(point_cloud_fusion_node_pkg_prefix,
                         'launch/vlp16_sim_lexus_pc_fusion.launch.py'))
    )
    ray_ground_classifier = Node(
        package='ray_ground_classifier_nodes',
        executable='ray_ground_classifier_cloud_node_exe',
        namespace='perception',
        condition=IfCondition(LaunchConfiguration('with_obstacles')),
        parameters=[LaunchConfiguration('ray_ground_classifier_param_file')],
        remappings=[("points_in", "/lidars/points_fused")]
    )
    scan_downsampler = Node(
        package='voxel_grid_nodes',
        executable='voxel_grid_node_exe',
        namespace='lidars',
        name='voxel_grid_cloud_node',
        parameters=[LaunchConfiguration('scan_downsampler_param_file')],
        remappings=[
            ("points_in", "points_fused"),
            ("points_downsampled", "points_fused_downsampled")
        ]
    )
    lanelet2_map_provider = Node(
        package='lanelet2_map_provider',
        executable='lanelet2_map_provider_exe',
        namespace='had_maps',
        name='lanelet2_map_provider_node',
        parameters=[LaunchConfiguration('lanelet2_map_provider_param_file')]
    )
    lanelet2_map_visualizer = Node(
        package='lanelet2_map_provider',
        executable='lanelet2_map_visualizer_exe',
        name='lanelet2_map_visualizer_node',
        namespace='had_maps'
    )
    global_planner = Node(
        package='lanelet2_global_planner_nodes',
        name='lanelet2_global_planner_node',
        namespace='planning',
        executable='lanelet2_global_planner_node_exe',
        remappings=[('HAD_Map_Client', '/had_maps/HAD_Map_Service'),
                    ('vehicle_kinematic_state',
                     '/vehicle/vehicle_kinematic_state')]
    )
    lane_planner = Node(
        package='lane_planner_nodes',
        name='lane_planner_node',
        namespace='planning',
        executable='lane_planner_node_exe',
        parameters=[LaunchConfiguration('lane_planner_param_file')],
        remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
    )
    parking_planner = Node(
        package='parking_planner_nodes',
        name='parking_planner_node',
        namespace='planning',
        executable='parking_planner_node_exe',
        parameters=[LaunchConfiguration('parking_planner_param_file')],
        remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
    )
    object_collision_estimator = Node(
        package='object_collision_estimator_nodes',
        name='object_collision_estimator_node',
        namespace='planning',
        executable='object_collision_estimator_node_exe',
        condition=IfCondition(LaunchConfiguration('with_obstacles')),
        parameters=[LaunchConfiguration('object_collision_estimator_param_file')],
        remappings=[
            ('obstacle_topic', '/perception/lidar_bounding_boxes_filtered'),
        ]
    )
    behavior_planner = Node(
        package='behavior_planner_nodes',
        name='behavior_planner_node',
        namespace='planning',
        executable='behavior_planner_node_exe',
        parameters=[
            LaunchConfiguration('behavior_planner_param_file'),
            {'enable_object_collision_estimator':
             LaunchConfiguration('with_obstacles')}
        ],
        output='screen',
        remappings=[
            ('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
            ('vehicle_state', '/vehicle/vehicle_kinematic_state'),
            ('route', 'global_path'),
            ('vehicle_state_report', '/vehicle/state_report'),
            ('vehicle_state_command', '/vehicle/state_command')
        ]
    )
    off_map_obstacles_filter = Node(
        package='off_map_obstacles_filter_nodes',
        name='off_map_obstacles_filter_node',
        namespace='perception',
        executable='off_map_obstacles_filter_nodes_exe',
        parameters=[LaunchConfiguration('off_map_obstacles_filter_param_file')],
        output='screen',
        remappings=[
            ('bounding_boxes_in', 'lidar_bounding_boxes'),
            ('bounding_boxes_out', 'lidar_bounding_boxes_filtered'),
            ('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
        ]
    )

    return LaunchDescription([
        euclidean_cluster_param,
        ray_ground_classifier_param,
        scan_downsampler_param,
        with_obstacles_param,
        lanelet2_map_provider_param,
        lane_planner_param,
        parking_planner_param,
        object_collision_estimator_param,
        behavior_planner_param,
        off_map_obstacles_filter_param,
        euclidean_clustering,
        ray_ground_classifier,
        scan_downsampler,
        point_cloud_fusion_node,
        lanelet2_map_provider,
        lanelet2_map_visualizer,
        global_planner,
        lane_planner,
        parking_planner,
        object_collision_estimator,
        behavior_planner,
        off_map_obstacles_filter,
    ])
40.566929
97
0.712151
from ament_index_python import get_package_share_directory
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.actions import IncludeLaunchDescription
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node

import os


def generate_launch_description():
    avp_demo_pkg_prefix = get_package_share_directory('autoware_auto_avp_demo')
    euclidean_cluster_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/euclidean_cluster.param.yaml')
    ray_ground_classifier_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/ray_ground_classifier.param.yaml')
    scan_downsampler_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/scan_downsampler_ms3.param.yaml')
    lanelet2_map_provider_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/lanelet2_map_provider.param.yaml')
    lane_planner_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/lane_planner.param.yaml')
    parking_planner_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/parking_planner.param.yaml')
    object_collision_estimator_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/object_collision_estimator.param.yaml')
    behavior_planner_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/behavior_planner.param.yaml')
    off_map_obstacles_filter_param_file = os.path.join(
        avp_demo_pkg_prefix, 'param/off_map_obstacles_filter.param.yaml')

    point_cloud_fusion_node_pkg_prefix = get_package_share_directory(
        'point_cloud_fusion_nodes')

    euclidean_cluster_param = DeclareLaunchArgument(
        'euclidean_cluster_param_file',
        default_value=euclidean_cluster_param_file,
        description='Path to config file for Euclidean Clustering'
    )
    ray_ground_classifier_param = DeclareLaunchArgument(
        'ray_ground_classifier_param_file',
        default_value=ray_ground_classifier_param_file,
        description='Path to config file for Ray Ground Classifier'
    )
    with_obstacles_param = DeclareLaunchArgument(
        'with_obstacles',
        default_value='True',
        description='Enable obstacle detection'
    )
    scan_downsampler_param = DeclareLaunchArgument(
        'scan_downsampler_param_file',
        default_value=scan_downsampler_param_file,
        description='Path to config file for lidar scan downsampler'
    )
    lanelet2_map_provider_param = DeclareLaunchArgument(
        'lanelet2_map_provider_param_file',
        default_value=lanelet2_map_provider_param_file,
        description='Path to parameter file for Lanelet2 Map Provider'
    )
    lane_planner_param = DeclareLaunchArgument(
        'lane_planner_param_file',
        default_value=lane_planner_param_file,
        description='Path to parameter file for lane planner'
    )
    parking_planner_param = DeclareLaunchArgument(
        'parking_planner_param_file',
        default_value=parking_planner_param_file,
        description='Path to parameter file for parking planner'
    )
    object_collision_estimator_param = DeclareLaunchArgument(
        'object_collision_estimator_param_file',
        default_value=object_collision_estimator_param_file,
        description='Path to parameter file for object collision estimator'
    )
    behavior_planner_param = DeclareLaunchArgument(
        'behavior_planner_param_file',
        default_value=behavior_planner_param_file,
        description='Path to parameter file for behavior planner'
    )
    off_map_obstacles_filter_param = DeclareLaunchArgument(
        'off_map_obstacles_filter_param_file',
        default_value=off_map_obstacles_filter_param_file,
        description='Path to parameter file for off-map obstacle filter'
    )

    euclidean_clustering = Node(
        package='euclidean_cluster_nodes',
        executable='euclidean_cluster_node_exe',
        namespace='perception',
        condition=IfCondition(LaunchConfiguration('with_obstacles')),
        parameters=[LaunchConfiguration('euclidean_cluster_param_file')],
        remappings=[
            ("points_in", "points_nonground")
        ]
    )
    point_cloud_fusion_node = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            os.path.join(point_cloud_fusion_node_pkg_prefix,
                         'launch/vlp16_sim_lexus_pc_fusion.launch.py'))
    )
    ray_ground_classifier = Node(
        package='ray_ground_classifier_nodes',
        executable='ray_ground_classifier_cloud_node_exe',
        namespace='perception',
        condition=IfCondition(LaunchConfiguration('with_obstacles')),
        parameters=[LaunchConfiguration('ray_ground_classifier_param_file')],
        remappings=[("points_in", "/lidars/points_fused")]
    )
    scan_downsampler = Node(
        package='voxel_grid_nodes',
        executable='voxel_grid_node_exe',
        namespace='lidars',
        name='voxel_grid_cloud_node',
        parameters=[LaunchConfiguration('scan_downsampler_param_file')],
        remappings=[
            ("points_in", "points_fused"),
            ("points_downsampled", "points_fused_downsampled")
        ]
    )
    lanelet2_map_provider = Node(
        package='lanelet2_map_provider',
        executable='lanelet2_map_provider_exe',
        namespace='had_maps',
        name='lanelet2_map_provider_node',
        parameters=[LaunchConfiguration('lanelet2_map_provider_param_file')]
    )
    lanelet2_map_visualizer = Node(
        package='lanelet2_map_provider',
        executable='lanelet2_map_visualizer_exe',
        name='lanelet2_map_visualizer_node',
        namespace='had_maps'
    )
    global_planner = Node(
        package='lanelet2_global_planner_nodes',
        name='lanelet2_global_planner_node',
        namespace='planning',
        executable='lanelet2_global_planner_node_exe',
        remappings=[('HAD_Map_Client', '/had_maps/HAD_Map_Service'),
                    ('vehicle_kinematic_state',
                     '/vehicle/vehicle_kinematic_state')]
    )
    lane_planner = Node(
        package='lane_planner_nodes',
        name='lane_planner_node',
        namespace='planning',
        executable='lane_planner_node_exe',
        parameters=[LaunchConfiguration('lane_planner_param_file')],
        remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
    )
    parking_planner = Node(
        package='parking_planner_nodes',
        name='parking_planner_node',
        namespace='planning',
        executable='parking_planner_node_exe',
        parameters=[LaunchConfiguration('parking_planner_param_file')],
        remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
    )
    object_collision_estimator = Node(
        package='object_collision_estimator_nodes',
        name='object_collision_estimator_node',
        namespace='planning',
        executable='object_collision_estimator_node_exe',
        condition=IfCondition(LaunchConfiguration('with_obstacles')),
        parameters=[LaunchConfiguration('object_collision_estimator_param_file')],
        remappings=[
            ('obstacle_topic', '/perception/lidar_bounding_boxes_filtered'),
        ]
    )
    behavior_planner = Node(
        package='behavior_planner_nodes',
        name='behavior_planner_node',
        namespace='planning',
        executable='behavior_planner_node_exe',
        parameters=[
            LaunchConfiguration('behavior_planner_param_file'),
            {'enable_object_collision_estimator':
             LaunchConfiguration('with_obstacles')}
        ],
        output='screen',
        remappings=[
            ('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
            ('vehicle_state', '/vehicle/vehicle_kinematic_state'),
            ('route', 'global_path'),
            ('vehicle_state_report', '/vehicle/state_report'),
            ('vehicle_state_command', '/vehicle/state_command')
        ]
    )
    off_map_obstacles_filter = Node(
        package='off_map_obstacles_filter_nodes',
        name='off_map_obstacles_filter_node',
        namespace='perception',
        executable='off_map_obstacles_filter_nodes_exe',
        parameters=[LaunchConfiguration('off_map_obstacles_filter_param_file')],
        output='screen',
        remappings=[
            ('bounding_boxes_in', 'lidar_bounding_boxes'),
            ('bounding_boxes_out', 'lidar_bounding_boxes_filtered'),
            ('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
        ]
    )

    return LaunchDescription([
        euclidean_cluster_param,
        ray_ground_classifier_param,
        scan_downsampler_param,
        with_obstacles_param,
        lanelet2_map_provider_param,
        lane_planner_param,
        parking_planner_param,
        object_collision_estimator_param,
        behavior_planner_param,
        off_map_obstacles_filter_param,
        euclidean_clustering,
        ray_ground_classifier,
        scan_downsampler,
        point_cloud_fusion_node,
        lanelet2_map_provider,
        lanelet2_map_visualizer,
        global_planner,
        lane_planner,
        parking_planner,
        object_collision_estimator,
        behavior_planner,
        off_map_obstacles_filter,
    ])
true
true
f70866c7894b122e47ce5ce5260f234fc330a584
3,141
py
Python
tools/telemetry/telemetry/page/gtest_test_results.py
iplo/Chain
8bc8943d66285d5258fffc41bed7c840516c4422
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
231
2015-01-08T09:04:44.000Z
2021-12-30T03:03:10.000Z
tools/telemetry/telemetry/page/gtest_test_results.py
xiaoyuyanran/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
1
2017-02-14T21:55:58.000Z
2017-02-14T21:55:58.000Z
tools/telemetry/telemetry/page/gtest_test_results.py
xiaoyuyanran/chromium
c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
268
2015-01-21T05:53:28.000Z
2022-03-25T22:09:01.000Z
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import sys import time import unittest from telemetry.page import page_test_results class GTestTestResults(page_test_results.PageTestResults): def __init__(self, output_stream): super(GTestTestResults, self).__init__(output_stream) self._timestamp = None def _GetMs(self): return (time.time() - self._timestamp) * 1000 @property def num_errors(self): return len(self.errors) + len(self.failures) @staticmethod def _formatTestname(test): if isinstance(test, unittest.TestCase): chunks = test.id().split('.')[-2:] return '.'.join(chunks) else: return str(test) def _emitFailure(self, test, err): print >> self._output_stream, self._exc_info_to_string(err, test) test_name = GTestTestResults._formatTestname(test) print >> self._output_stream, '[ FAILED ]', test_name, ( '(%0.f ms)' % self._GetMs()) sys.stdout.flush() def addError(self, test, err): super(GTestTestResults, self).addError(test, err) self._emitFailure(test, err) def addFailure(self, test, err): super(GTestTestResults, self).addFailure(test, err) self._emitFailure(test, err) def startTest(self, test): super(GTestTestResults, self).startTest(test) print >> self._output_stream, '[ RUN ]', ( GTestTestResults._formatTestname(test)) sys.stdout.flush() self._timestamp = time.time() def addSuccess(self, test): super(GTestTestResults, self).addSuccess(test) test_name = GTestTestResults._formatTestname(test) print >> self._output_stream, '[ OK ]', test_name, ( '(%0.f ms)' % self._GetMs()) sys.stdout.flush() def addSkip(self, test, reason): super(GTestTestResults, self).addSkip(test, reason) test_name = GTestTestResults._formatTestname(test) logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason) if self._timestamp == None: self._timestamp = time.time() print >> self._output_stream, '[ OK ]', test_name, ( '(%0.f ms)' % self._GetMs()) sys.stdout.flush() def PrintSummary(self): unit = 'test' if len(self.successes) == 1 else 'tests' print >> self._output_stream, '[ PASSED ]', ( '%d %s.' % (len(self.successes), unit)) if self.errors or self.failures: all_errors = self.errors[:] all_errors.extend(self.failures) unit = 'test' if len(all_errors) == 1 else 'tests' print >> self._output_stream, '[ FAILED ]', ( '%d %s, listed below:' % (len(all_errors), unit)) for test, _ in all_errors: print >> self._output_stream, '[ FAILED ] ', ( GTestTestResults._formatTestname(test)) if not self.wasSuccessful(): print >> self._output_stream count = len(self.errors) + len(self.failures) unit = 'TEST' if count == 1 else 'TESTS' print >> self._output_stream, '%d FAILED %s' % (count, unit) print >> self._output_stream sys.stdout.flush()
34.516484
74
0.65425
import logging import sys import time import unittest from telemetry.page import page_test_results class GTestTestResults(page_test_results.PageTestResults): def __init__(self, output_stream): super(GTestTestResults, self).__init__(output_stream) self._timestamp = None def _GetMs(self): return (time.time() - self._timestamp) * 1000 @property def num_errors(self): return len(self.errors) + len(self.failures) @staticmethod def _formatTestname(test): if isinstance(test, unittest.TestCase): chunks = test.id().split('.')[-2:] return '.'.join(chunks) else: return str(test) def _emitFailure(self, test, err): print >> self._output_stream, self._exc_info_to_string(err, test) test_name = GTestTestResults._formatTestname(test) print >> self._output_stream, '[ FAILED ]', test_name, ( '(%0.f ms)' % self._GetMs()) sys.stdout.flush() def addError(self, test, err): super(GTestTestResults, self).addError(test, err) self._emitFailure(test, err) def addFailure(self, test, err): super(GTestTestResults, self).addFailure(test, err) self._emitFailure(test, err) def startTest(self, test): super(GTestTestResults, self).startTest(test) print >> self._output_stream, '[ RUN ]', ( GTestTestResults._formatTestname(test)) sys.stdout.flush() self._timestamp = time.time() def addSuccess(self, test): super(GTestTestResults, self).addSuccess(test) test_name = GTestTestResults._formatTestname(test) print >> self._output_stream, '[ OK ]', test_name, ( '(%0.f ms)' % self._GetMs()) sys.stdout.flush() def addSkip(self, test, reason): super(GTestTestResults, self).addSkip(test, reason) test_name = GTestTestResults._formatTestname(test) logging.warning('===== SKIPPING TEST %s: %s =====', test_name, reason) if self._timestamp == None: self._timestamp = time.time() print >> self._output_stream, '[ OK ]', test_name, ( '(%0.f ms)' % self._GetMs()) sys.stdout.flush() def PrintSummary(self): unit = 'test' if len(self.successes) == 1 else 'tests' print >> self._output_stream, '[ PASSED ]', ( '%d %s.' % (len(self.successes), unit)) if self.errors or self.failures: all_errors = self.errors[:] all_errors.extend(self.failures) unit = 'test' if len(all_errors) == 1 else 'tests' print >> self._output_stream, '[ FAILED ]', ( '%d %s, listed below:' % (len(all_errors), unit)) for test, _ in all_errors: print >> self._output_stream, '[ FAILED ] ', ( GTestTestResults._formatTestname(test)) if not self.wasSuccessful(): print >> self._output_stream count = len(self.errors) + len(self.failures) unit = 'TEST' if count == 1 else 'TESTS' print >> self._output_stream, '%d FAILED %s' % (count, unit) print >> self._output_stream sys.stdout.flush()
true
true
f70866f021973b2bbbd78644a63222e38c0f66c0
2,804
py
Python
cleaning/cleaner.py
antonpaquin/IsItAnime
72c2c15ae9bf3907a25b40378831ca4efd85cc5b
[ "MIT" ]
11
2018-06-21T08:15:31.000Z
2021-08-11T06:03:40.000Z
cleaning/cleaner.py
antonpaquin/IsItAnime
72c2c15ae9bf3907a25b40378831ca4efd85cc5b
[ "MIT" ]
null
null
null
cleaning/cleaner.py
antonpaquin/IsItAnime
72c2c15ae9bf3907a25b40378831ca4efd85cc5b
[ "MIT" ]
1
2020-01-24T20:21:48.000Z
2020-01-24T20:21:48.000Z
#! /usr/bin/python from flask import Flask, request, jsonify import boto3 import os from queue import Queue from threading import Thread import time s3 = boto3.client('s3') s3_raw = boto3.resource('s3').Bucket('isitanime-data-raw') s3_dest = boto3.resource('s3').Bucket('isitanime-data-clean') app = Flask(__name__) @app.route('/') def main(): with open('main.html', 'r') as in_f: html = in_f.read() return html @app.route('/keys') def keys(): prefix = request.args.get('prefix', 'safebooru') keys = get_keys(prefix, 100) return jsonify(keys) classify_queue = Queue() @app.route('/classify') def classify(): key = request.args.get('key') clss = request.args.get('class') assert clss in {'anime', 'notanime', 'delete'} classify_queue.put((key, clss)) return '', 200 def classify_thread(): while True: try: key, clss = classify_queue.get() classify_back(key, clss) except Exception: pass def classify_back(name, clss): copy_source = { 'Bucket': 'isitanime-data-raw', 'Key': name, } if clss != 'delete': s3_dest.copy(copy_source, clss + '-' + name) s3_raw.delete_objects( Delete={ 'Objects': [{ 'Key': name, }], 'Quiet': True, } ) print('S3 cleaned ' + name + ' == ' + clss) s3_key_cache = {} s3_marker_next = {} def get_keys(prefix, count): if prefix not in s3_key_cache: s3_key_cache[prefix] = [] if prefix not in s3_marker_next: if s3_key_cache[prefix]: s3_marker_next[prefix] = s3_key_cache[prefix][-1] else: s3_marker_next[prefix] = None key_cache = s3_key_cache[prefix] marker_next = s3_marker_next[prefix] while count > len(key_cache): if marker_next: resp = s3.list_objects( Bucket='isitanime-data-raw', Prefix=prefix, Marker=marker_next, ) else: resp = s3.list_objects( Bucket='isitanime-data-raw', Prefix=prefix, ) if 'Contents' not in resp: count = len(key_cache) print(resp) break key_cache.extend([obj['Key'] for obj in resp['Contents']]) s3_marker_next[prefix] = key_cache[-1] if not resp['IsTruncated']: count = len(key_cache) break print(key_cache) s3_key_cache[prefix] = key_cache[count:] return key_cache[:count] if __name__ == '__main__': boto_threadpool = [] for _ in range(5): t = Thread(target=classify_thread) boto_threadpool.append(t) t.start() app.run('127.0.0.1', port=8080)
23.762712
66
0.566334
from flask import Flask, request, jsonify import boto3 import os from queue import Queue from threading import Thread import time s3 = boto3.client('s3') s3_raw = boto3.resource('s3').Bucket('isitanime-data-raw') s3_dest = boto3.resource('s3').Bucket('isitanime-data-clean') app = Flask(__name__) @app.route('/') def main(): with open('main.html', 'r') as in_f: html = in_f.read() return html @app.route('/keys') def keys(): prefix = request.args.get('prefix', 'safebooru') keys = get_keys(prefix, 100) return jsonify(keys) classify_queue = Queue() @app.route('/classify') def classify(): key = request.args.get('key') clss = request.args.get('class') assert clss in {'anime', 'notanime', 'delete'} classify_queue.put((key, clss)) return '', 200 def classify_thread(): while True: try: key, clss = classify_queue.get() classify_back(key, clss) except Exception: pass def classify_back(name, clss): copy_source = { 'Bucket': 'isitanime-data-raw', 'Key': name, } if clss != 'delete': s3_dest.copy(copy_source, clss + '-' + name) s3_raw.delete_objects( Delete={ 'Objects': [{ 'Key': name, }], 'Quiet': True, } ) print('S3 cleaned ' + name + ' == ' + clss) s3_key_cache = {} s3_marker_next = {} def get_keys(prefix, count): if prefix not in s3_key_cache: s3_key_cache[prefix] = [] if prefix not in s3_marker_next: if s3_key_cache[prefix]: s3_marker_next[prefix] = s3_key_cache[prefix][-1] else: s3_marker_next[prefix] = None key_cache = s3_key_cache[prefix] marker_next = s3_marker_next[prefix] while count > len(key_cache): if marker_next: resp = s3.list_objects( Bucket='isitanime-data-raw', Prefix=prefix, Marker=marker_next, ) else: resp = s3.list_objects( Bucket='isitanime-data-raw', Prefix=prefix, ) if 'Contents' not in resp: count = len(key_cache) print(resp) break key_cache.extend([obj['Key'] for obj in resp['Contents']]) s3_marker_next[prefix] = key_cache[-1] if not resp['IsTruncated']: count = len(key_cache) break print(key_cache) s3_key_cache[prefix] = key_cache[count:] return key_cache[:count] if __name__ == '__main__': boto_threadpool = [] for _ in range(5): t = Thread(target=classify_thread) boto_threadpool.append(t) t.start() app.run('127.0.0.1', port=8080)
true
true
f70867e30ba8f552834af7092af1c7537adc3095
1,198
py
Python
tbx/people/blocks.py
elviva404/wagtail-torchbox
718d9e2c4337073f010296932d369c726a01dbd3
[ "MIT" ]
103
2015-02-24T17:58:21.000Z
2022-03-23T08:08:58.000Z
tbx/people/blocks.py
elviva404/wagtail-torchbox
718d9e2c4337073f010296932d369c726a01dbd3
[ "MIT" ]
145
2015-01-13T17:13:43.000Z
2022-03-29T12:56:20.000Z
tbx/people/blocks.py
elviva404/wagtail-torchbox
718d9e2c4337073f010296932d369c726a01dbd3
[ "MIT" ]
57
2015-01-03T12:00:37.000Z
2022-02-09T13:11:30.000Z
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock


class StandoutItemsBlock(blocks.StructBlock):
    class LinkBlock(blocks.StreamBlock):
        internal = blocks.PageChooserBlock()
        external = blocks.URLBlock()

        class Meta:
            required = False
            max_num = 1

    subtitle = blocks.CharBlock()
    title = blocks.CharBlock()
    description = blocks.TextBlock()
    image = ImageChooserBlock()
    link = LinkBlock()

    class Meta:
        icon = "pick"

    @staticmethod
    def get_link(value):
        """The link could be internal or external."""
        try:
            link = value[0]
        except IndexError:
            return ""
        else:
            return (
                link.value.url
                if link.block_type == "internal" and link.value
                else link.value
            )


class InstagramEmbedBlock(blocks.StructBlock):
    image = ImageChooserBlock()
    link = blocks.URLBlock(
        required=False,
        help_text="Link to a specific post here or leave blank for it to link to https://www.instagram.com/torchboxltd/",
    )

    class Meta:
        icon = "fa-instagram"
25.489362
121
0.600167
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock


class StandoutItemsBlock(blocks.StructBlock):
    class LinkBlock(blocks.StreamBlock):
        internal = blocks.PageChooserBlock()
        external = blocks.URLBlock()

        class Meta:
            required = False
            max_num = 1

    subtitle = blocks.CharBlock()
    title = blocks.CharBlock()
    description = blocks.TextBlock()
    image = ImageChooserBlock()
    link = LinkBlock()

    class Meta:
        icon = "pick"

    @staticmethod
    def get_link(value):
        try:
            link = value[0]
        except IndexError:
            return ""
        else:
            return (
                link.value.url
                if link.block_type == "internal" and link.value
                else link.value
            )


class InstagramEmbedBlock(blocks.StructBlock):
    image = ImageChooserBlock()
    link = blocks.URLBlock(
        required=False,
        help_text="Link to a specific post here or leave blank for it to link to https://www.instagram.com/torchboxltd/",
    )

    class Meta:
        icon = "fa-instagram"
true
true
f708682576c4e7833b45e1bc645efeeecf96c324
1,289
py
Python
tests/test_nodes.py
commarla/python-nomad
37df37e4de21e6f8ac41c6154e7f1f44f1800020
[ "MIT" ]
109
2016-06-06T09:18:02.000Z
2022-03-17T17:41:20.000Z
tests/test_nodes.py
commarla/python-nomad
37df37e4de21e6f8ac41c6154e7f1f44f1800020
[ "MIT" ]
104
2016-06-04T23:06:06.000Z
2021-12-08T04:49:43.000Z
tests/test_nodes.py
commarla/python-nomad
37df37e4de21e6f8ac41c6154e7f1f44f1800020
[ "MIT" ]
80
2016-06-05T00:33:23.000Z
2021-11-20T15:17:38.000Z
import pytest

# integration tests requires nomad Vagrant VM or Binary running


def test_get_nodes(nomad_setup):
    assert isinstance(nomad_setup.nodes.get_nodes(), list) == True


def test_get_nodes_prefix(nomad_setup):
    nodes = nomad_setup.nodes.get_nodes()
    prefix = nodes[0]["ID"][:4]
    nomad_setup.nodes.get_nodes(prefix=prefix)


def test_dunder_getitem_exist(nomad_setup):
    n = nomad_setup.nodes["pynomad1"]
    assert isinstance(n, dict)


def test_dunder_getitem_not_exist(nomad_setup):
    with pytest.raises(KeyError):
        j = nomad_setup.nodes["pynomad2"]


def test_dunder_contain_exists(nomad_setup):
    assert "pynomad1" in nomad_setup.nodes


def test_dunder_contain_not_exist(nomad_setup):
    assert "real.localdomain" not in nomad_setup.nodes


def test_dunder_str(nomad_setup):
    assert isinstance(str(nomad_setup.nodes), str)


def test_dunder_repr(nomad_setup):
    assert isinstance(repr(nomad_setup.nodes), str)


def test_dunder_getattr(nomad_setup):
    with pytest.raises(AttributeError):
        d = nomad_setup.nodes.does_not_exist


def test_dunder_iter(nomad_setup):
    assert hasattr(nomad_setup.nodes, '__iter__')
    for j in nomad_setup.nodes:
        pass


def test_dunder_len(nomad_setup):
    assert len(nomad_setup.nodes) >= 0
23.017857
66
0.753297
import pytest


def test_get_nodes(nomad_setup):
    assert isinstance(nomad_setup.nodes.get_nodes(), list) == True


def test_get_nodes_prefix(nomad_setup):
    nodes = nomad_setup.nodes.get_nodes()
    prefix = nodes[0]["ID"][:4]
    nomad_setup.nodes.get_nodes(prefix=prefix)


def test_dunder_getitem_exist(nomad_setup):
    n = nomad_setup.nodes["pynomad1"]
    assert isinstance(n, dict)


def test_dunder_getitem_not_exist(nomad_setup):
    with pytest.raises(KeyError):
        j = nomad_setup.nodes["pynomad2"]


def test_dunder_contain_exists(nomad_setup):
    assert "pynomad1" in nomad_setup.nodes


def test_dunder_contain_not_exist(nomad_setup):
    assert "real.localdomain" not in nomad_setup.nodes


def test_dunder_str(nomad_setup):
    assert isinstance(str(nomad_setup.nodes), str)


def test_dunder_repr(nomad_setup):
    assert isinstance(repr(nomad_setup.nodes), str)


def test_dunder_getattr(nomad_setup):
    with pytest.raises(AttributeError):
        d = nomad_setup.nodes.does_not_exist


def test_dunder_iter(nomad_setup):
    assert hasattr(nomad_setup.nodes, '__iter__')
    for j in nomad_setup.nodes:
        pass


def test_dunder_len(nomad_setup):
    assert len(nomad_setup.nodes) >= 0
true
true
f7086a22a8b6cbc948ed244b0ec6f73cdc2f4cd2
2,987
py
Python
newsroom/evaluate/run.py
peter-xbs/newsroom_chinese
7fcae68b2ea5584d08d0c48faee34a0734237e6b
[ "Apache-2.0" ]
null
null
null
newsroom/evaluate/run.py
peter-xbs/newsroom_chinese
7fcae68b2ea5584d08d0c48faee34a0734237e6b
[ "Apache-2.0" ]
null
null
null
newsroom/evaluate/run.py
peter-xbs/newsroom_chinese
7fcae68b2ea5584d08d0c48faee34a0734237e6b
[ "Apache-2.0" ]
null
null
null
################################################################################ from subprocess import Popen, PIPE, STDOUT from threading import Thread import bz2, json, click from newsroom import jsonl from . import readiter from tqdm import tqdm ################################################################################ def _writer(process, dataset_file, keys): for article in dataset_file: subset = {k: article[k] for k in keys if k in article} encoded = json.dumps(subset).encode("utf-8") process.stdin.write(encoded + b"\n") process.stdin.close() ################################################################################ articles_file = click.Path( exists = True, dir_okay = False, readable = True, resolve_path = True, ) summaries_file = click.Path( exists = False, dir_okay = False, writable = True, resolve_path = True, ) ################################################################################ @click.command() @click.option( "--system", type = str, required = True, help = "Name of docker image." ) @click.option( "--dataset", type = articles_file, required = True, help = "Input path to full dataset." ) @click.option( "--summaries", type = summaries_file, required = True, help = "Output path for system generated summaries." ) @click.option( "--keys", type = str, default = "text", help = "List of dataset keys to pass to system. [default = text]" ) ################################################################################ def main(system, dataset, summaries, keys): print("Starting", system, "Docker image.") process = Popen( [ "docker", "run", "--rm", "-a", "stdin", "-a", "stdout", "-i", system ], stdin = PIPE, stdout = PIPE, ) dataset_file = jsonl.open(dataset, gzip = True) # Check the size of the dataset. # As a sanity check and for the progress bar. print("Loading articles... ", end = "", flush = True) dataset_length = len(dataset_file) print("found", dataset_length, "articles.\n") # Start new thread to feed summaries into container. Thread( target = _writer, args = (process, dataset_file, keys.split(",")) ).start() # Start progress bar. progress = tqdm( readiter(process.stdout), total = dataset_length, desc = "Running " + system, ) # Prepare to decode summaries. is_json = True with jsonl.open(summaries, gzip = True) as summaries_file: summaries_file.delete() with progress as output: for line in output: summaries_file.appendline({"system": line}) print("\nRun complete. Next, evaluate with newsroom-score.") ################################################################################
22.628788
80
0.494811
from subprocess import Popen, PIPE, STDOUT from threading import Thread import bz2, json, click from newsroom import jsonl from . import readiter from tqdm import tqdm def _writer(process, dataset_file, keys): for article in dataset_file: subset = {k: article[k] for k in keys if k in article} encoded = json.dumps(subset).encode("utf-8") process.stdin.write(encoded + b"\n") process.stdin.close() articles_file = click.Path( exists = True, dir_okay = False, readable = True, resolve_path = True, ) summaries_file = click.Path( exists = False, dir_okay = False, writable = True, resolve_path = True, ) @click.command() @click.option( "--system", type = str, required = True, help = "Name of docker image." ) @click.option( "--dataset", type = articles_file, required = True, help = "Input path to full dataset." ) @click.option( "--summaries", type = summaries_file, required = True, help = "Output path for system generated summaries." ) @click.option( "--keys", type = str, default = "text", help = "List of dataset keys to pass to system. [default = text]" ) def main(system, dataset, summaries, keys): print("Starting", system, "Docker image.") process = Popen( [ "docker", "run", "--rm", "-a", "stdin", "-a", "stdout", "-i", system ], stdin = PIPE, stdout = PIPE, ) dataset_file = jsonl.open(dataset, gzip = True) print("Loading articles... ", end = "", flush = True) dataset_length = len(dataset_file) print("found", dataset_length, "articles.\n") Thread( target = _writer, args = (process, dataset_file, keys.split(",")) ).start() progress = tqdm( readiter(process.stdout), total = dataset_length, desc = "Running " + system, ) is_json = True with jsonl.open(summaries, gzip = True) as summaries_file: summaries_file.delete() with progress as output: for line in output: summaries_file.appendline({"system": line}) print("\nRun complete. Next, evaluate with newsroom-score.")
true
true
f7086a4801ba30d289dac1b201115bb48bcbdc1f
3,196
py
Python
app/models.py
OmegaM/story
421c457bc121d6ca418468199108da7bd9f96435
[ "BSD-3-Clause" ]
null
null
null
app/models.py
OmegaM/story
421c457bc121d6ca418468199108da7bd9f96435
[ "BSD-3-Clause" ]
null
null
null
app/models.py
OmegaM/story
421c457bc121d6ca418468199108da7bd9f96435
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python # -*- coding:utf-8 -*- """Created with Pycharm IDEA @Create on 2015/9/12 16:31 @my_story models.py @author : OmegaMiao""" from app import db, loginManager from datetime import datetime from werkzeug.security import generate_password_hash, check_password_hash from flask.ext.login import UserMixin class Story(db.Model): __tablename__ = 'story' id = db.Column(db.Integer, primary_key=True, autoincrement=True) title = db.Column(db.String(30), nullable=False) content = db.Column(db.String(500), nullable=False) create_time = db.Column(db.DateTime, default=datetime.now().strftime('%Y-%m-%d %H:%M:%S')) category_id = db.Column(db.Integer, db.ForeignKey('category.id')) author_id = db.Column(db.Integer, db.ForeignKey('author.id')) def __init__(self, title, content): self.title = title self.content = content def __repr__(self): return "<Story %r title %r>" % (self.id, self.title) def to_json(self): return { "id": self.id, "title": self.title, "content": self.content, "create_time": self.create_time.strftime('%Y-%m-%d %H:%M:%S') } class Category(db.Model): __tablename__ = 'category' id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(20), nullable=False, unique=True) storys = db.relationship('Story', backref='category', lazy='joined') def __init__(self, name): self.name = name def __repr__(self): return "<Category %r name %r>" % (self.id, self.name) class Author(db.Model): __tablename__ = 'author' id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(20), nullable=False) nick_name = db.Column(db.String(20), nullable=False, unique=True) storys = db.relationship('Story', backref='author', lazy='joined') def __init__(self, name, nick_name): self.name = name self.nick_name = nick_name def __repr__(self): return "<Author id: %r Name: %r nickName:%r>" % (self.id, self.name, self.nick_name) class User(UserMixin, db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) email = db.Column(db.String(64), unique=True, index=True) username = db.Column(db.String(64), unique=True, index=True) password_hash = db.Column(db.String(128)) role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) @property def password(self): raise AttributeError('password is not a readable attribute') @password.setter def password(self, password): self.password_hash = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.password_hash, password) def __repr__(self): return '<User %r>' % self.username class Role(db.Model): __tablename__ = 'roles' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64), unique=True) def __repr__(self): return '<Role %r>' % self.name @loginManager.user_loader def load_user(user_id): return User.query.get(int(user_id))
29.592593
94
0.661139
from app import db, loginManager from datetime import datetime from werkzeug.security import generate_password_hash, check_password_hash from flask.ext.login import UserMixin class Story(db.Model): __tablename__ = 'story' id = db.Column(db.Integer, primary_key=True, autoincrement=True) title = db.Column(db.String(30), nullable=False) content = db.Column(db.String(500), nullable=False) create_time = db.Column(db.DateTime, default=datetime.now().strftime('%Y-%m-%d %H:%M:%S')) category_id = db.Column(db.Integer, db.ForeignKey('category.id')) author_id = db.Column(db.Integer, db.ForeignKey('author.id')) def __init__(self, title, content): self.title = title self.content = content def __repr__(self): return "<Story %r title %r>" % (self.id, self.title) def to_json(self): return { "id": self.id, "title": self.title, "content": self.content, "create_time": self.create_time.strftime('%Y-%m-%d %H:%M:%S') } class Category(db.Model): __tablename__ = 'category' id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(20), nullable=False, unique=True) storys = db.relationship('Story', backref='category', lazy='joined') def __init__(self, name): self.name = name def __repr__(self): return "<Category %r name %r>" % (self.id, self.name) class Author(db.Model): __tablename__ = 'author' id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(20), nullable=False) nick_name = db.Column(db.String(20), nullable=False, unique=True) storys = db.relationship('Story', backref='author', lazy='joined') def __init__(self, name, nick_name): self.name = name self.nick_name = nick_name def __repr__(self): return "<Author id: %r Name: %r nickName:%r>" % (self.id, self.name, self.nick_name) class User(UserMixin, db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) email = db.Column(db.String(64), unique=True, index=True) username = db.Column(db.String(64), unique=True, index=True) password_hash = db.Column(db.String(128)) role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) @property def password(self): raise AttributeError('password is not a readable attribute') @password.setter def password(self, password): self.password_hash = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.password_hash, password) def __repr__(self): return '<User %r>' % self.username class Role(db.Model): __tablename__ = 'roles' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64), unique=True) def __repr__(self): return '<Role %r>' % self.name @loginManager.user_loader def load_user(user_id): return User.query.get(int(user_id))
true
true
f7086a73213296163f142ee1d848a32e72f49d62
7,182
py
Python
ckan/lib/app_globals.py
florianm/ckan
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
[ "Apache-2.0" ]
2
2015-07-17T19:09:52.000Z
2017-08-30T20:23:44.000Z
ckan/lib/app_globals.py
florianm/ckan
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
[ "Apache-2.0" ]
12
2015-01-19T18:03:56.000Z
2016-04-11T16:40:33.000Z
ckan/lib/app_globals.py
florianm/ckan
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
[ "Apache-2.0" ]
3
2015-03-31T06:19:42.000Z
2016-06-27T15:32:28.000Z
''' The application's Globals object ''' import logging import time from threading import Lock import re from paste.deploy.converters import asbool from pylons import config import ckan import ckan.model as model import ckan.logic as logic log = logging.getLogger(__name__) # mappings translate between config settings and globals because our naming # conventions are not well defined and/or implemented mappings = { # 'config_key': 'globals_key', } # This mapping is only used to define the configuration options (from the # `config` object) that should be copied to the `app_globals` (`g`) object. app_globals_from_config_details = { 'ckan.site_title': {}, 'ckan.site_logo': {}, 'ckan.site_url': {}, 'ckan.site_description': {}, 'ckan.site_about': {}, 'ckan.site_intro_text': {}, 'ckan.site_custom_css': {}, 'ckan.favicon': {}, # default gets set in config.environment.py 'ckan.template_head_end': {}, 'ckan.template_footer_end': {}, # has been setup in load_environment(): 'ckan.site_id': {}, 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'}, 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'}, 'ckan.template_title_deliminater': {'default': '-'}, 'ckan.template_head_end': {}, 'ckan.template_footer_end': {}, 'ckan.dumps_url': {}, 'ckan.dumps_format': {}, 'ofs.impl': {'name': 'ofs_impl'}, 'ckan.homepage_style': {'default': '1'}, # split string 'search.facets': {'default': 'organization groups tags res_format license_id', 'type': 'split', 'name': 'facets'}, 'package_hide_extras': {'type': 'split'}, 'ckan.plugins': {'type': 'split'}, # bool 'debug': {'default': 'false', 'type' : 'bool'}, 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'}, 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'}, 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'}, # int 'ckan.datasets_per_page': {'default': '20', 'type': 'int'}, 'ckan.activity_list_limit': {'default': '30', 'type': 'int'}, 'search.facets.default': {'default': '10', 'type': 'int', 'name': 'facets_default_number'}, } # A place to store the origional config options of we override them _CONFIG_CACHE = {} def set_main_css(css_file): ''' Sets the main_css. 
The css_file must be of the form file.css ''' assert css_file.endswith('.css') new_css = css_file # FIXME we should check the css file exists app_globals.main_css = str(new_css) def set_app_global(key, value): ''' Set a new key on the app_globals (g) object It will process the value according to the options on app_globals_from_config_details (if any) ''' key, value = process_app_global(key, value) setattr(app_globals, key, value) def process_app_global(key, value): ''' Tweak a key, value pair meant to be set on the app_globals (g) object According to the options on app_globals_from_config_details (if any) ''' options = app_globals_from_config_details.get(key) key = get_globals_key(key) if options: if 'name' in options: key = options['name'] value = value or options.get('default', '') data_type = options.get('type') if data_type == 'bool': value = asbool(value) elif data_type == 'int': value = int(value) elif data_type == 'split': value = value.split() return key, value def get_globals_key(key): # create our globals key # these can be specified in mappings or else we remove # the `ckan.` part this is to keep the existing namings # set the value if key in mappings: return mappings[key] elif key.startswith('ckan.'): return key[5:] else: return key def reset(): ''' set updatable values from config ''' def get_config_value(key, default=''): if model.meta.engine.has_table('system_info'): value = model.get_system_info(key) else: value = None config_value = config.get(key) # sort encodeings if needed if isinstance(config_value, str): try: config_value = config_value.decode('utf-8') except UnicodeDecodeError: config_value = config_value.decode('latin-1') # we want to store the config the first time we get here so we can # reset them if needed if key not in _CONFIG_CACHE: _CONFIG_CACHE[key] = config_value if value is not None: log.debug('config `%s` set to `%s` from db' % (key, value)) else: value = _CONFIG_CACHE[key] if value: log.debug('config `%s` set to `%s` from config' % (key, value)) else: value = default set_app_global(key, value) # update the config config[key] = value return value # update the config settings in auto update schema = logic.schema.update_configuration_schema() for key in schema.keys(): get_config_value(key) # cusom styling main_css = get_config_value('ckan.main_css', '/base/css/main.css') set_main_css(main_css) # site_url_nice site_url_nice = app_globals.site_url.replace('http://', '') site_url_nice = site_url_nice.replace('www.', '') app_globals.site_url_nice = site_url_nice if app_globals.site_logo: app_globals.header_class = 'header-image' elif not app_globals.site_description: app_globals.header_class = 'header-text-logo' else: app_globals.header_class = 'header-text-logo-tagline' class _Globals(object): ''' Globals acts as a container for objects available throughout the life of the application. 
''' def __init__(self): '''One instance of Globals is created during application initialization and is available during requests via the 'app_globals' variable ''' self._init() self._config_update = None self._mutex = Lock() def _check_uptodate(self): ''' check the config is uptodate needed when several instances are running ''' value = model.get_system_info('ckan.config_update') if self._config_update != value: if self._mutex.acquire(False): reset() self._config_update = value self._mutex.release() def _init(self): self.ckan_version = ckan.__version__ self.ckan_base_version = re.sub('[^0-9\.]', '', self.ckan_version) if self.ckan_base_version == self.ckan_version: self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version) else: self.ckan_doc_version = 'latest' # process the config details to set globals for key in app_globals_from_config_details.keys(): new_key, value = process_app_global(key, config.get(key) or '') setattr(self, new_key, value) app_globals = _Globals() del _Globals
31.778761
82
0.623225
import logging import time from threading import Lock import re from paste.deploy.converters import asbool from pylons import config import ckan import ckan.model as model import ckan.logic as logic log = logging.getLogger(__name__) mappings = { } app_globals_from_config_details = { 'ckan.site_title': {}, 'ckan.site_logo': {}, 'ckan.site_url': {}, 'ckan.site_description': {}, 'ckan.site_about': {}, 'ckan.site_intro_text': {}, 'ckan.site_custom_css': {}, 'ckan.favicon': {}, 'ckan.template_head_end': {}, 'ckan.template_footer_end': {}, 'ckan.site_id': {}, 'ckan.recaptcha.publickey': {'name': 'recaptcha_publickey'}, 'ckan.recaptcha.version': {'name': 'recaptcha_version', 'default': '1'}, 'ckan.template_title_deliminater': {'default': '-'}, 'ckan.template_head_end': {}, 'ckan.template_footer_end': {}, 'ckan.dumps_url': {}, 'ckan.dumps_format': {}, 'ofs.impl': {'name': 'ofs_impl'}, 'ckan.homepage_style': {'default': '1'}, 'search.facets': {'default': 'organization groups tags res_format license_id', 'type': 'split', 'name': 'facets'}, 'package_hide_extras': {'type': 'split'}, 'ckan.plugins': {'type': 'split'}, 'debug': {'default': 'false', 'type' : 'bool'}, 'ckan.debug_supress_header' : {'default': 'false', 'type' : 'bool'}, 'ckan.legacy_templates' : {'default': 'false', 'type' : 'bool'}, 'ckan.tracking_enabled' : {'default': 'false', 'type' : 'bool'}, 'ckan.datasets_per_page': {'default': '20', 'type': 'int'}, 'ckan.activity_list_limit': {'default': '30', 'type': 'int'}, 'search.facets.default': {'default': '10', 'type': 'int', 'name': 'facets_default_number'}, } _CONFIG_CACHE = {} def set_main_css(css_file): assert css_file.endswith('.css') new_css = css_file app_globals.main_css = str(new_css) def set_app_global(key, value): key, value = process_app_global(key, value) setattr(app_globals, key, value) def process_app_global(key, value): options = app_globals_from_config_details.get(key) key = get_globals_key(key) if options: if 'name' in options: key = options['name'] value = value or options.get('default', '') data_type = options.get('type') if data_type == 'bool': value = asbool(value) elif data_type == 'int': value = int(value) elif data_type == 'split': value = value.split() return key, value def get_globals_key(key): if key in mappings: return mappings[key] elif key.startswith('ckan.'): return key[5:] else: return key def reset(): def get_config_value(key, default=''): if model.meta.engine.has_table('system_info'): value = model.get_system_info(key) else: value = None config_value = config.get(key) if isinstance(config_value, str): try: config_value = config_value.decode('utf-8') except UnicodeDecodeError: config_value = config_value.decode('latin-1') if key not in _CONFIG_CACHE: _CONFIG_CACHE[key] = config_value if value is not None: log.debug('config `%s` set to `%s` from db' % (key, value)) else: value = _CONFIG_CACHE[key] if value: log.debug('config `%s` set to `%s` from config' % (key, value)) else: value = default set_app_global(key, value) config[key] = value return value schema = logic.schema.update_configuration_schema() for key in schema.keys(): get_config_value(key) main_css = get_config_value('ckan.main_css', '/base/css/main.css') set_main_css(main_css) site_url_nice = app_globals.site_url.replace('http://', '') site_url_nice = site_url_nice.replace('www.', '') app_globals.site_url_nice = site_url_nice if app_globals.site_logo: app_globals.header_class = 'header-image' elif not app_globals.site_description: app_globals.header_class = 'header-text-logo' else: 
app_globals.header_class = 'header-text-logo-tagline' class _Globals(object): def __init__(self): self._init() self._config_update = None self._mutex = Lock() def _check_uptodate(self): value = model.get_system_info('ckan.config_update') if self._config_update != value: if self._mutex.acquire(False): reset() self._config_update = value self._mutex.release() def _init(self): self.ckan_version = ckan.__version__ self.ckan_base_version = re.sub('[^0-9\.]', '', self.ckan_version) if self.ckan_base_version == self.ckan_version: self.ckan_doc_version = 'ckan-{0}'.format(self.ckan_version) else: self.ckan_doc_version = 'latest' for key in app_globals_from_config_details.keys(): new_key, value = process_app_global(key, config.get(key) or '') setattr(self, new_key, value) app_globals = _Globals() del _Globals
true
true
f7086ab559a469398539456d57d72ca300a38868
2,328
py
Python
bot_config.py
enemchy/rpi-transmission-telegram-bot
71ad3f9ed1c9c2111ac3cd416cd203ff661b67b0
[ "MIT" ]
null
null
null
bot_config.py
enemchy/rpi-transmission-telegram-bot
71ad3f9ed1c9c2111ac3cd416cd203ff661b67b0
[ "MIT" ]
null
null
null
bot_config.py
enemchy/rpi-transmission-telegram-bot
71ad3f9ed1c9c2111ac3cd416cd203ff661b67b0
[ "MIT" ]
null
null
null
import configparser class BotConfig: def __init__(self, path): parser = configparser.ConfigParser() # open the file implicitly because parser.read() will not fail if file is not readable file = open(path) parser.read_file(file) file.close() if 'Bot' not in parser.sections(): raise Exception('All parameters must reside in section ''Bot''') bot_section = parser['Bot'] self.address = bot_section.get('address', 'localhost') port_string = bot_section.get('port', '9091') try: self.port = int(port_string) except ValueError: raise ValueError('Port ''%s'' is invalid' % port_string) try: self.user = bot_section.get('user') except KeyError: self.user = None try: self.password = bot_section.get('password') except KeyError: self.password = None if self.password and not self.user: raise Exception('Password with no user name is meaningless') self.token = bot_section.get('token', '') if not self.token: raise Exception('Telegram token is required') self.secret = bot_section.get('secret', '') if not self.secret: raise Exception('Secret is required') try: self.persistence_file = bot_section.get('persistence_file') except KeyError: self.persistence_file = None def __str__(self): result = '{address:<%s> ' \ 'port:<%d> ' % (self.address, self.port) if not self.user: result += 'user:None ' else: result += 'user:<%s> ' % self.user if not self.password: result += 'password:None' else: result += 'password:present ' result += 'token:present ' result += 'secret:present ' result += 'persistence_file:<%s>}' % self.persistence_file return result def __repr__(self): return '{address:''%s'' port:%d user:''%s'' password:''%s'' ' \ 'token:''%s'' secret:''%s'' persistence_file:''%s''}' \ % (self.address, self.port, self.user, self.password, self.token, self.secret, self.persistence_file)
29.846154
94
0.554553
import configparser class BotConfig: def __init__(self, path): parser = configparser.ConfigParser() file = open(path) parser.read_file(file) file.close() if 'Bot' not in parser.sections(): raise Exception('All parameters must reside in section ''Bot''') bot_section = parser['Bot'] self.address = bot_section.get('address', 'localhost') port_string = bot_section.get('port', '9091') try: self.port = int(port_string) except ValueError: raise ValueError('Port ''%s'' is invalid' % port_string) try: self.user = bot_section.get('user') except KeyError: self.user = None try: self.password = bot_section.get('password') except KeyError: self.password = None if self.password and not self.user: raise Exception('Password with no user name is meaningless') self.token = bot_section.get('token', '') if not self.token: raise Exception('Telegram token is required') self.secret = bot_section.get('secret', '') if not self.secret: raise Exception('Secret is required') try: self.persistence_file = bot_section.get('persistence_file') except KeyError: self.persistence_file = None def __str__(self): result = '{address:<%s> ' \ 'port:<%d> ' % (self.address, self.port) if not self.user: result += 'user:None ' else: result += 'user:<%s> ' % self.user if not self.password: result += 'password:None' else: result += 'password:present ' result += 'token:present ' result += 'secret:present ' result += 'persistence_file:<%s>}' % self.persistence_file return result def __repr__(self): return '{address:''%s'' port:%d user:''%s'' password:''%s'' ' \ 'token:''%s'' secret:''%s'' persistence_file:''%s''}' \ % (self.address, self.port, self.user, self.password, self.token, self.secret, self.persistence_file)
true
true
f7086ba816e8596d2d810b8a0cd6ad6a963370bb
1,309
py
Python
setup.py
qingzma/DBEst_MDN
3a3e26bede308b70abfad07032dc16a07a170f34
[ "BSD-2-Clause" ]
1
2022-02-23T08:01:08.000Z
2022-02-23T08:01:08.000Z
setup.py
qingzma/DBEst_MDN
3a3e26bede308b70abfad07032dc16a07a170f34
[ "BSD-2-Clause" ]
null
null
null
setup.py
qingzma/DBEst_MDN
3a3e26bede308b70abfad07032dc16a07a170f34
[ "BSD-2-Clause" ]
2
2020-09-28T15:39:12.000Z
2021-05-11T11:07:54.000Z
# -*- coding: utf-8 -*-
import os

from setuptools import find_packages, setup

with open('README.rst') as f:
    readme = f.read()

# with open('LICENSE.txt') as f:
#     licenses = f.read()

setup(
    name='dbestclient',
    version='2.0',
    description='Model-based Approximate Query Processing (AQP) engine.',
    classifiers=[
        'Development Status :: 2.0',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
        'Topic :: Approximate Query Processing :: AQP :: Data Warehouse',
    ],
    keywords='Approximate Query Processing AQP',
    url='https://github.com/qingzma/DBEstClient',
    author='Qingzhi Ma',
    author_email='Q.Ma.2@warwick.ac.uk',
    long_description=readme,
    # license=licenses,
    # packages=['dbestclient'],
    packages=find_packages(exclude=('experiments', 'tests', 'docs')),
    entry_points={
        'console_scripts': ['dbestclient=dbestclient.main:main', 'dbestslave=dbestclient.main:slave', 'dbestmaster=dbestclient.main:master'],
    },
    zip_safe=False,
    install_requires=[
        'numpy',
        'sqlparse',
        'pandas',
        'scikit-learn',
        'qregpy',
        'scipy',
        'dill',
        'matplotlib',
        'torch',
        'category_encoders',
        'tox',
        'sphinx',
        'gensim',
    ],
    test_suite='nose.collector',
    tests_require=['nose'],
)
30.44186
152
0.638655
import os

from setuptools import find_packages, setup

with open('README.rst') as f:
    readme = f.read()


setup(
    name='dbestclient',
    version='2.0',
    description='Model-based Approximate Query Processing (AQP) engine.',
    classifiers=[
        'Development Status :: 2.0',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.7',
        'Topic :: Approximate Query Processing :: AQP :: Data Warehouse',
    ],
    keywords='Approximate Query Processing AQP',
    url='https://github.com/qingzma/DBEstClient',
    author='Qingzhi Ma',
    author_email='Q.Ma.2@warwick.ac.uk',
    long_description=readme,
    packages=find_packages(exclude=('experiments', 'tests', 'docs')),
    entry_points={
        'console_scripts': ['dbestclient=dbestclient.main:main', 'dbestslave=dbestclient.main:slave', 'dbestmaster=dbestclient.main:master'],
    },
    zip_safe=False,
    install_requires=[
        'numpy',
        'sqlparse',
        'pandas',
        'scikit-learn',
        'qregpy',
        'scipy',
        'dill',
        'matplotlib',
        'torch',
        'category_encoders',
        'tox',
        'sphinx',
        'gensim',
    ],
    test_suite='nose.collector',
    tests_require=['nose'],
)
true
true
f7086c4726768d3708f3740cbbb11df2243f6488
2,081
py
Python
cyllene/m_user_cmd.py
28left/psumathnotebooks
ec948216304e5f234a2f4d0f6bdcfaa1a10c435d
[ "MIT" ]
1
2021-05-04T14:09:51.000Z
2021-05-04T14:09:51.000Z
cyllene/m_user_cmd.py
28left/psumathnotebooks
ec948216304e5f234a2f4d0f6bdcfaa1a10c435d
[ "MIT" ]
null
null
null
cyllene/m_user_cmd.py
28left/psumathnotebooks
ec948216304e5f234a2f4d0f6bdcfaa1a10c435d
[ "MIT" ]
null
null
null
import sympy as sp import cyllene.f_aux as fa import cyllene.f_functionclass as ff import cyllene.f_compare as fc def function(expr): """ Defines a function based on a syntax check and a Function object, using lambda operator. Returns a pure function. """ func = ff.Function(expr) if func.is_defined: return lambda x: func.eval_at(x) else: issues_report=''.join(['\t' + str(i+1) + '. ' + func.issues[i]+'\n' \ for i in range(len(func.issues))]) print('Problems encountered:\n' + issues_report) return None # raise ValueError('Problems encountered:\n'+issues_report) # def function(expr): # [func, issues] = fd.define_function(expr) # if issues: # print("Invalid format") # return None # else: # return func def random_function(arg='random'): """ Pick a function at random. One of the folliwing types can be specified: 'const', 'linear', 'quadratic', 'cubic', 'squareroot', 'cubicroot', 'rational', 'exp', 'tri', 'log', 'comp', 'random' """ if arg in ff.FUNCTION_LIST: func = ff.Function(arg) else: func = ff.Function('random') return lambda x: func.eval_at(x) def expression(expr): """ Defines a function based on a syntax check and a Function object. Returns a Sympy object. """ func = ff.Function(expr) if func.is_defined: return func.sym_form else: issues_report=''.join(['\t' + str(i+1) + '. ' + func.issues[i]+'\n' \ for i in range(len(func.issues))]) print('Problems encountered:\n' + issues_report) return None def compare(expr1, expr2): return fc.compare_functions(expr1, expr2) def graph(expr): """ Try to find good plotting range and plot the graph """ var = fa.get_variables(expr) try: [xran, yran] = fpl.find_plot_range(expr) sp.plot(expr, (var[0], xran[0], xran[1]), axis_center=(0,0), ylim=(yran[0],yran[1])) except: sp.plot(expr)
24.77381
77
0.596348
import sympy as sp import cyllene.f_aux as fa import cyllene.f_functionclass as ff import cyllene.f_compare as fc def function(expr): func = ff.Function(expr) if func.is_defined: return lambda x: func.eval_at(x) else: issues_report=''.join(['\t' + str(i+1) + '. ' + func.issues[i]+'\n' \ for i in range(len(func.issues))]) print('Problems encountered:\n' + issues_report) return None def random_function(arg='random'): if arg in ff.FUNCTION_LIST: func = ff.Function(arg) else: func = ff.Function('random') return lambda x: func.eval_at(x) def expression(expr): func = ff.Function(expr) if func.is_defined: return func.sym_form else: issues_report=''.join(['\t' + str(i+1) + '. ' + func.issues[i]+'\n' \ for i in range(len(func.issues))]) print('Problems encountered:\n' + issues_report) return None def compare(expr1, expr2): return fc.compare_functions(expr1, expr2) def graph(expr): var = fa.get_variables(expr) try: [xran, yran] = fpl.find_plot_range(expr) sp.plot(expr, (var[0], xran[0], xran[1]), axis_center=(0,0), ylim=(yran[0],yran[1])) except: sp.plot(expr)
true
true
f7086cc203e842715757a439d19fd452474498b5
106
py
Python
src/windshape/__init__.py
Adrien4193/windshape
4c73a4a85409f04518029f0ddb8bd7e3c60e4905
[ "BSD-2-Clause" ]
null
null
null
src/windshape/__init__.py
Adrien4193/windshape
4c73a4a85409f04518029f0ddb8bd7e3c60e4905
[ "BSD-2-Clause" ]
null
null
null
src/windshape/__init__.py
Adrien4193/windshape
4c73a4a85409f04518029f0ddb8bd7e3c60e4905
[ "BSD-2-Clause" ]
null
null
null
from Commander import Commander
from gui.UserInterface import UserInterface
from log.Replay import Replay
26.5
43
0.867925
from Commander import Commander
from gui.UserInterface import UserInterface
from log.Replay import Replay
true
true
f7086d6fd660e481fbc60dbc73e3939ebde70e73
2,070
py
Python
bluesky/tests/test_bec.py
danielballan/bluesky
868c42f6b6be33f320bb3b5100d59c5378d2b5af
[ "BSD-3-Clause" ]
null
null
null
bluesky/tests/test_bec.py
danielballan/bluesky
868c42f6b6be33f320bb3b5100d59c5378d2b5af
[ "BSD-3-Clause" ]
44
2015-06-30T21:04:52.000Z
2019-09-19T11:11:52.000Z
bluesky/tests/test_bec.py
danielballan/bluesky
868c42f6b6be33f320bb3b5100d59c5378d2b5af
[ "BSD-3-Clause" ]
2
2015-07-01T00:30:41.000Z
2016-10-03T16:26:32.000Z
import ast

from bluesky.plans import scan, grid_scan
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from bluesky.preprocessors import SupplementalData
from bluesky.callbacks.best_effort import BestEffortCallback


def test_hints(RE, hw):
    motor = hw.motor
    expected_hint = {'fields': [motor.name]}
    assert motor.hints == expected_hint
    collector = []

    def collect(*args):
        collector.append(args)

    RE(scan([], motor, 1, 2, 2), {'descriptor': collect})
    name, doc = collector.pop()
    assert doc['hints'][motor.name] == expected_hint


def test_simple(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    RE(scan([hw.ab_det], hw.motor, 1, 5, 5))


def test_disable(RE, hw):
    det, motor = hw.ab_det, hw.motor
    bec = BestEffortCallback()
    RE.subscribe(bec)

    bec.disable_table()
    RE(scan([det], motor, 1, 5, 5))
    assert bec._table is None

    bec.enable_table()
    RE(scan([det], motor, 1, 5, 5))
    assert bec._table is not None

    bec.peaks.com
    bec.peaks['com']
    assert ast.literal_eval(repr(bec.peaks)) == vars(bec.peaks)

    bec.clear()
    assert bec._table is None

    # smoke test
    bec.disable_plots()
    bec.enable_plots()
    bec.disable_baseline()
    bec.enable_baseline()
    bec.disable_heading()
    bec.enable_heading()


def test_blank_hints(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    RE(scan([hw.ab_det], hw.motor, 1, 5, 5, md={'hints': {}}))


def test_with_baseline(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    sd = SupplementalData(baseline=[hw.det])
    RE.preprocessors.append(sd)
    RE(scan([hw.ab_det], hw.motor, 1, 5, 5))


def test_underhinted_plan(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)

    @bpp.run_decorator()
    def broken_plan(dets):
        yield from bps.trigger_and_read(dets)

    RE(broken_plan([hw.det]))


def test_live_grid(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    RE(grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2, True))
23.258427
74
0.658454
import ast

from bluesky.plans import scan, grid_scan
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from bluesky.preprocessors import SupplementalData
from bluesky.callbacks.best_effort import BestEffortCallback


def test_hints(RE, hw):
    motor = hw.motor
    expected_hint = {'fields': [motor.name]}
    assert motor.hints == expected_hint
    collector = []

    def collect(*args):
        collector.append(args)

    RE(scan([], motor, 1, 2, 2), {'descriptor': collect})
    name, doc = collector.pop()
    assert doc['hints'][motor.name] == expected_hint


def test_simple(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    RE(scan([hw.ab_det], hw.motor, 1, 5, 5))


def test_disable(RE, hw):
    det, motor = hw.ab_det, hw.motor
    bec = BestEffortCallback()
    RE.subscribe(bec)

    bec.disable_table()
    RE(scan([det], motor, 1, 5, 5))
    assert bec._table is None

    bec.enable_table()
    RE(scan([det], motor, 1, 5, 5))
    assert bec._table is not None

    bec.peaks.com
    bec.peaks['com']
    assert ast.literal_eval(repr(bec.peaks)) == vars(bec.peaks)

    bec.clear()
    assert bec._table is None

    bec.disable_plots()
    bec.enable_plots()
    bec.disable_baseline()
    bec.enable_baseline()
    bec.disable_heading()
    bec.enable_heading()


def test_blank_hints(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    RE(scan([hw.ab_det], hw.motor, 1, 5, 5, md={'hints': {}}))


def test_with_baseline(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    sd = SupplementalData(baseline=[hw.det])
    RE.preprocessors.append(sd)
    RE(scan([hw.ab_det], hw.motor, 1, 5, 5))


def test_underhinted_plan(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)

    @bpp.run_decorator()
    def broken_plan(dets):
        yield from bps.trigger_and_read(dets)

    RE(broken_plan([hw.det]))


def test_live_grid(RE, hw):
    bec = BestEffortCallback()
    RE.subscribe(bec)
    RE(grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2, True))
true
true
f7086dc9522222ece382e172353cf23a6d069ea8
2,160
py
Python
rllib/algorithms/ac.py
4kubo/rllib
4f9f5f49916c7681675305b6c9a276b9e88c5e22
[ "MIT" ]
null
null
null
rllib/algorithms/ac.py
4kubo/rllib
4f9f5f49916c7681675305b6c9a276b9e88c5e22
[ "MIT" ]
null
null
null
rllib/algorithms/ac.py
4kubo/rllib
4f9f5f49916c7681675305b6c9a276b9e88c5e22
[ "MIT" ]
null
null
null
"""Actor-Critic Algorithm.""" from rllib.util.neural_networks.utilities import broadcast_to_tensor from .abstract_algorithm import AbstractAlgorithm class ActorCritic(AbstractAlgorithm): r"""Implementation of Policy Gradient algorithm. Policy-Gradient is an on-policy model-free control algorithm. Policy-Gradient computes the policy gradient using a critic to estimate the returns (sum of discounted rewards). The Policy-Gradient algorithm is a policy gradient algorithm that estimates the gradient: .. math:: \grad J = \int_{\tau} \grad \log \pi(s_t) Q(s_t, a_t), where the previous integral is computed through samples (s_t, a_t) samples. Parameters ---------- policy: AbstractPolicy Policy to optimize. critic: AbstractQFunction Critic that evaluates the current policy. criterion: _Loss Criterion to optimize the baseline. gamma: float Discount factor. References ---------- Sutton, R. S., McAllester, D. A., Singh, S. P., & Mansour, Y. (2000). Policy gradient methods for reinforcement learning with function approximation. NeurIPS. Konda, V. R., & Tsitsiklis, J. N. (2000). Actor-critic algorithms. NeurIPS. Degris, T., White, M., & Sutton, R. S. (2012). Off-policy actor-critic. ICML """ def __init__( self, num_policy_samples=15, standardize_returns=True, *args, **kwargs ): super().__init__(num_policy_samples=num_policy_samples, *args, **kwargs) self.standardize_returns = standardize_returns def returns(self, trajectory): """Estimate the returns of a trajectory.""" state, action = trajectory.state, trajectory.action weight = self.get_ope_weight(state, action, trajectory.log_prob_action) advantage = self.critic(state, action) weight = broadcast_to_tensor(input_tensor=weight, target_tensor=advantage) return weight * advantage def actor_loss(self, observation): """Get Actor loss.""" return self.score_actor_loss(observation, linearized=False).reduce( self.criterion.reduction )
34.285714
87
0.683333
from rllib.util.neural_networks.utilities import broadcast_to_tensor

from .abstract_algorithm import AbstractAlgorithm


class ActorCritic(AbstractAlgorithm):

    def __init__(
        self, num_policy_samples=15, standardize_returns=True, *args, **kwargs
    ):
        super().__init__(num_policy_samples=num_policy_samples, *args, **kwargs)
        self.standardize_returns = standardize_returns

    def returns(self, trajectory):
        state, action = trajectory.state, trajectory.action
        weight = self.get_ope_weight(state, action, trajectory.log_prob_action)
        advantage = self.critic(state, action)
        weight = broadcast_to_tensor(input_tensor=weight, target_tensor=advantage)
        return weight * advantage

    def actor_loss(self, observation):
        return self.score_actor_loss(observation, linearized=False).reduce(
            self.criterion.reduction
        )
true
true
f7086df80423a19c718962ed1d7dcca8367671f9
24,024
py
Python
build/android/gyp/compile_resources.py
zipated/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
build/android/gyp/compile_resources.py
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
build/android/gyp/compile_resources.py
cangulcan/src
2b8388091c71e442910a21ada3d97ae8bc1845d3
[ "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
#!/usr/bin/env python # # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Compile Android resources into an intermediate APK. This can also generate an R.txt, and an .srcjar file containing the proper final R.java class for all resource packages the APK depends on. This will crunch images with aapt2. """ import argparse import collections import multiprocessing.pool import os import re import shutil import subprocess import sys import zipfile from xml.etree import ElementTree from util import build_utils from util import resource_utils _SOURCE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( __file__)))) # Import jinja2 from third_party/jinja2 sys.path.insert(1, os.path.join(_SOURCE_ROOT, 'third_party')) from jinja2 import Template # pylint: disable=F0401 # Pngs that we shouldn't convert to webp. Please add rationale when updating. _PNG_WEBP_BLACKLIST_PATTERN = re.compile('|'.join([ # Crashes on Galaxy S5 running L (https://crbug.com/807059). r'.*star_gray\.png', # Android requires pngs for 9-patch images. r'.*\.9\.png', # Daydream requires pngs for icon files. r'.*daydream_icon_.*\.png'])) # Regular expression for package declaration in 'aapt dump resources' output. _RE_PACKAGE_DECLARATION = re.compile( r'^Package Group ([0-9]+) id=0x([0-9a-fA-F]+)') def _PackageIdArgument(x): """Convert a string into a package ID while checking its range. Args: x: argument string. Returns: the package ID as an int, or -1 in case of error. """ try: x = int(x, 0) if x < 0 or x > 127: x = -1 except ValueError: x = -1 return x def _ParseArgs(args): """Parses command line options. Returns: An options object as from argparse.ArgumentParser.parse_args() """ parser, input_opts, output_opts = resource_utils.ResourceArgsParser() input_opts.add_argument('--android-manifest', required=True, help='AndroidManifest.xml path') input_opts.add_argument( '--shared-resources', action='store_true', help='Make all resources in R.java non-final and allow the resource IDs ' 'to be reset to a different package index when the apk is loaded by ' 'another application at runtime.') input_opts.add_argument( '--app-as-shared-lib', action='store_true', help='Same as --shared-resources, but also ensures all resource IDs are ' 'directly usable from the APK loaded as an application.') input_opts.add_argument( '--shared-resources-whitelist', help='An R.txt file acting as a whitelist for resources that should be ' 'non-final and have their package ID changed at runtime in R.java. ' 'Implies and overrides --shared-resources.') input_opts.add_argument('--proto-format', action='store_true', help='Compile resources to protocol buffer format.') input_opts.add_argument('--support-zh-hk', action='store_true', help='Use zh-rTW resources for zh-rHK.') input_opts.add_argument('--debuggable', action='store_true', help='Whether to add android:debuggable="true"') input_opts.add_argument('--version-code', help='Version code for apk.') input_opts.add_argument('--version-name', help='Version name for apk.') input_opts.add_argument( '--no-compress', help='disables compression for the given comma-separated list of ' 'extensions') input_opts.add_argument( '--locale-whitelist', default='[]', help='GN list of languages to include. All other language configs will ' 'be stripped out. 
List may include a combination of Android locales ' 'or Chrome locales.') input_opts.add_argument('--exclude-xxxhdpi', action='store_true', help='Do not include xxxhdpi drawables.') input_opts.add_argument( '--xxxhdpi-whitelist', default='[]', help='GN list of globs that say which xxxhdpi images to include even ' 'when --exclude-xxxhdpi is set.') input_opts.add_argument('--png-to-webp', action='store_true', help='Convert png files to webp format.') input_opts.add_argument('--webp-binary', default='', help='Path to the cwebp binary.') input_opts.add_argument('--no-xml-namespaces', action='store_true', help='Whether to strip xml namespaces from processed ' 'xml resources') input_opts.add_argument( '--check-resources-pkg-id', type=_PackageIdArgument, help='Check the package ID of the generated resources table. ' 'Value must be integer in [0..127] range.') output_opts.add_argument('--apk-path', required=True, help='Path to output (partial) apk.') output_opts.add_argument('--apk-info-path', required=True, help='Path to output info file for the partial apk.') output_opts.add_argument('--srcjar-out', help='Path to srcjar to contain generated R.java.') output_opts.add_argument('--r-text-out', help='Path to store the generated R.txt file.') output_opts.add_argument('--proguard-file', help='Path to proguard.txt generated file') output_opts.add_argument( '--proguard-file-main-dex', help='Path to proguard.txt generated file for main dex') options = parser.parse_args(args) resource_utils.HandleCommonOptions(options) options.locale_whitelist = build_utils.ParseGnList(options.locale_whitelist) options.xxxhdpi_whitelist = build_utils.ParseGnList(options.xxxhdpi_whitelist) if options.check_resources_pkg_id is not None: if options.check_resources_pkg_id < 0: raise Exception( 'Package resource id should be integer in [0..127] range.') if options.shared_resources and options.app_as_shared_lib: raise Exception('Only one of --app-as-shared-lib or --shared-resources ' 'can be used.') return options def _ExtractPackageIdFromApk(apk_path, aapt_path): """Extract the package ID of a given APK (even intermediate ones). Args: apk_path: Input apk path. aapt_path: Path to aapt tool. Returns: An integer corresponding to the APK's package id. Raises: Exception if there is no resources table in the input file. """ cmd_args = [ aapt_path, 'dump', 'resources', apk_path ] output = build_utils.CheckOutput(cmd_args) for line in output.splitlines(): m = _RE_PACKAGE_DECLARATION.match(line) if m: return int(m.group(2), 16) raise Exception("No resources in this APK!") def _SortZip(original_path, sorted_path): """Generate new zip archive by sorting all files in the original by name.""" with zipfile.ZipFile(sorted_path, 'w') as sorted_zip, \ zipfile.ZipFile(original_path, 'r') as original_zip: for info in sorted(original_zip.infolist(), key=lambda i: i.filename): sorted_zip.writestr(info, original_zip.read(info)) def _DuplicateZhResources(resource_dirs): """Duplicate Taiwanese resources into Hong-Kong specific directory.""" renamed_paths = dict() for resource_dir in resource_dirs: # We use zh-TW resources for zh-HK (if we have zh-TW resources). 
for path in build_utils.IterFiles(resource_dir): if 'zh-rTW' in path: hk_path = path.replace('zh-rTW', 'zh-rHK') build_utils.MakeDirectory(os.path.dirname(hk_path)) shutil.copyfile(path, hk_path) renamed_paths[os.path.relpath(hk_path, resource_dir)] = os.path.relpath( path, resource_dir) return renamed_paths def _ToAaptLocales(locale_whitelist, support_zh_hk): """Converts the list of Chrome locales to aapt config locales.""" ret = set() for locale in locale_whitelist: locale = resource_utils.CHROME_TO_ANDROID_LOCALE_MAP.get(locale, locale) if locale is None or ('-' in locale and '-r' not in locale): raise Exception('CHROME_TO_ANDROID_LOCALE_MAP needs updating.' ' Found: %s' % locale) ret.add(locale) # Always keep non-regional fall-backs. language = locale.split('-')[0] ret.add(language) # We don't actually support zh-HK in Chrome on Android, but we mimic the # native side behavior where we use zh-TW resources when the locale is set to # zh-HK. See https://crbug.com/780847. if support_zh_hk: assert not any('HK' in l for l in locale_whitelist), ( 'Remove special logic if zh-HK is now supported (crbug.com/780847).') ret.add('zh-rHK') return sorted(ret) def _MoveImagesToNonMdpiFolders(res_root): """Move images from drawable-*-mdpi-* folders to drawable-* folders. Why? http://crbug.com/289843 """ renamed_paths = dict() for src_dir_name in os.listdir(res_root): src_components = src_dir_name.split('-') if src_components[0] != 'drawable' or 'mdpi' not in src_components: continue src_dir = os.path.join(res_root, src_dir_name) if not os.path.isdir(src_dir): continue dst_components = [c for c in src_components if c != 'mdpi'] assert dst_components != src_components dst_dir_name = '-'.join(dst_components) dst_dir = os.path.join(res_root, dst_dir_name) build_utils.MakeDirectory(dst_dir) for src_file_name in os.listdir(src_dir): if not os.path.splitext(src_file_name)[1] in ('.png', '.webp'): continue src_file = os.path.join(src_dir, src_file_name) dst_file = os.path.join(dst_dir, src_file_name) assert not os.path.lexists(dst_file) shutil.move(src_file, dst_file) renamed_paths[os.path.relpath(dst_file, res_root)] = os.path.relpath( src_file, res_root) return renamed_paths def _CreateLinkApkArgs(options): """Create command-line arguments list to invoke 'aapt2 link'. Args: options: The command-line options tuple. Returns: A list of strings corresponding to the command-line invokation for the command, matching the arguments from |options|. """ link_command = [ options.aapt2_path, 'link', '--version-code', options.version_code, '--version-name', options.version_name, '--auto-add-overlay', '--no-version-vectors', '-o', options.apk_path, ] for j in options.android_sdk_jars: link_command += ['-I', j] if options.proguard_file: link_command += ['--proguard', options.proguard_file] if options.proguard_file_main_dex: link_command += ['--proguard-main-dex', options.proguard_file_main_dex] if options.no_compress: for ext in options.no_compress.split(','): link_command += ['-0', ext] # Note: only one of --proto-format, --shared-lib or --app-as-shared-lib # can be used with recent versions of aapt2. 
if options.proto_format: link_command.append('--proto-format') elif options.shared_resources: link_command.append('--shared-lib') if options.locale_whitelist: aapt_locales = _ToAaptLocales( options.locale_whitelist, options.support_zh_hk) link_command += ['-c', ','.join(aapt_locales)] if options.no_xml_namespaces: link_command.append('--no-xml-namespaces') return link_command def _ExtractVersionFromSdk(aapt_path, sdk_path): """Extract version code and name from Android SDK .jar file. Args: aapt_path: Path to 'aapt' build tool. sdk_path: Path to SDK-specific android.jar file. Returns: A (version_code, version_name) pair of strings. """ output = build_utils.CheckOutput( [aapt_path, 'dump', 'badging', sdk_path], print_stdout=False, print_stderr=False) version_code = re.search(r"versionCode='(.*?)'", output).group(1) version_name = re.search(r"versionName='(.*?)'", output).group(1) return version_code, version_name, def _FixManifest(options, temp_dir): """Fix the APK's AndroidManifest.xml. This adds any missing namespaces for 'android' and 'tools', and sets certains elements like 'platformBuildVersionCode' or 'android:debuggable' depending on the content of |options|. Args: options: The command-line arguments tuple. temp_dir: A temporary directory where the fixed manifest will be written to. Returns: Path to the fixed manifest within |temp_dir|. """ debug_manifest_path = os.path.join(temp_dir, 'AndroidManifest.xml') _ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android' _TOOLS_NAMESPACE = 'http://schemas.android.com/tools' ElementTree.register_namespace('android', _ANDROID_NAMESPACE) ElementTree.register_namespace('tools', _TOOLS_NAMESPACE) original_manifest = ElementTree.parse(options.android_manifest) def maybe_extract_version(j): try: return _ExtractVersionFromSdk(options.aapt_path, j) except build_utils.CalledProcessError: return None extract_all = [maybe_extract_version(j) for j in options.android_sdk_jars] successful_extractions = [x for x in extract_all if x] if len(successful_extractions) == 0: raise Exception( 'Unable to find android SDK jar among candidates: %s' % ', '.join(options.android_sdk_jars)) elif len(successful_extractions) > 1: raise Exception( 'Found multiple android SDK jars among candidates: %s' % ', '.join(options.android_sdk_jars)) version_code, version_name = successful_extractions.pop() # ElementTree.find does not work if the required tag is the root. if original_manifest.getroot().tag == 'manifest': manifest_node = original_manifest.getroot() else: manifest_node = original_manifest.find('manifest') manifest_node.set('platformBuildVersionCode', version_code) manifest_node.set('platformBuildVersionName', version_name) if options.debuggable: app_node = original_manifest.find('application') app_node.set('{%s}%s' % (_ANDROID_NAMESPACE, 'debuggable'), 'true') with open(debug_manifest_path, 'w') as debug_manifest: debug_manifest.write(ElementTree.tostring( original_manifest.getroot(), encoding='UTF-8')) return debug_manifest_path def _ResourceNameFromPath(path): return os.path.splitext(os.path.basename(path))[0] def _CreateKeepPredicate(resource_dirs, exclude_xxxhdpi, xxxhdpi_whitelist): """Return a predicate lambda to determine which resource files to keep.""" if not exclude_xxxhdpi: # Do not extract dotfiles (e.g. ".gitkeep"). aapt ignores them anyways. return lambda path: os.path.basename(path)[0] != '.' # Returns False only for xxxhdpi non-mipmap, non-whitelisted drawables. 


def _CreateKeepPredicate(resource_dirs, exclude_xxxhdpi, xxxhdpi_whitelist):
  """Return a predicate lambda to determine which resource files to keep."""
  if not exclude_xxxhdpi:
    # Do not extract dotfiles (e.g. ".gitkeep"). aapt ignores them anyways.
    return lambda path: os.path.basename(path)[0] != '.'

  # Returns False only for xxxhdpi non-mipmap, non-whitelisted drawables.
  naive_predicate = lambda path: (
      not re.search(r'[/-]xxxhdpi[/-]', path) or
      re.search(r'[/-]mipmap[/-]', path) or
      build_utils.MatchesGlob(path, xxxhdpi_whitelist))

  # Build a set of all non-xxxhdpi drawables to ensure that we never exclude
  # any xxxhdpi drawable that does not exist in other densities.
  non_xxxhdpi_drawables = set()
  for resource_dir in resource_dirs:
    for path in build_utils.IterFiles(resource_dir):
      if re.search(r'[/-]drawable[/-]', path) and naive_predicate(path):
        non_xxxhdpi_drawables.add(_ResourceNameFromPath(path))

  return lambda path: (naive_predicate(path) or
                       _ResourceNameFromPath(path) not in non_xxxhdpi_drawables)


def _ConvertToWebP(webp_binary, png_files):
  renamed_paths = dict()
  pool = multiprocessing.pool.ThreadPool(10)

  def convert_image(png_path_tuple):
    png_path, original_dir = png_path_tuple
    root = os.path.splitext(png_path)[0]
    webp_path = root + '.webp'
    args = [webp_binary, png_path, '-mt', '-quiet', '-m', '6', '-q', '100',
            '-lossless', '-o', webp_path]
    subprocess.check_call(args)
    os.remove(png_path)
    renamed_paths[os.path.relpath(webp_path, original_dir)] = os.path.relpath(
        png_path, original_dir)

  pool.map(convert_image, [f for f in png_files
                           if not _PNG_WEBP_BLACKLIST_PATTERN.match(f[0])])
  pool.close()
  pool.join()
  return renamed_paths


def _CompileDeps(aapt2_path, dep_subdirs, temp_dir):
  partials_dir = os.path.join(temp_dir, 'partials')
  build_utils.MakeDirectory(partials_dir)
  partial_compile_command = [
      aapt2_path,
      'compile',
      # TODO(wnwen): Turn this on once aapt2 forces 9-patch to be crunched.
      # '--no-crunch',
  ]
  pool = multiprocessing.pool.ThreadPool(10)

  def compile_partial(directory):
    dirname = os.path.basename(directory)
    partial_path = os.path.join(partials_dir, dirname + '.zip')
    compile_command = (partial_compile_command +
                       ['--dir', directory, '-o', partial_path])
    build_utils.CheckOutput(compile_command)

    # Sorting the files in the partial ensures deterministic output from the
    # aapt2 link step which uses order of files in the partial.
    sorted_partial_path = os.path.join(partials_dir, dirname + '.sorted.zip')
    _SortZip(partial_path, sorted_partial_path)

    return sorted_partial_path

  partials = pool.map(compile_partial, dep_subdirs)
  pool.close()
  pool.join()
  return partials


def _CreateResourceInfoFile(
    renamed_paths, apk_info_path, dependencies_res_zips):
  lines = set()
  for zip_file in dependencies_res_zips:
    zip_info_file_path = zip_file + '.info'
    if os.path.exists(zip_info_file_path):
      with open(zip_info_file_path, 'r') as zip_info_file:
        lines.update(zip_info_file.readlines())
  for dest, source in renamed_paths.iteritems():
    lines.add('Rename:{},{}\n'.format(dest, source))
  with open(apk_info_path, 'w') as info_file:
    info_file.writelines(sorted(lines))
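
# The .info file written by _CreateResourceInfoFile() above is a sorted,
# de-duplicated set of lines: entries inherited from dependency .info files
# plus one 'Rename:<dest>,<source>' line per file renamed during packaging
# (webp conversion, mdpi moves, zh-rHK copies). For example, with hypothetical
# paths chosen only for illustration:
#   Rename:drawable-mdpi/icon.webp,drawable-mdpi/icon.png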
""" renamed_paths = dict() renamed_paths.update(_DuplicateZhResources(dep_subdirs)) keep_predicate = _CreateKeepPredicate( dep_subdirs, options.exclude_xxxhdpi, options.xxxhdpi_whitelist) png_paths = [] for directory in dep_subdirs: for f in build_utils.IterFiles(directory): if not keep_predicate(f): os.remove(f) elif f.endswith('.png'): png_paths.append((f, directory)) if png_paths and options.png_to_webp: renamed_paths.update(_ConvertToWebP(options.webp_binary, png_paths)) for directory in dep_subdirs: renamed_paths.update(_MoveImagesToNonMdpiFolders(directory)) link_command = _CreateLinkApkArgs(options) link_command += ['--output-text-symbols', r_txt_path] # TODO(digit): Is this below actually required for R.txt generation? link_command += ['--java', gen_dir] fixed_manifest = _FixManifest(options, temp_dir) link_command += ['--manifest', fixed_manifest] partials = _CompileDeps(options.aapt2_path, dep_subdirs, temp_dir) for partial in partials: link_command += ['-R', partial] # Creates a .zip with AndroidManifest.xml, resources.arsc, res/* # Also creates R.txt build_utils.CheckOutput( link_command, print_stdout=False, print_stderr=False) _CreateResourceInfoFile( renamed_paths, options.apk_info_path, options.dependencies_res_zips) def _WriteFinalRTxtFile(options, aapt_r_txt_path): """Determine final R.txt and return its location. This handles --r-text-in and --r-text-out options at the same time. Args: options: The command-line options tuple. aapt_r_txt_path: The path to the R.txt generated by aapt. Returns: Path to the final R.txt file. """ if options.r_text_in: r_txt_file = options.r_text_in else: # When an empty res/ directory is passed, aapt does not write an R.txt. r_txt_file = aapt_r_txt_path if not os.path.exists(r_txt_file): build_utils.Touch(r_txt_file) if options.r_text_out: shutil.copyfile(r_txt_file, options.r_text_out) return r_txt_file def _OnStaleMd5(options): with resource_utils.BuildContext() as build: dep_subdirs = resource_utils.ExtractDeps(options.dependencies_res_zips, build.deps_dir) _PackageApk(options, dep_subdirs, build.temp_dir, build.gen_dir, build.r_txt_path) r_txt_path = _WriteFinalRTxtFile(options, build.r_txt_path) package = resource_utils.ExtractPackageFromManifest( options.android_manifest) # If --shared-resources-whitelist is used, the all resources listed in # the corresponding R.txt file will be non-final, and an onResourcesLoaded() # will be generated to adjust them at runtime. # # Otherwise, if --shared-resources is used, the all resources will be # non-final, and an onResourcesLoaded() method will be generated too. # # Otherwise, all resources will be final, and no method will be generated. 


def _OnStaleMd5(options):
  with resource_utils.BuildContext() as build:
    dep_subdirs = resource_utils.ExtractDeps(options.dependencies_res_zips,
                                             build.deps_dir)

    _PackageApk(options, dep_subdirs, build.temp_dir, build.gen_dir,
                build.r_txt_path)

    r_txt_path = _WriteFinalRTxtFile(options, build.r_txt_path)

    package = resource_utils.ExtractPackageFromManifest(
        options.android_manifest)

    # If --shared-resources-whitelist is used, all resources listed in
    # the corresponding R.txt file will be non-final, and an
    # onResourcesLoaded() method will be generated to adjust them at runtime.
    #
    # Otherwise, if --shared-resources is used, all resources will be
    # non-final, and an onResourcesLoaded() method will be generated too.
    #
    # Otherwise, all resources will be final, and no method will be generated.
    #
    rjava_build_options = resource_utils.RJavaBuildOptions()
    if options.shared_resources_whitelist:
      rjava_build_options.ExportSomeResources(
          options.shared_resources_whitelist)
      rjava_build_options.GenerateOnResourcesLoaded()
    elif options.shared_resources or options.app_as_shared_lib:
      rjava_build_options.ExportAllResources()
      rjava_build_options.GenerateOnResourcesLoaded()

    resource_utils.CreateRJavaFiles(
        build.srcjar_dir, package, r_txt_path,
        options.extra_res_packages,
        options.extra_r_text_files,
        rjava_build_options)

    if options.srcjar_out:
      build_utils.ZipDir(options.srcjar_out, build.srcjar_dir)

    if options.check_resources_pkg_id is not None:
      expected_id = options.check_resources_pkg_id
      package_id = _ExtractPackageIdFromApk(options.apk_path,
                                            options.aapt_path)
      if package_id != expected_id:
        raise Exception('Invalid package ID 0x%x (expected 0x%x)'
                        % (package_id, expected_id))


def main(args):
  args = build_utils.ExpandFileArgs(args)
  options = _ParseArgs(args)

  # Order of these must match order specified in GN so that the correct one
  # appears first in the depfile.
  possible_output_paths = [
    options.apk_path,
    options.apk_path + '.info',
    options.r_text_out,
    options.srcjar_out,
    options.proguard_file,
    options.proguard_file_main_dex,
  ]
  output_paths = [x for x in possible_output_paths if x]

  # List python deps in input_strings rather than input_paths since their
  # contents do not change what gets written to the depfile.
  input_strings = options.extra_res_packages + [
    options.shared_resources,
    options.exclude_xxxhdpi,
    options.xxxhdpi_whitelist,
    str(options.debuggable),
    str(options.png_to_webp),
    str(options.support_zh_hk),
    str(options.no_xml_namespaces),
  ]

  input_strings.extend(_CreateLinkApkArgs(options))

  possible_input_paths = [
    options.aapt_path,
    options.android_manifest,
    options.shared_resources_whitelist,
  ]
  possible_input_paths += options.android_sdk_jars
  input_paths = [x for x in possible_input_paths if x]
  input_paths.extend(options.dependencies_res_zips)
  input_paths.extend(options.extra_r_text_files)

  if options.webp_binary:
    input_paths.append(options.webp_binary)

  build_utils.CallAndWriteDepfileIfStale(
      lambda: _OnStaleMd5(options),
      options,
      input_paths=input_paths,
      input_strings=input_strings,
      output_paths=output_paths)


if __name__ == '__main__':
  main(sys.argv[1:])
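
# Hypothetical invocation sketch (the script name below is an assumption made
# only for illustration; the real flags are those defined by _ParseArgs() and
# resource_utils.ResourceArgsParser()):
#   python compile_resources.py --android-manifest AndroidManifest.xml \
#       --apk-path out/Foo.ap_ --apk-info-path out/Foo.ap_.info ...
# build_utils.CallAndWriteDepfileIfStale() above skips _OnStaleMd5() when the
# recorded input_paths/input_strings and output_paths are unchanged.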
35.538462
80
0.700716
true
true