hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
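
For orientation, a minimal sketch of how a shard like this could be loaded and inspected with pandas, assuming the rows live in a Parquet file (the file name here is hypothetical; the dump does not say where the shard is stored):

import pandas as pd

# Hypothetical shard path; substitute the actual file.
df = pd.read_parquet("code_shard.parquet")

# Column dtypes should mirror the schema listed above.
print(df.dtypes)

# A few identifying columns for the first record.
print(df[["hexsha", "size", "lang", "max_stars_repo_name"]].head())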
f420c7cad07b73b890ce9019d4a200470cb1bcbf
948
py
Python
scrapy_framework/midwares/download_midware.py
savor007/scrapy_framework
9f1266eb2d4bb7e181d1c5352b05298e77040980
[ "MIT" ]
null
null
null
scrapy_framework/midwares/download_midware.py
savor007/scrapy_framework
9f1266eb2d4bb7e181d1c5352b05298e77040980
[ "MIT" ]
null
null
null
scrapy_framework/midwares/download_midware.py
savor007/scrapy_framework
9f1266eb2d4bb7e181d1c5352b05298e77040980
[ "MIT" ]
null
null
null
from scrapy_framework.html.request import Request
from scrapy_framework.html.response import Response

import random


def get_ua():
    first_num=random.randint(55,69)
    third_num=random.randint(0,3200)
    forth_num=random.randint(0, 140)
    os_type = [
        '(Windows NT 6.1; WOW64)',
        '(Windows NT 10.0; WOW64)',
        '(X11; Linux x86_64)',
        '(Macintosh; Intel Mac OS X 10_12_6)'
    ]
    chrome_version = 'Chrome/{}.0.{}.{}'.format(first_num, third_num, forth_num)

    ua = ' '.join(['Mozilla/5.0', random.choice(os_type), 'AppleWebKit/537.36',
                   '(KHTML, like Gecko)', chrome_version, 'Safari/537.36'])
    return ua


class DownloadMidware(object):
    def process_request(self, request):
        if request.headers==None:
            request.headers=dict()
        request.headers["User-Agent"]=get_ua()
        return request

    def process_response(self, response):
        return response
28.727273
85
0.635021
124
948
4.701613
0.516129
0.046312
0.082333
0.078902
0
0
0
0
0
0
0
0.062928
0.228903
948
33
86
28.727273
0.73461
0
0
0
0
0
0.200211
0
0
0
0
0
0
1
0.125
false
0
0.125
0.041667
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
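
The derived columns track simple statistics of `content`. A sketch of how two of them could be recomputed for the record above, assuming `avg_line_length` is total bytes divided by line count and `alphanum_fraction` is the share of alphanumeric characters; these definitions are my reading of the column names, not documented:

def avg_line_length(source: str) -> float:
    # Split on newlines; a trailing newline adds one empty part,
    # which may explain small denominator differences across rows.
    parts = source.split("\n")
    return len(source) / len(parts)

def alphanum_fraction(source: str) -> float:
    # Fraction of characters that are letters or digits.
    return sum(ch.isalnum() for ch in source) / len(source)

For the 948-byte, 33-line file above these should land near the listed values (28.727273 and 0.635021), modulo newline accounting.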
f420caa0d727e8d433f67df3503f8152d7e6f2e7
2,294
py
Python
tracardi/process_engine/action/v1/pro/scheduler/plugin.py
bytepl/tracardi
e8fa4684fa6bd3d05165fe48aa925fc6c1e74923
[ "MIT" ]
null
null
null
tracardi/process_engine/action/v1/pro/scheduler/plugin.py
bytepl/tracardi
e8fa4684fa6bd3d05165fe48aa925fc6c1e74923
[ "MIT" ]
null
null
null
tracardi/process_engine/action/v1/pro/scheduler/plugin.py
bytepl/tracardi
e8fa4684fa6bd3d05165fe48aa925fc6c1e74923
[ "MIT" ]
null
null
null
from pydantic import BaseModel

from tracardi.domain.entity import Entity
from tracardi.domain.scheduler_config import SchedulerConfig
from tracardi.domain.resource import ResourceCredentials
from tracardi.service.storage.driver import storage
from tracardi.service.plugin.runner import ActionRunner
from tracardi.service.plugin.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent
from tracardi.service.plugin.domain.result import Result


class Configuration(BaseModel):
    source: Entity
    event_type: str
    properties: str = "{}"
    postpone: str


def validate(config: dict) -> Configuration:
    return Configuration(**config)


class SchedulerPlugin(ActionRunner):

    @staticmethod
    async def build(**kwargs) -> 'SchedulerPlugin':
        config = validate(kwargs)
        resource = await storage.driver.resource.load(config.source.id)
        plugin = SchedulerPlugin(config, resource.credentials)
        return plugin

    def __init__(self, config: Configuration, credentials: ResourceCredentials):
        self.config = config
        self.credentials = credentials.get_credentials(
            self,
            output=SchedulerConfig)  # type: SchedulerConfig

    async def run(self, payload):
        run_in_background = True

        if not run_in_background:
            return Result(port="response", value=None)
        else:
            return Result(port="response", value=None)


def register() -> Plugin:
    return Plugin(
        start=False,
        spec=Spec(
            module='tracardi.process_engine.action.v1.pro.scheduler.plugin',
            className='SchedulerPlugin',
            inputs=["payload"],
            outputs=['response', 'error'],
            version='0.6.2',
            license="MIT",
            author="Risto Kowaczewski",
            init= {
                "source": {
                    "id": ""
                },
                "event_type": "",
                "properties": "{}",
                "postpone": "+1m"
            }
        ),
        metadata=MetaData(
            name='Schedule event',
            desc='This plugin schedules events',
            icon='calendar',
            group=["Time"],
            tags=["Pro", "Scheduler"],
            pro=True,
        )
    )
29.792208
117
0.610724
215
2,294
6.455814
0.446512
0.060519
0.054755
0.054035
0.092219
0.04755
0
0
0
0
0
0.003051
0.285527
2,294
76
118
30.184211
0.843807
0.009154
0
0.032258
0
0
0.111845
0.023778
0
0
0
0
0
1
0.048387
false
0
0.129032
0.032258
0.354839
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
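
The qsc_* columns read like per-file quality filters. A hedged sketch of how such signals might be combined to filter rows with pandas; the threshold values are invented for illustration and are not taken from any documented pipeline:

# `df` as loaded in the earlier sketch.
mask = (
    (df["alphanum_fraction"] > 0.25)
    # Drop files dominated by duplicated 5-grams.
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
    # Drop files with extremely long single lines (often minified/generated).
    & (df["max_line_length"] < 1000)
)
filtered = df[mask]
print(len(filtered), "of", len(df), "rows kept")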
f4211dfd13f13cb0b576625ee36371455d4c829c
568
py
Python
tests/test_covid_daily.py
alvarobartt/covid-daily
cb4506a007ac206e85409a13281028f6f82441a6
[ "MIT" ]
13
2020-05-23T12:25:04.000Z
2021-12-09T04:56:06.000Z
tests/test_covid_daily.py
alvarobartt/covid-daily
cb4506a007ac206e85409a13281028f6f82441a6
[ "MIT" ]
6
2020-06-02T12:18:12.000Z
2021-06-20T07:59:11.000Z
tests/test_covid_daily.py
alvarobartt/covid-daily
cb4506a007ac206e85409a13281028f6f82441a6
[ "MIT" ]
5
2020-07-02T16:48:19.000Z
2022-03-21T01:52:17.000Z
# Copyright 2020 Alvaro Bartolome, alvarobartt @ GitHub
# See LICENSE for details.

import pytest

import covid_daily


def test_overview():
    params = [
        {
            'as_json': True
        },
        {
            'as_json': False
        }
    ]

    for param in params:
        covid_daily.overview(as_json=param['as_json'])


def test_data():
    data = covid_daily.data(
        country='france',
        chart='graph-deaths-daily',
        as_json=False
    )

    print(data.tail())


if __name__ == "__main__":
    test_overview()
    test_data()
15.777778
55
0.568662
63
568
4.809524
0.555556
0.09901
0.072607
0
0
0
0
0
0
0
0
0.010363
0.320423
568
35
56
16.228571
0.774611
0.137324
0
0
0
0
0.10883
0
0
0
0
0
0
1
0.086957
false
0
0.086957
0
0.173913
0.043478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4214fc3ae97e545eaf80e0585a829da218ecbdc
6,132
py
Python
2021/HANFS/fence-agents/fence/agents/zvm/fence_zvmip.py
BryanWhitehurst/HPCCEA
54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa
[ "MIT" ]
10
2019-08-12T23:00:20.000Z
2021-08-06T17:06:48.000Z
2021/HANFS/fence-agents/fence/agents/zvm/fence_zvmip.py
BryanWhitehurst/HPCCEA
54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa
[ "MIT" ]
5
2020-06-18T23:51:58.000Z
2021-07-28T17:50:34.000Z
2021/HANFS/fence-agents/fence/agents/zvm/fence_zvmip.py
BryanWhitehurst/HPCCEA
54b7e7355b67ba3fdce2e28cc1b0e3b29d2bdefa
[ "MIT" ]
21
2019-06-10T21:03:03.000Z
2021-08-06T17:57:25.000Z
#!@PYTHON@ -tt

import sys
import atexit
import socket
import struct
import logging
sys.path.append("@FENCEAGENTSLIBDIR@")

from fencing import *
from fencing import fail, fail_usage, run_delay, EC_LOGIN_DENIED, EC_TIMED_OUT

#BEGIN_VERSION_GENERATION
RELEASE_VERSION=""
REDHAT_COPYRIGHT=""
BUILD_DATE=""
#END_VERSION_GENERATION

INT4 = 4

def open_socket(options):
    try:
        if "--inet6-only" in options:
            protocol = socket.AF_INET6
        elif "--inet4-only" in options:
            protocol = socket.AF_INET
        else:
            protocol = 0
        (_, _, _, _, addr) = socket.getaddrinfo( \
                options["--ip"], options["--ipport"], protocol,
                0, socket.IPPROTO_TCP, socket.AI_PASSIVE
                )[0]
    except socket.gaierror:
        fail(EC_LOGIN_DENIED)

    conn = socket.socket()
    conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    conn.settimeout(float(options["--shell-timeout"]))
    try:
        conn.connect(addr)
    except socket.error:
        fail(EC_LOGIN_DENIED)

    return conn

def smapi_pack_string(string):
    return struct.pack("!i%ds" % (len(string)), len(string), string)

def prepare_smapi_command(options, smapi_function, additional_args):
    packet_size = 3*INT4 + len(smapi_function) + len(options["--username"]) + len(options["--password"])
    for arg in additional_args:
        packet_size += INT4 + len(arg)

    command = struct.pack("!i", packet_size)
    command += smapi_pack_string(smapi_function)
    command += smapi_pack_string(options["--username"])
    command += smapi_pack_string(options["--password"])
    for arg in additional_args:
        command += smapi_pack_string(arg)

    return command

def get_power_status(conn, options):
    del conn
    if options.get("--original-action", None) == "monitor":
        (return_code, reason_code, images_active) = \
                get_list_of_images(options, "Check_Authentication", None)

        logging.debug("Check_Authenticate (%d,%d)", return_code, reason_code)
        if return_code == 0:
            return {}
        else:
            fail(EC_LOGIN_DENIED)

    if options["--action"] == "list":
        # '*' = list all active images
        options["--plug"] = "*"

    (return_code, reason_code, images_active) = \
            get_list_of_images(options, "Image_Status_Query", options["--plug"])
    logging.debug("Image_Status_Query results are (%d,%d)", return_code, reason_code)

    if not options["--action"] == "list":
        if (return_code == 0) and (reason_code == 0):
            return "on"
        elif (return_code == 0) and (reason_code == 12):
            # We are running always with --missing-as-off because we can not check if image
            # is defined or not (look at rhbz#1188750)
            return "off"
        else:
            return "unknown"
    else:
        (return_code, reason_code, images_defined) = \
                get_list_of_images(options, "Image_Name_Query_DM", options["--username"])
        logging.debug("Image_Name_Query_DM results are (%d,%d)", return_code, reason_code)

        return dict([(i, ("", "on" if i in images_active else "off")) for i in images_defined])

def set_power_status(conn, options):
    conn = open_socket(options)

    packet = None
    if options["--action"] == "on":
        packet = prepare_smapi_command(options, "Image_Activate", [options["--plug"]])
    elif options["--action"] == "off":
        packet = prepare_smapi_command(options, "Image_Deactivate", [options["--plug"], "IMMED"])
    conn.send(packet)

    request_id = struct.unpack("!i", conn.recv(INT4))[0]
    (output_len, request_id, return_code, reason_code) = struct.unpack("!iiii", conn.recv(INT4 * 4))
    logging.debug("Image_(De)Activate results are (%d,%d)", return_code, reason_code)

    conn.close()
    return

def get_list_of_images(options, command, data_as_plug):
    conn = open_socket(options)

    if data_as_plug is None:
        packet = prepare_smapi_command(options, command, [])
    else:
        packet = prepare_smapi_command(options, command, [data_as_plug])

    conn.send(packet)

    request_id = struct.unpack("!i", conn.recv(INT4))[0]
    (output_len, request_id, return_code, reason_code) = struct.unpack("!iiii", conn.recv(INT4 * 4))
    images = set()

    if output_len > 3*INT4:
        array_len = struct.unpack("!i", conn.recv(INT4))[0]
        data = ""

        while True:
            read_data = conn.recv(1024, socket.MSG_WAITALL)
            data += read_data
            if array_len == len(data):
                break
            elif not read_data:
                logging.error("Failed: Not enough data read from socket")
                fail(EC_TIMED_OUT)

        parsed_len = 0
        while parsed_len < array_len:
            string_len = struct.unpack("!i", data[parsed_len:parsed_len+INT4])[0]
            parsed_len += INT4
            image_name = struct.unpack("!%ds" % (string_len), data[parsed_len:parsed_len+string_len])[0]
            parsed_len += string_len
            images.add(image_name)

    conn.close()
    return (return_code, reason_code, images)

def main():
    device_opt = ["ipaddr", "login", "passwd", "port", "method", "missing_as_off"]

    atexit.register(atexit_handler)

    all_opt["ipport"]["default"] = "44444"
    all_opt["shell_timeout"]["default"] = "5.0"
    all_opt["missing_as_off"]["default"] = "1"
    options = check_input(device_opt, process_input(device_opt), other_conditions=True)

    if len(options.get("--plug", "")) > 8:
        fail_usage("Failed: Name of image can not be longer than 8 characters")

    if options["--action"] == "validate-all":
        sys.exit(0)

    docs = {}
    docs["shortdesc"] = "Fence agent for use with z/VM Virtual Machines"
    docs["longdesc"] = """The fence_zvm agent is intended to be used with with z/VM SMAPI service via TCP/IP

To use this agent the z/VM SMAPI service needs to be configured to allow the virtual machine running this agent to connect to it and issue the image_recycle operation. This involves updating the VSMWORK1 AUTHLIST VMSYS:VSMWORK1. file. The entry should look something similar to this:

   Column 1                   Column 66                Column 131

   |                          |                        |
   V                          V                        V

   XXXXXXXX                   ALL                      IMAGE_OPERATIONS

Where XXXXXXX is the name of the virtual machine used in the authuser field of the request.
"""
    docs["vendorurl"] = "http://www.ibm.com"
    show_docs(options, docs)

    run_delay(options)

    result = fence_action(None, options, set_power_status, get_power_status, get_power_status)
    sys.exit(result)

if __name__ == "__main__":
    main()
31.285714
139
0.693575
867
6,132
4.681661
0.276817
0.032028
0.039419
0.049273
0.268785
0.219266
0.138458
0.107416
0.083764
0.083764
0
0.01276
0.169276
6,132
195
140
31.446154
0.78406
0.033594
0
0.163265
0
0.013605
0.258996
0
0
0
0
0
0
1
0.047619
false
0.027211
0.047619
0.006803
0.163265
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f422b787a305cf7e7c9786d86bf5d8569355733a
5,889
py
Python
fastestimator/architecture/pytorch/unet.py
DwijayDS/fastestimator
9b288cb2bd870f971ec4cee09d0b3205e1316a94
[ "Apache-2.0" ]
57
2019-05-21T21:29:26.000Z
2022-02-23T05:55:21.000Z
fastestimator/architecture/pytorch/unet.py
vbvg2008/fastestimator
6061a4fbbeb62a2194ef82ba8017f651710d0c65
[ "Apache-2.0" ]
93
2019-05-23T18:36:07.000Z
2022-03-23T17:15:55.000Z
fastestimator/architecture/pytorch/unet.py
vbvg2008/fastestimator
6061a4fbbeb62a2194ef82ba8017f651710d0c65
[ "Apache-2.0" ]
47
2019-05-09T15:41:37.000Z
2022-03-26T17:00:08.000Z
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple

import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.init import kaiming_normal_ as he_normal


class UNetEncoderBlock(nn.Module):
    """A UNet encoder block.

    This class is intentionally not @traceable (models and layers are handled by a different process).

    Args:
        in_channels: How many channels enter the encoder.
        out_channels: How many channels leave the encoder.
    """
    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.layers = nn.Sequential(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
                                    nn.ReLU(inplace=True))

        for layer in self.layers:
            if isinstance(layer, nn.Conv2d):
                he_normal(layer.weight.data)
                layer.bias.data.zero_()

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        out = self.layers(x)
        return out, F.max_pool2d(out, 2)


class UNetDecoderBlock(nn.Module):
    """A UNet decoder block.

    This class is intentionally not @traceable (models and layers are handled by a different process).

    Args:
        in_channels: How many channels enter the decoder.
        mid_channels: How many channels are used for the decoder's intermediate layer.
        out_channels: How many channels leave the decoder.
    """
    def __init__(self, in_channels: int, mid_channels: int, out_channels: int) -> None:
        super().__init__()
        self.layers = nn.Sequential(nn.Conv2d(in_channels, mid_channels, 3, padding=1),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(mid_channels, mid_channels, 3, padding=1),
                                    nn.ReLU(inplace=True),
                                    nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                                    nn.Conv2d(mid_channels, out_channels, 3, padding=1),
                                    nn.ReLU(inplace=True))

        for layer in self.layers:
            if isinstance(layer, nn.Conv2d):
                he_normal(layer.weight.data)
                layer.bias.data.zero_()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.layers(x)


class UNet(nn.Module):
    """A standard UNet implementation in PyTorch.

    This class is intentionally not @traceable (models and layers are handled by a different process).

    Args:
        input_size: The size of the input tensor (channels, height, width).

    Raises:
        ValueError: Length of `input_size` is not 3.
        ValueError: `input_size`[1] or `input_size`[2] is not a multiple of 16.
    """
    def __init__(self, input_size: Tuple[int, int, int] = (1, 128, 128)) -> None:
        UNet._check_input_size(input_size)
        super().__init__()
        self.input_size = input_size
        self.enc1 = UNetEncoderBlock(in_channels=input_size[0], out_channels=64)
        self.enc2 = UNetEncoderBlock(in_channels=64, out_channels=128)
        self.enc3 = UNetEncoderBlock(in_channels=128, out_channels=256)
        self.enc4 = UNetEncoderBlock(in_channels=256, out_channels=512)
        self.bottle_neck = UNetDecoderBlock(in_channels=512, mid_channels=1024, out_channels=512)
        self.dec4 = UNetDecoderBlock(in_channels=1024, mid_channels=512, out_channels=256)
        self.dec3 = UNetDecoderBlock(in_channels=512, mid_channels=256, out_channels=128)
        self.dec2 = UNetDecoderBlock(in_channels=256, mid_channels=128, out_channels=64)
        self.dec1 = nn.Sequential(nn.Conv2d(128, 64, 3, padding=1),
                                  nn.ReLU(inplace=True),
                                  nn.Conv2d(64, 64, 3, padding=1),
                                  nn.ReLU(inplace=True),
                                  nn.Conv2d(64, 1, 1),
                                  nn.Sigmoid())

        for layer in self.dec1:
            if isinstance(layer, nn.Conv2d):
                he_normal(layer.weight.data)
                layer.bias.data.zero_()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x1, x_e1 = self.enc1(x)
        x2, x_e2 = self.enc2(x_e1)
        x3, x_e3 = self.enc3(x_e2)
        x4, x_e4 = self.enc4(x_e3)
        x_bottle_neck = self.bottle_neck(x_e4)
        x_d4 = self.dec4(torch.cat((x_bottle_neck, x4), 1))
        x_d3 = self.dec3(torch.cat((x_d4, x3), 1))
        x_d2 = self.dec2(torch.cat((x_d3, x2), 1))
        x_out = self.dec1(torch.cat((x_d2, x1), 1))
        return x_out

    @staticmethod
    def _check_input_size(input_size):
        if len(input_size) != 3:
            raise ValueError("Length of `input_size` is not 3 (channel, height, width)")

        _, height, width = input_size

        if height < 16 or not (height / 16.0).is_integer() or width < 16 or not (width / 16.0).is_integer():
            raise ValueError("Both height and width of input_size need to be multiples of 16 (16, 32, 48...)")
43.301471
110
0.609611
779
5,889
4.450578
0.245186
0.050764
0.018171
0.022209
0.419383
0.398616
0.364869
0.345255
0.323911
0.319873
0
0.042598
0.278485
5,889
135
111
43.622222
0.773358
0.268127
0
0.302632
0
0
0.033761
0
0
0
0
0
0
1
0.092105
false
0
0.065789
0.013158
0.236842
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f422e0910bbd8a7ecf986379f467205dc93f05c0
5,660
py
Python
generalfile/path.py
Mandera/generalfile
5e476a1c075fa072c7e52e62455feeb78b9bb298
[ "MIT" ]
null
null
null
generalfile/path.py
Mandera/generalfile
5e476a1c075fa072c7e52e62455feeb78b9bb298
[ "MIT" ]
null
null
null
generalfile/path.py
Mandera/generalfile
5e476a1c075fa072c7e52e62455feeb78b9bb298
[ "MIT" ]
null
null
null
import pathlib
import os

from generallibrary import VerInfo, TreeDiagram, Recycle, classproperty, deco_cache

from generalfile.errors import InvalidCharacterError
from generalfile.path_lock import Path_ContextManager
from generalfile.path_operations import Path_Operations
from generalfile.path_strings import Path_Strings
from generalfile.optional_dependencies.path_spreadsheet import Path_Spreadsheet
from generalfile.optional_dependencies.path_text import Path_Text
from generalfile.optional_dependencies.path_cfg import Path_Cfg
from generalfile.optional_dependencies.path_pickle import Path_Pickle


class Path(TreeDiagram, Recycle, Path_ContextManager, Path_Operations, Path_Strings, Path_Spreadsheet, Path_Text, Path_Cfg, Path_Pickle):
    """ Immutable cross-platform Path.
        Built on pathlib and TreeDiagram.
        Implements rules to ensure cross-platform compatability.
        Adds useful methods.
        Todo: Binary extension. """
    verInfo = VerInfo()
    _path_delimiter = verInfo.pathDelimiter
    Path = ...

    _recycle_keys = {"path": lambda path: Path.scrub("" if path is None else str(path))}
    _alternative_chars = {_path_delimiter: "&#47;", ":": "&#58", ".": "&#46;"}

    def __init__(self, path=None):  # Don't have parent here because of Recycle
        self.path = self.scrub(str_path="" if path is None else str(path))

        self._path = pathlib.Path(self.path)
        self._latest_listdir = set()

    copy_node = NotImplemented  # Maybe something like this to disable certain methods

    @classproperty
    def path_delimiter(cls):
        return cls._path_delimiter

    def spawn_parents(self):
        if not self.get_parent(spawn=False) and self.path and not self.is_root():
            try:
                index = self.path.rindex(self.path_delimiter) + 1
            except ValueError:
                index = 0
            self.set_parent(Path(path=self.path[:index]))

    def spawn_children(self):
        if self.is_folder():
            old_children = {path.name() for path in self.get_children(spawn=False)}

            try:
                new_children = set(os.listdir(self.path if self.path else "."))
            except PermissionError:
                new_children = set()

            for name in old_children.symmetric_difference(new_children):
                path = Path(path=self / name)
                path.set_parent(self if name in new_children else None)

    def __str__(self):
        return getattr(self, "path", "<Path not loaded yet>")
        # return self.path

    def __repr__(self):
        return self.name()

    def __fspath__(self):
        return self.path

    def __format__(self, format_spec):
        return self.path.__format__(format_spec)

    def __truediv__(self, other):
        """ :rtype: generalfile.Path """
        # print("here", self._recycle_instances)
        return self.Path(self._path / str(other))

    def __eq__(self, other):
        if isinstance(other, Path):
            other = other.path
        else:
            other = self._scrub("" if other is None else str(other))
        return self.path == other

    def __hash__(self):
        return hash(self.path)

    def __contains__(self, item):
        return self.path.__contains__(item)

    @classmethod
    def _scrub(cls, str_path):
        str_path = cls._replace_delimiters(str_path=str_path)
        str_path = cls._invalid_characters(str_path=str_path)
        str_path = cls._trim(str_path=str_path)
        str_path = cls._delimiter_suffix_if_root(str_path=str_path)
        return str_path

    @classmethod
    @deco_cache()
    def scrub(cls, str_path):
        return cls._scrub(str_path=str_path)

    @classmethod
    @deco_cache()
    def _replace_delimiters(cls, str_path):
        str_path = str_path.replace("/", cls.path_delimiter)
        str_path = str_path.replace("\\", cls.path_delimiter)
        return str_path

    @classmethod
    @deco_cache()
    def _invalid_characters(cls, str_path):
        # Simple invalid characters testing from Windows
        for character in '<>"|?*':
            if character in str_path:
                raise InvalidCharacterError(f"Invalid character '{character}' in '{str_path}'")
        if ":" in str_path:
            if not cls.verInfo.pathRootHasColon:
                raise InvalidCharacterError(f"Path has a colon but '{cls.verInfo.os}' doesn't use colon for path root: '{str_path}'")
            if str_path[1] != ":":
                raise InvalidCharacterError(f"Path has a colon but there's no colon at index 1: '{str_path}'")
            if len(str_path) >= 3 and str_path[2] != cls.path_delimiter:
                raise InvalidCharacterError(f"Path has a colon but index 2 is not a delimiter: '{str_path}'")
            if ":" in str_path[2:]:
                raise InvalidCharacterError(f"Path has a colon that's not at index 1: '{str_path}'")
        if str_path.endswith("."):
            raise InvalidCharacterError(f"Path cannot end with a dot ('.').")
        return str_path

    @classmethod
    @deco_cache()
    def _trim(cls, str_path):
        if not cls.verInfo.pathRootIsDelimiter and str_path.startswith(cls.path_delimiter):
            str_path = str_path[1:]
        if str_path.endswith(cls.path_delimiter) and len(str_path) > 1:
            str_path = str_path[0:-1]
        return str_path

    @classmethod
    @deco_cache()
    def _delimiter_suffix_if_root(cls, str_path):
        if len(str_path) == 2 and str_path[1] == ":":
            return f"{str_path}{cls.path_delimiter}"
        return str_path


setattr(Path, "Path", Path)
28.3
137
0.649117
714
5,660
4.879552
0.221289
0.108496
0.040184
0.056257
0.276119
0.211538
0.153559
0.058266
0
0
0
0.004968
0.25318
5,660
199
138
28.442211
0.819257
0.069258
0
0.160714
0
0.008929
0.083913
0.0058
0
0
0
0.005025
0
1
0.160714
false
0
0.098214
0.071429
0.464286
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4244d996a4c380f34dcf151872e78afdd5ea5e0
7,569
py
Python
src/model/model.py
kwasnydam/animal_disambiguation
1dba0a2f40ca952a3adab925ff9ef54238cf7c1c
[ "MIT" ]
null
null
null
src/model/model.py
kwasnydam/animal_disambiguation
1dba0a2f40ca952a3adab925ff9ef54238cf7c1c
[ "MIT" ]
5
2020-03-24T17:52:45.000Z
2021-08-23T20:28:40.000Z
src/model/model.py
kwasnydam/animal_disambiguation
1dba0a2f40ca952a3adab925ff9ef54238cf7c1c
[ "MIT" ]
null
null
null
"""Contains the classification model I am going to use in my problem and some utility functions. Functions build_mmdisambiguator - build the core application object with the collaborators info Classes MMDisambiguator - core class of the application """ import pickle import os import numpy as np from sklearn.linear_model import LogisticRegression import sklearn.metrics as metrics from src.data import dataset DEFAULT_CLASSIFIER_SETTINGS = { 'solver': 'liblinear', 'class_weight': 'balanced', 'C': 1. } up = os.path.dirname DEFAULT_ROOT_DIRECTORY = up(up(up(__file__))) # Get directory two levels above DEFAULT_MODEL_DIRECTORY = os.path.join(DEFAULT_ROOT_DIRECTORY, 'models') def try_opening_file_pickle(path): try: with open(path, 'rb') as f: file_content = pickle.load(f) except FileNotFoundError as e: print('FileNotFound exception occured when trying to open: {}. Disambiguator build failed.'.format( path )) raise e except Exception as e: print('Exception occured when trying to open {}: {}'.format(path, e)) raise e return file_content def build_mmdisambiguator(data_model_params, data_model_path, classificator_parameters, classificator_path=None): """Given collaborator parameters and /or load paths, build the MMDisambiguator""" if classificator_path is None: data_model = dataset.TextLabelsVectorizer(data_model_params) data_model_saved = try_opening_file_pickle(data_model_path) data_model.deserialize(data_model_saved) classificator = LogisticRegression(**classificator_parameters) disambiguator = MMDisambiguator(data_model, classificator) else: disambiguator_pieces = try_opening_file_pickle(classificator_path) data_model = dataset.TextLabelsVectorizer(data_model_params) data_model.deserialize(disambiguator_pieces['data_model']) classificator = disambiguator_pieces['classificator'] disambiguator = MMDisambiguator(data_model, classificator) return disambiguator class NotTrainedException(Exception): pass class MMDisambiguator: """The class representing the core logic of the disambiguation app. It uses data_model for feature and text manipulation and Logistic Regression for performing prediction With 'source' flag user controls if the training/prediction is preformed from precomputed numercial features or text. If it is done from text, the input is put through feature_extraction first. Methods: train - fit the classifier or both data model and classifier from training data predict - get prediction on data. the data can be single or multiple samples transform_labels - get numerical representation of labels performance_report - generate summary of performance serialize - get representation for saving """ def __init__(self, data_model:dataset.TextLabelsVectorizer, classificator: LogisticRegression): self.data_model = data_model self.classificator = classificator def is_trained(self): """Returns True if the underlying classification model is trained""" return hasattr(self.classificator, "coef_") def train(self, data, classes, report=False, source='features'): """Train the model with training data DATA and training labels CLASSES Args: data - training data (text or features) classes- training classes (text or numerical) report - flag, if True generate training report source - 'features': numerical, train directly. 
'text': train vectorizer, transfrom, then train classifier """ if source == 'text': features, classes = self.data_model.fit_transform(data, classes) else: features = data self.classificator.fit(features, classes) if report: return self.performance_report(self._classify(self.classificator.predict_proba(features)), classes) else: return None def transform_labels(self, labels): """Returns numerical encoding of text labels""" return self.data_model.transform_labels(labels) def predict(self, unseen_features, mode='classification', threshold=0.5, format='text', source='features'): """Predict classes on unseen data. Args: unseen_features - 'string' or list/pandas Series of 'string' if source = 'text'. numpy array if source = 'features' mode - 'classification' - predict probabilities and then make classifcation decision based on 'threshold 'predicition' - return predicted probabilities threshold - if mode = 'classification', threshold for the decision source - 'text' if sentences, 'features' if input already transformed """ if not self.is_trained(): raise NotTrainedException('Attempted to perform prediction on a model that has not been trained') if source == 'text': unseen_features = self.data_model.transform(unseen_features) predicted_probability = self.classificator.predict_proba(unseen_features) if mode == 'classification': classification_binary = self._classify(predicted_probability, threshold).astype(np.int) classification = classification_binary if format == 'text': classification = self.data_model.get_classes_name(classification_binary) result = [] for idx in range(classification.shape[0]): result.append([classification[idx], predicted_probability[idx,classification_binary[idx]]]) result = np.asarray(result) elif mode == 'prediction': result = predicted_probability return result def _classify(self, predicted_probabilities, threshold=0.5): """Decision: class based on predicted probability and threshold""" classes = predicted_probabilities.copy()[:,1] classes[classes >= threshold] = 1 classes[classes < threshold] = 0 return classes def performance_report(self, predicted_classes, real_classes): """Generates performance of the given classifier given predicted and real classes Args: predicted_classes - iterable containing the prediciton results, len(num_of_samples) real_classes - iterable containing ground truth classes, len(num_of_samples) Output: report - dictionary containing the following fields: 'accuracy', 'precision', 'recall', 'f1_score', 'confussion_matrix' """ report = { 'accuracy': metrics.accuracy_score(real_classes, predicted_classes), 'precision': metrics.precision_score(real_classes, predicted_classes), 'recall': metrics.recall_score(real_classes, predicted_classes), 'f1': metrics.f1_score(real_classes, predicted_classes), 'confussion_matrix': metrics.confusion_matrix(real_classes, predicted_classes, labels = [1, 0]).tolist() } return report def serialize(self): """Returns objects and parameters necessary to perform prediciton""" to_serialize = { 'data_model': self.data_model.serialize(), 'classificator': self.classificator } return to_serialize
39.836842
118
0.680011
821
7,569
6.109622
0.2838
0.044856
0.018142
0.026914
0.086922
0.036683
0.023923
0.023923
0.023923
0
0
0.002451
0.245343
7,569
189
119
40.047619
0.8757
0.337561
0
0.111111
0
0
0.08535
0
0
0
0
0
0
1
0.10101
false
0.010101
0.060606
0
0.282828
0.020202
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f425ac3324f9ff67c7cc522a90e36c4d71da699a
2,848
py
Python
v0.5.0/nvidia/submission/code/recommendation/pytorch/load.py
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
[ "Apache-2.0" ]
44
2018-11-07T18:52:33.000Z
2019-07-06T12:48:18.000Z
v0.5.0/nvidia/submission/code/recommendation/pytorch/load.py
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
[ "Apache-2.0" ]
12
2018-12-13T18:04:36.000Z
2019-06-14T20:49:33.000Z
v0.5.0/nvidia/submission/code/recommendation/pytorch/load.py
myelintek/results
11c38436a158c453e3011f8684570f7a55c03330
[ "Apache-2.0" ]
44
2018-11-09T21:04:52.000Z
2019-06-24T07:40:28.000Z
# Copyright (c) 2018, deepakn94. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections import namedtuple

import pandas as pd

RatingData = namedtuple('RatingData',
                        ['items', 'users', 'ratings', 'min_date', 'max_date'])


def describe_ratings(ratings):
    info = RatingData(items=len(ratings['item_id'].unique()),
                      users=len(ratings['user_id'].unique()),
                      ratings=len(ratings),
                      min_date=ratings['timestamp'].min(),
                      max_date=ratings['timestamp'].max())
    print("{ratings} ratings on {items} items from {users} users"
          " from {min_date} to {max_date}"
          .format(**(info._asdict())))
    return info


def process_movielens(ratings, sort=True):
    ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
    if sort:
        ratings.sort_values(by='timestamp', inplace=True)
    describe_ratings(ratings)
    return ratings


def load_ml_100k(filename, sort=True):
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = pd.read_csv(filename, sep='\t', names=names)
    return process_movielens(ratings, sort=sort)


def load_ml_1m(filename, sort=True):
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
    return process_movielens(ratings, sort=sort)


def load_ml_10m(filename, sort=True):
    names = ['user_id', 'item_id', 'rating', 'timestamp']
    ratings = pd.read_csv(filename, sep='::', names=names, engine='python')
    return process_movielens(ratings, sort=sort)


def load_ml_20m(filename, sort=True):
    ratings = pd.read_csv(filename)
    ratings['timestamp'] = pd.to_datetime(ratings['timestamp'], unit='s')
    names = {'userId': 'user_id', 'movieId': 'item_id'}
    ratings.rename(columns=names, inplace=True)
    return process_movielens(ratings, sort=sort)


DATASETS = [k.replace('load_', '') for k in locals().keys() if "load_" in k]


def get_dataset_name(filename):
    for dataset in DATASETS:
        if dataset in filename.replace('-', '_').lower():
            return dataset
    raise NotImplementedError


def implicit_load(filename, sort=True):
    func = globals()["load_" + get_dataset_name(filename)]
    return func(filename, sort=sort)
34.313253
78
0.670997
370
2,848
5.035135
0.364865
0.032206
0.061728
0.072464
0.303811
0.290929
0.271068
0.271068
0.271068
0.193773
0
0.007836
0.193469
2,848
82
79
34.731707
0.803222
0.201194
0
0.234043
0
0
0.156568
0
0
0
0
0
0
1
0.170213
false
0
0.042553
0
0.382979
0.021277
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f427c8d1c78db5257b6c365066dd8f7483686e6c
10,390
py
Python
hummingbot/client/command/history_command.py
sanchaymittal/hummingbot
f8d1c19dfd0875bd12717f9c46ddbe20cc7b9a0d
[ "Apache-2.0" ]
null
null
null
hummingbot/client/command/history_command.py
sanchaymittal/hummingbot
f8d1c19dfd0875bd12717f9c46ddbe20cc7b9a0d
[ "Apache-2.0" ]
null
null
null
hummingbot/client/command/history_command.py
sanchaymittal/hummingbot
f8d1c19dfd0875bd12717f9c46ddbe20cc7b9a0d
[ "Apache-2.0" ]
null
null
null
from decimal import Decimal
import pandas as pd
from typing import (
    Any,
    Dict,
    Set,
    Tuple,
    TYPE_CHECKING)
from hummingbot.client.performance_analysis import PerformanceAnalysis
from hummingbot.core.utils.exchange_rate_conversion import ExchangeRateConversion
from hummingbot.market.market_base import MarketBase
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple

ERC = ExchangeRateConversion.get_instance()
s_float_0 = float(0)

if TYPE_CHECKING:
    from hummingbot.client.hummingbot_application import HummingbotApplication


class HistoryCommand:
    def history(self,  # type: HummingbotApplication
                ):
        if not all(market.ready for market in self.markets.values()):
            self._notify("  History stats are not available before Markets are ready.")
            return
        self.list_trades()
        self.trade_performance_report()

    def balance_snapshot(self,  # type: HummingbotApplication
                         ) -> Dict[str, Dict[str, float]]:
        snapshot: Dict[str, Any] = {}
        for market_name in self.markets:
            balance_dict = self.markets[market_name].get_all_balances()
            balance_dict = {k.upper(): v for k, v in balance_dict.items()}

            for asset in self.assets:
                asset = asset.upper()
                if asset not in snapshot:
                    snapshot[asset] = {}
                if asset in balance_dict:
                    snapshot[asset][market_name] = balance_dict[asset]
                else:
                    snapshot[asset][market_name] = 0.0
        return snapshot

    def balance_comparison_data_frame(self,  # type: HummingbotApplication
                                      market_trading_pair_stats: Dict[MarketTradingPairTuple, any],
                                      ) -> pd.DataFrame:
        if len(self.starting_balances) == 0:
            self._notify("  Balance snapshots are not available before bot starts")
            return
        rows = []
        for market_trading_pair_tuple in self.market_trading_pair_tuples:
            market: MarketBase = market_trading_pair_tuple.market
            for asset in set(a.upper() for a in self.assets):
                asset_delta: Dict[str, float] = market_trading_pair_stats[market_trading_pair_tuple]["asset"].get(
                    asset, {"delta": s_float_0})
                starting_balance = self.starting_balances.get(asset).get(market.name)
                current_balance = self.balance_snapshot().get(asset).get(market.name)
                rows.append([market.display_name,
                             asset,
                             float(starting_balance),
                             float(current_balance),
                             float(current_balance - starting_balance),
                             float(asset_delta["delta"]),
                             ERC.adjust_token_rate(asset, Decimal(1))])
        df = pd.DataFrame(rows, index=None, columns=["Market", "Asset", "Starting", "Current", "Net_Delta",
                                                     "Trade_Delta", "Conversion_Rate"])
        return df

    def get_performance_analysis_with_updated_balance(self,  # type: HummingbotApplication
                                                      ) -> PerformanceAnalysis:
        performance_analysis = PerformanceAnalysis()
        dedup_set: Set[Tuple[str, str, bool]] = set()

        for market_trading_pair_tuple in self.market_trading_pair_tuples:
            for is_base in [True, False]:
                for is_starting in [True, False]:
                    market_name = market_trading_pair_tuple.market.name
                    asset_name = market_trading_pair_tuple.base_asset if is_base else market_trading_pair_tuple.quote_asset
                    asset_name = asset_name.upper()
                    if len(self.assets) == 0 or len(self.markets) == 0:
                        # Prevent KeyError '***SYMBOL***'
                        amount = self.starting_balances[asset_name][market_name]
                    else:
                        amount = self.starting_balances[asset_name][market_name] if is_starting \
                            else self.balance_snapshot()[asset_name][market_name]
                    amount = float(amount)

                    # Adding this check to prevent assets in the same market to be added multiple times
                    if (market_name, asset_name, is_starting) not in dedup_set:
                        dedup_set.add((market_name, asset_name, is_starting))
                        performance_analysis.add_balances(asset_name, amount, is_base, is_starting)

        return performance_analysis

    def get_market_mid_price(self,  # type: HummingbotApplication
                             ) -> float:
        # Compute the current exchange rate. We use the first market_symbol_pair because
        # if the trading pairs are different, such as WETH-DAI and ETH-USD, the currency
        # pairs above will contain the information in terms of the first trading pair.
        market_pair_info = self.market_trading_pair_tuples[0]
        market = market_pair_info.market
        buy_price = market.get_price(market_pair_info.trading_pair, True)
        sell_price = market.get_price(market_pair_info.trading_pair, False)
        price = float((buy_price + sell_price) / 2)
        return price

    def analyze_performance(self,  # type: HummingbotApplication
                            ):
        """ Calculate bot profitability and print to output pane """
        if len(self.starting_balances) == 0:
            self._notify("  Performance analysis is not available before bot starts")
            return

        performance_analysis: PerformanceAnalysis = self.get_performance_analysis_with_updated_balance()
        price: float = self.get_market_mid_price()

        starting_token, starting_amount = performance_analysis.compute_starting(price)
        current_token, current_amount = performance_analysis.compute_current(price)
        delta_token, delta_amount = performance_analysis.compute_delta(price)
        return_performance = performance_analysis.compute_return(price)

        starting_amount = round(starting_amount, 3)
        current_amount = round(current_amount, 3)
        delta_amount = round(delta_amount, 3)
        return_performance = round(return_performance, 3)

        print_performance = "\n"
        print_performance += "  Performance:\n"
        print_performance += "    - Starting Inventory Value: " + str(starting_amount) + " " + starting_token + "\n"
        print_performance += "    - Current Inventory Value: " + str(current_amount) + " " + current_token + "\n"
        print_performance += "    - Delta: " + str(delta_amount) + " " + delta_token + "\n"
        print_performance += "    - Return: " + str(return_performance) + "%"
        self._notify(print_performance)

    def calculate_profitability(self) -> float:
        """ Determine the profitability of the trading bot. """
        performance_analysis: PerformanceAnalysis = self.get_performance_analysis_with_updated_balance()
        price: float = self.get_market_mid_price()
        return_performance = performance_analysis.compute_return(price)
        return return_performance

    def trade_performance_report(self,  # type: HummingbotApplication
                                 ) -> pd.DataFrame:
        if len(self.market_trading_pair_tuples) == 0:
            self._notify("  Performance analysis is not available before bot starts")
            return

        try:
            current_strategy_name: str = self.markets_recorder.strategy_name
            analysis_start_time: int = self.init_time
            primary_quote_asset: str = self.market_trading_pair_tuples[0].quote_asset.upper()
            performance_analysis: PerformanceAnalysis = PerformanceAnalysis()
            trade_performance_stats, market_trading_pair_stats = performance_analysis.calculate_trade_performance(
                analysis_start_time,
                current_strategy_name,
                self.market_trading_pair_tuples
            )
            trade_performance_status_line = []
            market_df_data: Set[Tuple[str, str, float, float, str, str]] = set()
            market_df_columns = ["Market", "Trading_Pair", "Start_Price", "End_Price",
                                 "Total_Value_Delta", "Profit"]

            for market_trading_pair_tuple, trading_pair_stats in market_trading_pair_stats.items():
                market_df_data.add((
                    market_trading_pair_tuple.market.display_name,
                    market_trading_pair_tuple.trading_pair.upper(),
                    float(trading_pair_stats["starting_quote_rate"]),
                    float(trading_pair_stats["end_quote_rate"]),
                    f"{trading_pair_stats['trading_pair_delta']:.8f} {primary_quote_asset}",
                    f"{trading_pair_stats['trading_pair_delta_percentage']:.3f} %"
                ))

            inventory_df: pd.DataFrame = self.balance_comparison_data_frame(market_trading_pair_stats)
            market_df: pd.DataFrame = pd.DataFrame(data=list(market_df_data), columns=market_df_columns)
            portfolio_delta: Decimal = trade_performance_stats["portfolio_delta"]
            portfolio_delta_percentage: Decimal = trade_performance_stats["portfolio_delta_percentage"]

            trade_performance_status_line.extend(["", "  Inventory:"] +
                                                 ["    " + line for line in inventory_df.to_string().split("\n")])
            trade_performance_status_line.extend(["", "  Market Trading Pair Performance:"] +
                                                 ["    " + line for line in market_df.to_string().split("\n")])

            trade_performance_status_line.extend(
                ["", "  Portfolio Performance:"] +
                [f"    Quote Value Delta: {portfolio_delta:.7g} {primary_quote_asset}"] +
                [f"    Delta Percentage: {portfolio_delta_percentage:.3f} %"])

            self._notify("\n".join(trade_performance_status_line))

        except Exception:
            self.logger().error("Unexpected error running performance analysis.", exc_info=True)
            self._notify("Error running performance analysis")
52.474747
123
0.624254
1,104
10,390
5.564312
0.171196
0.062673
0.066417
0.039394
0.278528
0.21846
0.159206
0.148462
0.105486
0.091161
0
0.002995
0.292974
10,390
197
124
52.741117
0.833243
0.062368
0
0.135802
0
0
0.099815
0.02305
0
0
0
0
0
1
0.049383
false
0
0.049383
0
0.160494
0.04321
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f427f297c82ca0ccff892cae6ccdb0651100e3ef
3,271
py
Python
scripts/bin2asm.py
sami2316/asm2vec-pytorch
5de1351aeda61d7467b3231e48437fd8d34a970c
[ "MIT" ]
null
null
null
scripts/bin2asm.py
sami2316/asm2vec-pytorch
5de1351aeda61d7467b3231e48437fd8d34a970c
[ "MIT" ]
null
null
null
scripts/bin2asm.py
sami2316/asm2vec-pytorch
5de1351aeda61d7467b3231e48437fd8d34a970c
[ "MIT" ]
null
null
null
import re
import os
import click
import r2pipe
import hashlib
from pathlib import Path
import _pickle as cPickle


def sha3(data):
    return hashlib.sha3_256(data.encode()).hexdigest()


def validEXE(filename):
    magics = [bytes.fromhex('7f454c46')]
    with open(filename, 'rb') as f:
        header = f.read(4)
        return header in magics


def normalize(opcode):
    opcode = opcode.replace(' - ', ' + ')
    opcode = re.sub(r'0x[0-9a-f]+', 'CONST', opcode)
    opcode = re.sub(r'\*[0-9]', '*CONST', opcode)
    opcode = re.sub(r' [0-9]', ' CONST', opcode)
    return opcode


def fn2asm(pdf, minlen):
    # check
    if pdf is None:
        return
    if len(pdf['ops']) < minlen:
        return
    if 'invalid' in [op['type'] for op in pdf['ops']]:
        return

    ops = pdf['ops']

    # set label
    labels, scope = {}, [op['offset'] for op in ops]
    assert(None not in scope)
    for i, op in enumerate(ops):
        if op.get('jump') in scope:
            labels.setdefault(op.get('jump'), i)

    # dump output
    output = ''
    for op in ops:
        # add label
        if labels.get(op.get('offset')) is not None:
            output += f'LABEL{labels[op["offset"]]}:\n'
        # add instruction
        if labels.get(op.get('jump')) is not None:
            output += f' {op["type"]} LABEL{labels[op["jump"]]}\n'
        else:
            output += f' {normalize(op["opcode"])}\n'

    return output


def bin2asm(filename, opath, minlen):
    #
    # Create directory where results will be written to.
    #
    results_dir = os.path.join(opath, os.path.basename(filename))
    if not os.access(results_dir, os.F_OK):
        os.makedirs(results_dir)

    r = r2pipe.open(str(filename))
    r.cmd('aaaa')

    count = 0

    fp = open("%s/fv.pcl" % (results_dir), 'wb')
    for fn in r.cmdj('aflj'):
        r.cmd(f's {fn["offset"]}')
        asm = fn2asm(r.cmdj('pdfj'), minlen)
        if asm:
            fv = [
                fn["name"],
                asm
            ]
            cPickle.dump(fv, fp)
            count += 1

    fp.close()
    print(f'[+] {filename}')

    return count


@click.command()
@click.option('-i', '--input', 'ipath', help='input directory / file', required=True)
@click.option('-o', '--output', 'opath', default='asm', help='output directory')
@click.option('-l', '--len', 'minlen', default=1, help='ignore assembly code with instructions amount smaller than minlen')
def cli(ipath, opath, minlen):
    '''
    Extract assembly functions from binary executable
    '''
    ipath = Path(ipath)
    opath = Path(opath)

    # create output directory
    if not os.path.exists(opath):
        os.mkdir(opath)

    fcount, bcount = 0, 0

    # directory
    if os.path.isdir(ipath):
        for f in os.listdir(ipath):
            if not os.path.islink(ipath / f) and not os.path.isdir(ipath / f):
                fcount += bin2asm(ipath / f, opath, minlen)
                bcount += 1
    # file
    elif os.path.exists(ipath):
        fcount += bin2asm(ipath, opath, minlen)
        bcount += 1
    else:
        print(f'[Error] No such file or directory: {ipath}')

    print(f'[+] Total scan binary: {bcount} => Total generated assembly functions: {fcount}')


if __name__ == '__main__':
    cli()
26.811475
123
0.566188
436
3,271
4.213303
0.33945
0.022863
0.017964
0.019597
0.068046
0.033206
0.033206
0.033206
0.033206
0.033206
0
0.013971
0.277897
3,271
121
124
27.033058
0.76376
0.059309
0
0.081395
0
0
0.175025
0.027532
0
0
0
0
0.011628
1
0.069767
false
0
0.081395
0.011628
0.244186
0.034884
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4284681ecf92df1bb97ccccca1bcb0558c6d8a3
1,763
py
Python
LazyAngus/Assets/Extensions/IOSDeploy/Scripts/Editor/post_process.py
DougLazyAngus/lazyAngus
485a8d5061ab740ab055abfc7fc5b86b864a5c7e
[ "Apache-2.0" ]
null
null
null
LazyAngus/Assets/Extensions/IOSDeploy/Scripts/Editor/post_process.py
DougLazyAngus/lazyAngus
485a8d5061ab740ab055abfc7fc5b86b864a5c7e
[ "Apache-2.0" ]
null
null
null
LazyAngus/Assets/Extensions/IOSDeploy/Scripts/Editor/post_process.py
DougLazyAngus/lazyAngus
485a8d5061ab740ab055abfc7fc5b86b864a5c7e
[ "Apache-2.0" ]
null
null
null
import os
from sys import argv
from mod_pbxproj import XcodeProject
#import appcontroller

path = argv[1]
frameworks = argv[2].split(' ')
libraries = argv[3].split(' ')
cflags = argv[4].split(' ')
ldflags = argv[5].split(' ')
folders = argv[6].split(' ')

print('Step 1: add system frameworks ')
#if framework is optional, add `weak=True`
project = XcodeProject.Load(path +'/Unity-iPhone.xcodeproj/project.pbxproj')
for frwrk in frameworks:
    files = project.get_files_by_name(frwrk)
    for f in files:
        project.remove_file(f)
    if len(frwrk) > 0:
        fo = frwrk.split('|')
        if int(fo[1]):
            project.add_file('System/Library/Frameworks/' + fo[0], tree='SDKROOT', weak=True)
        else:
            project.add_file('System/Library/Frameworks/' + fo[0], tree='SDKROOT')

print('Step 2: add system libraries ')
for lib in libraries:
    files = project.get_files_by_name(lib)
    for f in files:
        project.remove_file(f)
    if len(lib) > 0:
        lo = lib.split('|')
        if int(lo[1]):
            project.add_file('usr/lib/' + lo[0], tree='SDKROOT', weak=True)
        else:
            project.add_file('usr/lib/' + lo[0], tree='SDKROOT')

print('Step 3: add CFLAGS ')
for cf in cflags:
    if len(cf) > 0:
        project.add_other_cflags(cf)

print('Step 4: add LDFLAGS ')
for ldf in ldflags:
    if len(ldf) > 0:
        project.add_other_ldflags(ldf)

print('Step 5: add language folders')
for langFolder in folders:
    if len(langFolder) > 0:
        project.add_folder(path + '/' + langFolder + '.lproj')

print('Step 6: save our change to xcode project file')
if project.modified:
    project.backup()
    project.saveFormat3_2()
29.383333
94
0.608622
242
1,763
4.355372
0.301653
0.066414
0.053131
0.037951
0.314991
0.297913
0.248577
0.248577
0.248577
0.16129
0
0.018954
0.251843
1,763
59
95
29.881356
0.780136
0.0346
0
0.125
0
0
0.195003
0.055454
0
0
0
0
0
1
0
false
0
0.0625
0
0.0625
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4299097184e1727c715f499e066d9e69de9e523
26,771
py
Python
src/badge_hub.py
stottlerhenke-seattle/openbadge-hub-py
d0eb1772eb1250862041cc50071252f46d4c4771
[ "MIT" ]
null
null
null
src/badge_hub.py
stottlerhenke-seattle/openbadge-hub-py
d0eb1772eb1250862041cc50071252f46d4c4771
[ "MIT" ]
null
null
null
src/badge_hub.py
stottlerhenke-seattle/openbadge-hub-py
d0eb1772eb1250862041cc50071252f46d4c4771
[ "MIT" ]
null
null
null
#!/usr/bin/env python

from __future__ import absolute_import, division, print_function

import os
import re
import shlex
import subprocess
import signal
import csv
import logging
import json
import time
from datetime import datetime as dt
from requests.exceptions import RequestException
import glob
import traceback
import random

from badge import *
from badge_discoverer import BadgeDiscoverer, BeaconDiscoverer
from badge_manager_server import BadgeManagerServer
from beacon_manager_server import BeaconManagerServer
from badge_manager_standalone import BadgeManagerStandalone
from beacon_manager_standalone import BeaconManagerStandalone
import hub_manager
from settings import DATA_DIR, LOG_DIR

log_file_name = LOG_DIR + 'hub.log'
scans_file_name = DATA_DIR + 'scan.txt'

pending_file_prefix = DATA_DIR + 'pending_'
audio_archive_file_name = DATA_DIR + 'audio_archive.txt'
proximity_archive_file_name = DATA_DIR + 'proximity_archive.txt'

standalone_audio_file = DATA_DIR + 'audio_data.txt'
standalone_proximity_file = DATA_DIR + 'proximity_data.txt'

AUDIO = "audio"
PROXIMITY = "proximity"

SCAN_DURATION = 3  # seconds

# NOTE try to keep under 100MB or so due to memory constraints
MAX_PENDING_FILE_SIZE = 15000000  # in bytes, so 15MB

# create logger with 'badge_server'
logger = logging.getLogger('badge_server')
logger.setLevel(logging.DEBUG)

# create file handler which logs even debug messages
fh = logging.FileHandler(log_file_name)
fh.setLevel(logging.DEBUG)

# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)

# create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(levelname)s - [%(mac)s] %(message)s')
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)

# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)


def round_float_for_log(x):
    return float("{0:.3f}".format(x))


def has_chunks(filename):
    """
    Returns true if there is data in the file, and false otherwise
    """
    return os.path.exists(filename) and os.path.getsize(filename) > 0


def offload_data():
    """
    Send pending files to server and move pending to archive
    Return True on success, False on failure
    """
    # TODO test with standalone
    # NOTE not currently doing anything with the True/False
    # return values, might decide to do something later
    pending_files = sorted(glob.glob(pending_file_prefix + "*"))
    for pending_file_name in pending_files:
        logger.debug("Sending {} to server".format(pending_file_name))
        if not has_chunks(pending_file_name):
            continue

        chunks = []
        with open(pending_file_name, "r") as pending_file:
            for line in pending_file:
                chunks.append(json.loads(line))

        # real quick grab the data type from the first data entry
        data_type = "audio" if "audio" in chunks[0]["type"] else "proximity"

        # fire away!
        try:
            chunks_written = hub_manager.send_data_to_server(logger, data_type, chunks)
            if chunks_written == len(chunks):
                logger.debug("Successfully wrote {} data entries to server"
                             .format(len(chunks)))
            else:
                # this seems unlikely to happen but is good to keep track of i guess
                logger.error("Data mismatch: {} data entries were not written to server"
                             .format(len(chunks) - chunks_written))
                logger.error("Error sending data from file {} to server!"
                             .format(pending_file_name))
                return False

            # write to archive and erase pending file
            with open(get_archive_name(data_type), "a") as archive_file:
                for chunk in chunks:
                    archive_file.write(json.dumps(chunk) + "\n")
            os.remove(pending_file_name)
        except RequestException as e:
            s = traceback.format_exc()
            logger.error("Error sending data from file {} to server!"
                         .format(pending_file_name))
            logger.error("{},{}".format(e, s))
            return False
    return True


def get_archive_name(data_type):
    """
    Return the name of the archive file for the passed data type
    """
    if data_type == AUDIO:
        return audio_archive_file_name
    else:
        return proximity_archive_file_name


def get_proximity_name(mode="server"):
    """
    return the name of the existing pending proximity file,
    or a new one if either one doesn't exist or if the existing
    file is > MAX_PENDING_FILE_SIZE
    """
    if mode == "server":
        return _get_pending_file_name(PROXIMITY)
    else:
        return standalone_proximity_file


def get_audio_name(mode="server"):
    if mode == "server":
        return _get_pending_file_name(AUDIO)
    else:
        return standalone_audio_file


def _get_pending_file_name(data_type):
    """
    If there are no current pending files < MAX_PENDING_FILE_SIZE in size,
    return a new pending filename
    Else, return an existing one.
    """
    filenames = filter(
        lambda x: os.path.getsize(x) < MAX_PENDING_FILE_SIZE,
        glob.glob("{}*{}*".format(pending_file_prefix, data_type)))
    if len(filenames) == 0:
        return _create_pending_file_name(data_type)
    else:
        return filenames[0]


def _create_pending_file_name(data_type):
    """
    Create a pending file name for the given data_type
    Uses the current date/time to create a unique filename
    """
    now = dt.now().strftime("%Y%m%d%H%M%S")
    filename = "{}{}_{}.txt".format(pending_file_prefix, now, data_type)
    if os.path.exists(filename):
        # this seems unlikely to happen, but just in case :)
        # get the number of pending files that match this time and add one
        files = glob.glob("{}{}*{}*".format(pending_file_prefix, now, data_type))
        now = '_'.join((now, str(len(files) + 1)))
        filename = "{}{}_{}.txt".format(pending_file_prefix, now, data_type)
    return filename


def dialogue(bdg, activate_audio, activate_proximity, mode="server"):
    """
    Attempts to read data from the device specified by the address.
    Reading is handled by gatttool.
    :param bdg:
    :return:
    """
    ret = bdg.pull_data(activate_audio, activate_proximity)
    addr = bdg.addr
    if ret == 0:
        logger.info("Successfully pulled data")
        # if we were able to pull data, we saw the badge again
        bdg.last_seen_ts = time.time()
    else:
        logger.info("Errors pulling data.")

    if bdg.dlg.chunks:
        logger.info("Chunks received: {}".format(len(bdg.dlg.chunks)))
        logger.info("saving chunks to file")

        # store in JSON file
        with open(get_audio_name(mode), "a") as fout:
            for chunk in bdg.dlg.chunks:
                ts_with_ms = round_float_for_log(ts_and_fract_to_float(chunk.ts, chunk.fract))
                log_line = {
                    'type': "audio received",
                    'log_timestamp': round_float_for_log(time.time()),
                    'log_index': -1,  # need to find a good accumulator.
                    'data': {
                        'voltage': round_float_for_log(chunk.voltage),
                        'timestamp': ts_with_ms,
                        'sample_period': chunk.sampleDelay,
                        'num_samples': len(chunk.samples),
                        'samples': chunk.samples,
                        'badge_address': addr,
                        'member': bdg.key,
                        'member_id': bdg.badge_id
                    }
                }

                logger.debug("Chunk timestamp: {0:.3f}, Voltage: {1:.3f}, Delay: {2}, Samples in chunk: {3}".format(
                    ts_with_ms, chunk.voltage, chunk.sampleDelay, len(chunk.samples)))
                # logger.debug(json.dumps(log_line))
                json.dump(log_line, fout)
                fout.write('\n')

        logger.info("done writing")

        # update badge object to hold latest timestamps
        last_chunk = bdg.dlg.chunks[-1]
        last_chunk_ts_pretty = dt.fromtimestamp(last_chunk.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
        if bdg.is_newer_audio_ts(last_chunk.ts, last_chunk.fract):
            logger.debug("Setting last badge audio timestamp to {} {} ({})".format(
                last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
            bdg.set_audio_ts(last_chunk.ts, last_chunk.fract)
        else:
            logger.debug("Keeping existing timestamp ({}.{}) for {}. Last chunk timestamp was: {}.{} ({})"
                         .format(bdg.last_audio_ts_int, bdg.last_audio_ts_fract, bdg.addr,
                                 last_chunk.ts, last_chunk.fract, last_chunk_ts_pretty))
    else:
        logger.info("No mic data ready")

    if bdg.dlg.scans:
        logger.info("Proximity scans received: {}".format(len(bdg.dlg.scans)))
        logger.info("saving proximity scans to file")
        with open(get_proximity_name(mode), "a") as fout:
            for scan in bdg.dlg.scans:
                ts_with_ms = round_float_for_log(scan.ts)
                log_line = {
                    'type': "proximity received",
                    'log_timestamp': round_float_for_log(time.time()),
                    'log_index': -1,  # need to find a good accumulator.
                    'data': {
                        'voltage': round_float_for_log(scan.voltage),
                        'timestamp': ts_with_ms,
                        'badge_address': addr,
                        'rssi_distances': {
                            device.ID: {'rssi': device.rssi, 'count': device.count}
                            for device in scan.devices
                        },
                        'member': bdg.key,
                        'member_id': bdg.badge_id
                    }
                }

                logger.debug("SCAN: scan timestamp: {0:.3f}, voltage: {1:.3f}, Devices in scan: {2}".format(
                    ts_with_ms, scan.voltage, scan.numDevices))
                # logger.info(json.dumps(log_line))
                json.dump(log_line, fout)
                fout.write('\n')

        # update badge object to hold latest timestamps
        last_scan = bdg.dlg.scans[-1]
        last_scan_ts_pretty = dt.fromtimestamp(last_scan.ts).strftime("%Y-%m-%d@%H:%M:%S UTC")
        logger.debug("Setting last badge proximity timestamp to {} ({})".format(
            last_scan.ts, last_scan_ts_pretty))
        bdg.last_proximity_ts = last_scan.ts
    else:
        logger.info("No proximity scans ready")


def scan_for_devices(devices_whitelist, show_all=False):
    bd = BadgeDiscoverer(logger)
    try:
        all_devices = bd.discover(scan_duration=SCAN_DURATION)
    except Exception as e:  # catch *all* exceptions
        logger.error("[Badges] Scan failed,{}".format(e))
        all_devices = {}

    scanned_devices = []
    for addr, device_info in all_devices.iteritems():
        if addr in devices_whitelist:
            logger.debug("\033[1;7m\033[1;32mFound {}, added. Device info: {}\033[0m".format(addr, device_info))
            scanned_devices.append({'mac': addr, 'device_info': device_info})
        else:
            if show_all:
                logger.debug("Found {}, but not on whitelist. Device info: {}".format(addr, device_info))
            pass

    time.sleep(2)  # requires sometimes to prevent connection from failing
    return scanned_devices


def scan_for_bc_devices(devices_whitelist, show_all=False):
    bc = BeaconDiscoverer(logger)
    try:
        all_bc_devices = bc.discover(scan_duration=SCAN_DURATION)
    except Exception as e:  # catch *all* exceptions
        logger.error("[Beacons] Scan failed,{}".format(e))
        all_bc_devices = {}

    scanned_bc_devices = []
    for addr, device_info in all_bc_devices.iteritems():
        if addr in devices_whitelist:
            logger.debug("\033[1;7m\033[1;32mFound {}, added. Device info: {}\033[0m".format(addr, device_info))
            scanned_bc_devices.append({'mac': addr, 'device_info': device_info})
        else:
            if show_all:
                logger.debug("Found {}, but not on whitelist. Device info: {}".format(addr, device_info))
            pass

    time.sleep(2)  # requires sometimes to prevent connection from failing
    return scanned_bc_devices


def create_badge_manager_instance(mode, timestamp):
    if mode == "server":
        mgr = BadgeManagerServer(logger=logger)
    else:
        mgr = BadgeManagerStandalone(logger=logger, timestamp=timestamp)
    return mgr


def create_beacon_manager_instance(mode, timestamp):
    if mode == "server":
        mgrb = BeaconManagerServer(logger=logger)
    else:
        mgrb = BeaconManagerStandalone(logger=logger, timestamp=timestamp)
    return mgrb


def reset():
    '''
    Resets and reconfigures Bluetooth parameters. The specific parameters
    affect connection speed negotiation. It's not pretty, but safer to
    change the conn params this way
    :return:
    '''
    # Resets BLE hci
    logger.info("Resetting bluetooth")
    reset_command = "hciconfig hci0 reset"
    args = shlex.split(reset_command)
    p = subprocess.Popen(args)

    # is this a raspberry pi?
    logger.info("Setting bluetooth connection parameters")
    if os.uname()[4][:3] == 'arm':
        logger.info("Raspberry Pi detected, changing bluetooth connection parameters")
        with open("/sys/kernel/debug/bluetooth/hci0/conn_min_interval", "w") as connparam:
            connparam.write("16")
        with open("/sys/kernel/debug/bluetooth/hci0/conn_max_interval", "w") as connparam:
            connparam.write("17")
    else:
        logger.warn("Not a Raspberry Pi, Bluetooth connection parameters remain untouched (communication may be slower)")

    time.sleep(2)  # requires sleep after reset
    logger.info("Done resetting bluetooth")


def kill_bluepy():
    """
    Kill orphaned/leftover/defunct bluepy-helper processes
    I'd like to move this to a separate utility file or something when
    we refactor
    """
    # get all the bluepy-helper processes
    CMD = "/bin/ps ax | grep bluepy-helper | grep -v grep | awk '{ print $1 }'"
    p = subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE)
    pidstr = p.communicate()[0]
    pids = pidstr.split("\n")
    pids = [int(pid) for pid in pids if pid.isdigit()]
    mypid = os.getpid()  # dont wanna kill our process by accident :)
    if mypid in pids:
        pids.remove(mypid)

    for pid in pids:
        # KILL KILL KILL
        try:
            os.kill(int(pid), signal.SIGKILL)
            # we waitpid to clean up defunct processes
            os.waitpid(int(pid), 0)
            logger.info("Process with PID {} killed".format(pid))
        except OSError as err:
            logger.error("Unable to kill process with pid {}".format(pid))
            logger.error(err)


def pull_devices(mgr, mgrb, start_recording):
    logger.info('Started pulling')
    activate_audio = False
    activate_proximity = False

    if start_recording is None or start_recording == "both":
        activate_audio = True
        activate_proximity = True
    elif start_recording == "audio":
        activate_audio = True
    elif start_recording == "proximity":
        activate_proximity = True
    elif start_recording == "none":
        activate_audio = False
        activate_proximity = False

    logger.info("Start recording: Audio = {}, Proximity = {}".format(activate_audio, activate_proximity))
    mode = "server" if isinstance(mgr, BadgeManagerServer) else "standalone"

    while True:
        mgr.pull_badges_list()
        mgrb.pull_beacons_list()

        # When we refactor we can change this, but for now:
        if mode == "server":
            logger.info("Attempting to offload data to server")
            offload_data()

        logger.info("Scanning for members...")
        scanned_devices = scan_for_devices(mgr.badges.keys())

        # Randomly shuffle devices
        random.shuffle(scanned_devices)

        # iterate before the actual data collection loop just to offload
        # voltages to the server (and update heartbeat on server)
        for device in scanned_devices:
            b = mgr.badges.get(device['mac'])
            # i don't think adv_payload is ever supposed to be empty,
            # but sometimes it is. and when it is, it breaks
            if device['device_info']['adv_payload'] is not None:
                b.last_voltage = device['device_info']['adv_payload']['voltage']
                b.observed_id = device['device_info']['adv_payload']['badge_id']
                observed_project_id = device['device_info']['adv_payload']['project_id']
                if b.observed_id != b.badge_id or b.project_id != observed_project_id:
                    logger.debug("Warning! Observed IDs do not match server settings. "
                                 "Observed: member_id:{}, project_id:{}. Expected: member_id:{}. project_id: {}"
                                 .format(b.observed_id, observed_project_id, b.badge_id, b.project_id))

            b.last_seen_ts = time.time()
            mgr.send_badge(device['mac'])

        # now the actual data collection
        for device in scanned_devices:
            # try to update latest badge timestamps from the server
            mac = device['mac']
            pull_success = mgr.pull_badge(mac)
            if not pull_success:
                logger.warn("""Problem pulling badge from server\n
                            Skipping badge with mac {} until next full badge list refresh"""
                            .format(mac))
                continue
            b = mgr.badges.get(mac)

            # pull data
            dialogue(b, activate_audio, activate_proximity, mode)

            # update timestamps on server
            mgr.send_badge(mac)

            time.sleep(2)  # requires sleep between devices

        logger.info("Scanning for beacons...")
        scanned_beacons = scan_for_bc_devices(mgrb.beacons.keys())

        # Randomly shuffle devices
        random.shuffle(scanned_beacons)

        # iterate before the actual data collection loop just to offload
        # voltages to the server (and update heartbeat on server)
        for device in scanned_beacons:
            bcn = mgrb.beacons.get(device['mac'])
            if device['device_info']['adv_payload'] is not None:
                bcn.last_voltage = device['device_info']['adv_payload']['voltage']
                bcn.observed_id = device['device_info']['adv_payload']['badge_id']
                observed_project_id = device['device_info']['adv_payload']['project_id']
                if bcn.observed_id != bcn.badge_id or bcn.project_id != observed_project_id:
                    logger.debug("Warning! Observed IDs do not match server settings. "
                                 "Observed: beacon_id:{}, project_id:{}. Expected: beacon_id:{}. project_id: {}"
                                 .format(bcn.observed_id, observed_project_id, bcn.badge_id, bcn.project_id))

            bcn.last_seen_ts = time.time()
            mgrb.send_beacon(device['mac'])

        # Update beacons with wrong id or project id
        for device in scanned_beacons:
            bcn = mgrb.beacons.get(device['mac'])
            if device['device_info']['adv_payload'] is not None:
                observed_id = device['device_info']['adv_payload']['badge_id']
                observed_project_id = device['device_info']['adv_payload']['project_id']
                if bcn.badge_id != observed_id or bcn.project_id != observed_project_id:
                    bcn.sync_timestamp()
                    mgrb.send_beacon(device['mac'])
                time.sleep(2)

        time.sleep(2)  # allow BLE time to disconnect

        # clean up any leftover bluepy processes
        kill_bluepy()


def sync_all_devices(mgr):
    logger.info('Syncing all badges recording.')
    mgr.pull_badges_list()
    for mac in mgr.badges:
        bdg = mgr.badges.get(mac)
        bdg.sync_timestamp()
        time.sleep(2)  # requires sleep between devices

    time.sleep(2)  # allow BLE time to disconnect


def devices_scanner(mgr, mgrb, show_all=False):
    logger.info('Scanning for badges')
    mgr.pull_badges_list()
    logger.info('Scanning for beacons')
    mgrb.pull_beacons_list()
    while True:
        logger.info("Scanning for devices...")
        scanned_devices = scan_for_devices(mgr.badges.keys(), show_all) + scan_for_bc_devices(mgrb.beacons.keys())
        with open(scans_file_name, "a") as fout:
            for device in scanned_devices:
                mac = device['mac']
                scan_date = device['device_info']['scan_date']
                rssi = device['device_info']['rssi']
                if device['device_info']['adv_payload']:
                    voltage = device['device_info']['adv_payload']['voltage']
                    observed_id = device['device_info']['adv_payload']['badge_id']
                    project_id = device['device_info']['adv_payload']['project_id']
                else:
                    voltage = 0.0
                    observed_id = -1
                    project_id = -1
                logger.debug("{},{},{:.2f},{:.2f},{},{}".
                             format(scan_date, mac, rssi, voltage, observed_id, project_id))
                fout.write("{},{},{:.2f},{:.2f},{},{}\n".
                           format(scan_date, mac, rssi, voltage, observed_id, project_id))
        time.sleep(5)  # give time to Ctrl-C


def start_all_devices(mgr):
    logger.info('Starting all badges recording.')
    while True:
        mgr.pull_badges_list()

        logger.info("Scanning for devices...")
        scanned_devices = scan_for_devices(mgr.badges.keys())

        for device in scanned_devices:
            dev_info = device['device_info']
            if dev_info['adv_payload']:
                sync = dev_info['adv_payload']['sync_status']
                audio = dev_info['adv_payload']['audio_status']
                proximity = dev_info['adv_payload']['proximity_status']
                badge_id = dev_info['adv_payload']['badge_id']
                project_id = dev_info['adv_payload']['project_id']

                if sync == 0 or audio == 0 or proximity == 0:
                    if project_id == 0:
                        logger.info("changing project ids {}".format(device['mac']))
                        logger.info("Starting {}".format(device['mac']))
                        bdg = mgr.badges.get(device['mac'])
                        bdg.start_recording()
                        time.sleep(2)  # requires sleep between devices
                    else:
                        logger.info("Starting {}".format(device['mac']))
                        bdg = mgr.badges.get(device['mac'])
                        bdg.start_recording()
                        time.sleep(2)  # requires sleep between devices
                else:
                    logger.info("No need to start {}".format(device['mac']))

        time.sleep(2)  # allow BLE time to disconnect


def print_badges(mgr, mgrb):
    logger.info("Printing badges:")
    mgr.pull_badges_list()
    mgrb.pull_beacons_list()

    badge_list = mgr.badges
    beacon_list = mgrb.beacons

    print("Members:")
    for key, value in badge_list.iteritems():
        print("{},{},{},{}".format(value.key, value.addr, value.badge_id, value.project_id))

    print("\nBadges:")
    for key, value in beacon_list.iteritems():
        print("{},{},{},{}".format(value.key, value.addr, value.badge_id, value.project_id))


def add_pull_command_options(subparsers):
    pull_parser = subparsers.add_parser('pull', help='Continuously pull data from badges')
    pull_parser.add_argument('-r', '--start_recording',
                             choices=('audio', 'proximity', 'both', 'none'), required=False,
                             default='both', dest='start_recording',
                             help='data recording option')


def add_scan_command_options(subparsers):
    scan_parser = subparsers.add_parser('scan', help='Continuously scan for badges')
    scan_parser.add_argument('-a', '--show_all', action='store_true', default=False,
                             help="Show all devices")


def add_sync_all_command_options(subparsers):
    sa_parser = subparsers.add_parser('sync_all', help='Send date to all devices in whitelist')


def add_start_all_command_options(subparsers):
    st_parser = subparsers.add_parser('start_all', help='Start recording on all devices in whitelist')


def add_print_badges_command_options(subparsers):
    lb_parser = subparsers.add_parser('print_badges', help='print badges in a CSV format')


if __name__ == "__main__":
    import time
    import argparse

    parser = argparse.ArgumentParser(description="Run scans, send dates, or continuously pull data")
    parser.add_argument('-dr', '--disable_reset_ble', action='store_true', default=False,
                        help="Do not reset BLE")
    parser.add_argument('-m', '--hub_mode', choices=('server', 'standalone'),
                        default='standalone', dest='hub_mode',
                        help="Operation mode - standalone (using a configuration file) or a server")
    parser.add_argument('-t', '--timestamp', type=int, required=False,
                        dest='timestamp', help='UTC timestamp to start pulling data from (int)')

    subparsers = parser.add_subparsers(help='Program mode (e.g. Scan, send dates, pull, scan etc.)', dest='mode')
    add_pull_command_options(subparsers)
    add_scan_command_options(subparsers)
    add_sync_all_command_options(subparsers)
    add_start_all_command_options(subparsers)
    add_print_badges_command_options(subparsers)

    args = parser.parse_args()

    mgr = create_badge_manager_instance(args.hub_mode, args.timestamp)
    mgrb = create_beacon_manager_instance(args.hub_mode, args.timestamp)

    if not args.disable_reset_ble:
        reset()

    if args.mode == "sync_all":
        sync_all_devices(mgr)

    # scan for devices
    if args.mode == "scan":
        devices_scanner(mgr, mgrb, args.show_all)

    # pull data from all devices
    if args.mode == "pull":
        pull_devices(mgr, mgrb, args.start_recording)

    if args.mode == "start_all":
        start_all_devices(mgr)

    if args.mode == "print_badges":
        print_badges(mgr, mgrb)

    exit(0)
38.298999
121
0.614956
3,289
26,771
4.812709
0.156279
0.020216
0.018574
0.018005
0.422579
0.347084
0.281825
0.23836
0.194264
0.173669
0
0.005378
0.277651
26,771
698
122
38.353868
0.813166
0.128273
0
0.288747
0
0.010616
0.18649
0.009576
0
0
0
0.001433
0
1
0.053079
false
0.004246
0.053079
0.002123
0.144374
0.025478
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42aede445a90e085482590f47cc1c5cb9b7e7e5
5,215
py
Python
local_search/sat_isfayer.py
arnaubena97/SatSolver-sat_isfayer
db7edc83547786deb7bf6b1c5d75b406f877ca15
[ "MIT" ]
null
null
null
local_search/sat_isfayer.py
arnaubena97/SatSolver-sat_isfayer
db7edc83547786deb7bf6b1c5d75b406f877ca15
[ "MIT" ]
null
null
null
local_search/sat_isfayer.py
arnaubena97/SatSolver-sat_isfayer
db7edc83547786deb7bf6b1c5d75b406f877ca15
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import sys
import random


def read_file(file_name):
    """File reader and parser: reads the number of variables and
    clauses, and puts the clauses in a list."""
    clauses = []
    with open(file_name) as all_file:
        for line in all_file:
            if line.startswith('c'):
                continue  # ignore comments
            if line.startswith('p'):
                num_variables = int(line.split()[2])  # set num_variables
                continue
            if line.strip() == "":
                continue
            clause = list(map(int, line.split()))
            clause.pop()  # drop the trailing 0 terminator
            clauses.append(clause)
    return num_variables, clauses


def print_sol(solution):
    """Method to print the solution that satisfies all the clauses."""
    print("s SATISFIABLE")
    print("v %s 0" % " ".join(map(str, solution)))
    exit(0)


class walksat_solver():

    def __init__(self, clauses, num_variables):
        """Constructor of the solver"""
        self.clauses = clauses
        self.num_variables = num_variables
        self.formula = []
        self.list_positions = self.create_positions()
        self.index_clauses_satisfied = []

    def randomSolution(self):
        """Create a random solution of the cnf formula.
        Ex: [-1, 2, 3, -4, ...]"""
        random_formula = [x if random.random() < 0.5 else -x
                          for x in range(self.num_variables + 1)]
        return random_formula[1:]

    def create_positions(self):
        """Return a list with the clause indexes in which each literal
        appears. First position is empty, and the index of the list is
        the variable. Ex: [ [], [2], [2, 3], ....]"""
        vars_positions = [[] for _ in range(self.num_variables * 2 + 1)]
        for index, clause in enumerate(self.clauses):
            for var in clause:
                vars_positions[var].append(index)
        return vars_positions

    def calculate_all_clauses_satisfy(self):
        """Returns a list with the number of variables that satisfy
        the clause with the same index. Method for all clauses.
        Ex: [1, 0, 2, 2] in test_0.cnf"""
        list_variables_satisfies = []
        for clause in range(len(self.clauses)):
            number_sat = self.clause_satisfy(clause)
            list_variables_satisfies.append(number_sat)
        return list_variables_satisfies

    def clause_satisfy(self, index):
        """Returns an integer, which is the number of variables in the
        formula that satisfy the clause indicated by the index.
        Ex: index = 1 --> clause[1] = [1, -2, 3, ..]"""
        satisfy = 0
        for variable in self.clauses[index]:
            if variable in self.formula:
                satisfy += 1
        return satisfy

    def select_all_unsatisfied(self):
        """Returns a list of indexes whose clause is not satisfied."""
        clauses_not_satisfied = []
        for index, value in enumerate(self.index_clauses_satisfied):
            if value == 0:
                clauses_not_satisfied.append(index)
        return clauses_not_satisfied

    def get_clause_unsatisfied(self, list_all_unsatisfied):
        """Returns a randomly selected unsatisfied clause"""
        return self.clauses[random.choice(list_all_unsatisfied)]

    def update(self, variable, x):
        """It is responsible for updating the list of the number of
        variables that satisfy the clause"""
        for index in self.list_positions[x * variable]:
            self.index_clauses_satisfied[index] += x

    def change_variable(self, clause_to_review):
        """Is responsible for assessing which is the best variable in
        the clause to change"""
        worst_wrong = sys.maxsize
        bests_variables = []
        for variable in clause_to_review:
            wrong = 0
            for index in self.list_positions[-variable]:
                if not self.index_clauses_satisfied[index] > 1:
                    wrong += 1
            if wrong <= worst_wrong:
                worst_wrong = wrong
                bests_variables.append(variable)
        return random.choice(bests_variables)

    def solve(self, max_tries=50000000, max_flips=3000):
        """Implementation of the solver"""
        # for _ in range(max_tries):
        while True:
            self.formula = self.randomSolution()
            self.index_clauses_satisfied = self.calculate_all_clauses_satisfy()
            for _ in range(max_flips):
                index_all_unsatisfied = self.select_all_unsatisfied()
                if len(index_all_unsatisfied) == 0:
                    print_sol(self.formula)
                clause_to_review = self.get_clause_unsatisfied(index_all_unsatisfied)
                variable = self.change_variable(clause_to_review)
                self.update(variable, 1)
                self.update(variable, -1)
                self.formula[abs(variable) - 1] *= -1


# Main
if __name__ == "__main__":
    if len(sys.argv) == 2:
        file_name = sys.argv[1]
    else:
        print("\n Command: python %s <file_name.cnf> \n" % sys.argv[0])
        exit(0)

    num_variables, clauses = read_file(file_name)
    sat = walksat_solver(clauses, num_variables)
    sat.solve()
    exit(0)
36.985816
100
0.607095
649
5,215
4.694915
0.217257
0.039383
0.026255
0.041024
0.09255
0.043978
0.026255
0.026255
0
0
0
0.014982
0.296069
5,215
140
101
37.25
0.815037
0.210163
0
0.032258
0
0
0.017771
0
0
0
0
0
0
1
0.129032
false
0
0.021505
0
0.247312
0.053763
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42c89b9ad4a67ef2088d23901ec3eee27d8dfed
1,426
py
Python
sparse_causal_model_learner_rl/annealer/threshold_projection.py
sergeivolodin/causality-disentanglement-rl
5a41b4a2e3d85fa7e9c8450215fdc6cf954df867
[ "CC0-1.0" ]
2
2020-12-11T05:26:24.000Z
2021-04-21T06:12:58.000Z
sparse_causal_model_learner_rl/annealer/threshold_projection.py
sergeivolodin/causality-disentanglement-rl
5a41b4a2e3d85fa7e9c8450215fdc6cf954df867
[ "CC0-1.0" ]
9
2020-04-30T16:29:50.000Z
2021-03-26T07:32:18.000Z
sparse_causal_model_learner_rl/annealer/threshold_projection.py
sergeivolodin/causality-disentanglement-rl
5a41b4a2e3d85fa7e9c8450215fdc6cf954df867
[ "CC0-1.0" ]
null
null
null
import gin
import torch
import logging

from sparse_causal_model_learner_rl.metrics import find_value, find_key


@gin.configurable
def ProjectionThreshold(config, config_object, epoch_info, temp,
                        adjust_every=100, metric_threshold=0.5, delta=0.5,
                        source_metric_key=None, min_hyper=0, max_hyper=1000,
                        gin_variable=None, **kwargs):
    try:
        metric_val = find_value(epoch_info, source_metric_key)
    except AssertionError as e:
        return config

    good = metric_val < metric_threshold
    hyper = gin.query_parameter(gin_variable)
    logging.info(f"Projection: metric={metric_val} threshold={metric_threshold} good={good} hyper={hyper}")

    if 'last_hyper_adjustment' not in temp:
        temp['last_hyper_adjustment'] = 0
    i = epoch_info['epochs']

    if good:
        temp['suggested_hyper'] = hyper - delta
    else:
        temp['suggested_hyper'] = hyper + delta

    if temp['suggested_hyper'] > max_hyper:
        temp['suggested_hyper'] = max_hyper
    if temp['suggested_hyper'] < min_hyper:
        temp['suggested_hyper'] = min_hyper

    if 'suggested_hyper' in temp and (i - temp['last_hyper_adjustment'] >= adjust_every):
        temp['last_hyper_adjustment'] = i
        with gin.unlock_config():
            gin.bind_parameter(gin_variable, temp['suggested_hyper'])
        del temp['suggested_hyper']
    return config
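Illustrative only (not from the repo): the adjustment rule above is a clamped bang-bang controller on a gin-bound hyperparameter. A minimal sketch isolating that rule from gin, with local stand-in names:

    def suggest(hyper, metric_val, metric_threshold=0.5, delta=0.5,
                min_hyper=0, max_hyper=1000):
        # step down while the metric is under threshold, up otherwise, then clamp
        suggested = hyper - delta if metric_val < metric_threshold else hyper + delta
        return max(min_hyper, min(max_hyper, suggested))

    assert suggest(10.0, 0.4) == 9.5   # metric below threshold -> relax
    assert suggest(10.0, 0.9) == 10.5  # metric above threshold -> tighten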
32.409091
108
0.670407
180
1,426
5.022222
0.355556
0.139381
0.159292
0.076327
0.176991
0
0
0
0
0
0
0.011851
0.230715
1,426
43
109
33.162791
0.812215
0
0
0.060606
0
0
0.224873
0.080983
0
0
0
0
0.030303
1
0.030303
false
0
0.121212
0
0.212121
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42cd1526653837e6ebdebb62cc32ac0a5f88b7c
15,684
py
Python
numpyro/contrib/control_flow/scan.py
ucals/numpyro
566a5311d660d28a630188063c03a018165a38a9
[ "Apache-2.0" ]
2
2021-01-10T06:27:51.000Z
2021-01-10T06:27:55.000Z
numpyro/contrib/control_flow/scan.py
ucals/numpyro
566a5311d660d28a630188063c03a018165a38a9
[ "Apache-2.0" ]
null
null
null
numpyro/contrib/control_flow/scan.py
ucals/numpyro
566a5311d660d28a630188063c03a018165a38a9
[ "Apache-2.0" ]
1
2020-12-23T13:27:39.000Z
2020-12-23T13:27:39.000Z
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

from collections import OrderedDict
from functools import partial

from jax import lax, random, tree_flatten, tree_map, tree_multimap, tree_unflatten
import jax.numpy as jnp
from jax.tree_util import register_pytree_node_class

from numpyro import handlers
from numpyro.primitives import _PYRO_STACK, Messenger, apply_stack
from numpyro.util import not_jax_tracer


@register_pytree_node_class
class PytreeTrace:
    def __init__(self, trace):
        self.trace = trace

    def tree_flatten(self):
        trace, aux_trace = {}, {}
        for name, site in self.trace.items():
            if site['type'] in ['sample', 'deterministic']:
                trace[name], aux_trace[name] = {}, {'_control_flow_done': True}
                for key in site:
                    if key in ['fn', 'args', 'value', 'intermediates']:
                        trace[name][key] = site[key]
                    # scanned sites have stop field because we trace them inside a block handler
                    elif key != 'stop':
                        aux_trace[name][key] = site[key]
        return (trace,), aux_trace

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        trace, = children
        for name, site in trace.items():
            site.update(aux_data[name])
        return cls(trace)


def _subs_wrapper(subs_map, i, length, site):
    value = None
    if isinstance(subs_map, dict) and site['name'] in subs_map:
        value = subs_map[site['name']]
    elif callable(subs_map):
        rng_key = site['kwargs'].get('rng_key')
        subs_map = handlers.seed(subs_map, rng_seed=rng_key) if rng_key is not None else subs_map
        value = subs_map(site)

    if value is not None:
        value_ndim = jnp.ndim(value)
        sample_shape = site['kwargs']['sample_shape']
        fn_ndim = len(sample_shape + site['fn'].shape())
        if value_ndim == fn_ndim:
            # this branch happens when substitute_fn is init_strategy,
            # where we apply init_strategy to each element in the scanned series
            return value
        elif value_ndim == fn_ndim + 1:
            # this branch happens when we substitute a series of values
            shape = jnp.shape(value)
            if shape[0] == length:
                return value[i]
            elif shape[0] < length:
                rng_key = site['kwargs']['rng_key']
                assert rng_key is not None
                # we use the substituted values if i < shape[0]
                # and generate a new sample otherwise
                return lax.cond(i < shape[0],
                                (value, i),
                                lambda val: val[0][val[1]],
                                rng_key,
                                lambda val: site['fn'](rng_key=val, sample_shape=sample_shape))
            else:
                raise RuntimeError(f"Substituted value for site {site['name']} "
                                   "requires length less than or equal to scan length."
                                   f" Expected length <= {length}, but got {shape[0]}.")
        else:
            raise RuntimeError(f"Something goes wrong. Expected ndim = {fn_ndim} or {fn_ndim+1},"
                               f" but got {value_ndim}. This might happen when you use nested scan,"
                               " which is currently not supported. Please report the issue to us!")


class promote_shapes(Messenger):
    # a helper messenger to promote shapes of `fn` and `value`
    # + msg: fn.batch_shape = (2, 3), value.shape = (3,) + fn.event_shape
    #   process_message(msg): promote value so that value.shape = (1, 3) + fn.event_shape
    # + msg: fn.batch_shape = (3,), value.shape = (2, 3) + fn.event_shape
    #   process_message(msg): promote fn so that fn.batch_shape = (1, 3).
    def process_message(self, msg):
        if msg["type"] == "sample" and msg["value"] is not None:
            fn, value = msg["fn"], msg["value"]
            value_batch_ndims = jnp.ndim(value) - fn.event_dim
            fn_batch_ndim = len(fn.batch_shape)
            prepend_shapes = (1,) * abs(fn_batch_ndim - value_batch_ndims)
            if fn_batch_ndim > value_batch_ndims:
                msg["value"] = jnp.reshape(value, prepend_shapes + jnp.shape(value))
            elif fn_batch_ndim < value_batch_ndims:
                msg["fn"] = tree_map(lambda x: jnp.reshape(x, prepend_shapes + jnp.shape(x)), fn)


def scan_enum(f, init, xs, length, reverse, rng_key=None, substitute_stack=None):
    from numpyro.contrib.funsor import enum, config_enumerate, markov, trace as packed_trace

    # XXX: This implementation only works for history size=1 but can be
    # extended to history size > 1 by running `f` `history_size` times
    # for initialization. However, `sequential_sum_product` does not
    # support history size > 1, so we skip supporting it here.
    # Note that `funsor.sum_product.sarkka_bilmes_product` does support history > 1.
    if reverse:
        x0 = tree_map(lambda x: x[-1], xs)
        xs_ = tree_map(lambda x: x[:-1], xs)
    else:
        x0 = tree_map(lambda x: x[0], xs)
        xs_ = tree_map(lambda x: x[1:], xs)

    carry_shape_at_t1 = None

    def body_fn(wrapped_carry, x, prefix=None):
        i, rng_key, carry = wrapped_carry
        init = True if (not_jax_tracer(i) and i == 0) else False
        rng_key, subkey = random.split(rng_key) if rng_key is not None else (None, None)

        seeded_fn = handlers.seed(f, subkey) if subkey is not None else f
        for subs_type, subs_map in substitute_stack:
            subs_fn = partial(_subs_wrapper, subs_map, i, length)
            if subs_type == 'condition':
                seeded_fn = handlers.condition(seeded_fn, condition_fn=subs_fn)
            elif subs_type == 'substitute':
                seeded_fn = handlers.substitute(seeded_fn, substitute_fn=subs_fn)

        if init:
            with handlers.scope(prefix="_init"):
                new_carry, y = seeded_fn(carry, x)
                trace = {}
        else:
            with handlers.block(), packed_trace() as trace, promote_shapes(), enum(), markov():
                # Like scan_wrapper, we collect the trace of scan's transition function
                # `seeded_fn` here. To put time dimension to the correct position, we need to
                # promote shapes to make `fn` and `value` at each site have the same batch dims
                # (e.g. if `fn.batch_shape = (2, 3)`, and value's batch_shape is (3,),
                # then we promote shape of value so that its batch shape is (1, 3)).
                new_carry, y = config_enumerate(seeded_fn)(carry, x)

            # store shape of new_carry at a global variable
            nonlocal carry_shape_at_t1
            carry_shape_at_t1 = [jnp.shape(x) for x in tree_flatten(new_carry)[0]]
            # make new_carry have the same shape as carry
            # FIXME: is this rigorous?
            new_carry = tree_multimap(lambda a, b: jnp.reshape(a, jnp.shape(b)),
                                      new_carry, carry)
        return (i + jnp.array(1), rng_key, new_carry), (PytreeTrace(trace), y)

    with markov():
        wrapped_carry = (0, rng_key, init)
        wrapped_carry, (_, y0) = body_fn(wrapped_carry, x0)
        if length == 1:
            ys = tree_map(lambda x: jnp.expand_dims(x, 0), y0)
            return wrapped_carry, (PytreeTrace({}), ys)
        wrapped_carry, (pytree_trace, ys) = lax.scan(body_fn, wrapped_carry, xs_, length - 1, reverse)

    first_var = None
    for name, site in pytree_trace.trace.items():
        # add `time` dimension, the name will be '_time_{first variable in the trace}'
        if first_var is None:
            first_var = name
        leftmost_dim = min(site['infer']['dim_to_name'])
        site['infer']['dim_to_name'][leftmost_dim - 1] = '_time_{}'.format(first_var)

    # similar to carry, we need to reshape due to shape alternating in markov
    ys = tree_multimap(lambda z0, z: jnp.reshape(z, z.shape[:1] + jnp.shape(z0)), y0, ys)
    # we also need to reshape `carry` to match sequential behavior
    if length % 2 == 0:
        t, rng_key, carry = wrapped_carry
        flatten_carry, treedef = tree_flatten(carry)
        flatten_carry = [jnp.reshape(x, t1_shape)
                         for x, t1_shape in zip(flatten_carry, carry_shape_at_t1)]
        carry = tree_unflatten(treedef, flatten_carry)
        wrapped_carry = (t, rng_key, carry)
    return wrapped_carry, (pytree_trace, ys)


def scan_wrapper(f, init, xs, length, reverse, rng_key=None, substitute_stack=[], enum=False):
    if length is None:
        length = tree_flatten(xs)[0][0].shape[0]

    if enum:
        return scan_enum(f, init, xs, length, reverse, rng_key, substitute_stack)

    def body_fn(wrapped_carry, x):
        i, rng_key, carry = wrapped_carry
        rng_key, subkey = random.split(rng_key) if rng_key is not None else (None, None)

        with handlers.block():
            seeded_fn = handlers.seed(f, subkey) if subkey is not None else f
            for subs_type, subs_map in substitute_stack:
                subs_fn = partial(_subs_wrapper, subs_map, i, length)
                if subs_type == 'condition':
                    seeded_fn = handlers.condition(seeded_fn, condition_fn=subs_fn)
                elif subs_type == 'substitute':
                    seeded_fn = handlers.substitute(seeded_fn, substitute_fn=subs_fn)

            with handlers.trace() as trace:
                carry, y = seeded_fn(carry, x)

        return (i + 1, rng_key, carry), (PytreeTrace(trace), y)

    return lax.scan(body_fn, (jnp.array(0), rng_key, init), xs, length=length, reverse=reverse)


def scan(f, init, xs, length=None, reverse=False):
    """
    This primitive scans a function over the leading array axes of
    `xs` while carrying along state. See :func:`jax.lax.scan` for more
    information.

    **Usage**:

    .. doctest::

       >>> import numpy as np
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.contrib.control_flow import scan
       >>>
       >>> def gaussian_hmm(y=None, T=10):
       ...     def transition(x_prev, y_curr):
       ...         x_curr = numpyro.sample('x', dist.Normal(x_prev, 1))
       ...         y_curr = numpyro.sample('y', dist.Normal(x_curr, 1), obs=y_curr)
       ...         return x_curr, (x_curr, y_curr)
       ...
       ...     x0 = numpyro.sample('x_0', dist.Normal(0, 1))
       ...     _, (x, y) = scan(transition, x0, y, length=T)
       ...     return (x, y)
       >>>
       >>> # here we do some quick tests
       >>> with numpyro.handlers.seed(rng_seed=0):
       ...     x, y = gaussian_hmm(np.arange(10.))
       >>> assert x.shape == (10,) and y.shape == (10,)
       >>> assert np.all(y == np.arange(10))
       >>>
       >>> with numpyro.handlers.seed(rng_seed=0):  # generative
       ...     x, y = gaussian_hmm()
       >>> assert x.shape == (10,) and y.shape == (10,)

    .. warning:: This is an experimental utility function that allows users to use
        JAX control flow with NumPyro's effect handlers. Currently, `sample` and
        `deterministic` sites within the scan body `f` are supported. If you notice
        that any effect handlers or distributions are unsupported, please file an issue.

    .. note:: It is ambiguous to align `scan` dimension inside a `plate` context.
        So the following pattern won't be supported

        .. code-block:: python

            with numpyro.plate('N', 10):
                last, ys = scan(f, init, xs)

        All `plate` statements should be put inside `f`. For example, the corresponding
        working code is

        .. code-block:: python

            def g(*args, **kwargs):
                with numpyro.plate('N', 10):
                    return f(*args, **kwargs)

            last, ys = scan(g, init, xs)

    .. note:: Nested scan is currently not supported.

    .. note:: We can scan over discrete latent variables in `f`. The joint density is
        evaluated using parallel-scan (reference [1]) over time dimension, which
        reduces parallel complexity to `O(log(length))`.

        Currently, only the equivalence to
        :class:`~numpyro.contrib.funsor.enum_messenger.markov(history_size=1)`
        is supported. A :class:`~numpyro.handlers.trace` of `scan` with discrete latent
        variables will contain the following sites:

            + init sites: those sites belong to the first trace of `f`. Each of
              them will have name prefixed with `_init/`.
            + scanned sites: those sites collect the values of the remaining scan
              loop over `f`. An addition time dimension `_time_foo` will be added
              to those sites, where `foo` is the name of the first site appeared
              in `f`.

        Not all transition functions `f` are supported. All of the restrictions from
        Pyro's enumeration tutorial [2] still apply here. In addition, there should
        not have any site outside of `scan` depend on the first output of `scan`
        (the last carry value).

    **References**

    1. *Temporal Parallelization of Bayesian Smoothers*,
       Simo Sarkka, Angel F. Garcia-Fernandez
       (https://arxiv.org/abs/1905.13002)

    2. *Inference with Discrete Latent Variables*
       (http://pyro.ai/examples/enumeration.html#Dependencies-among-plates)

    :param callable f: a function to be scanned.
    :param init: the initial carrying state
    :param xs: the values over which we scan along the leading axis. This can
        be any JAX pytree (e.g. list/dict of arrays).
    :param length: optional value specifying the length of `xs`
        but can be used when `xs` is an empty pytree (e.g. None)
    :param bool reverse: optional boolean specifying whether to run the scan iteration
        forward (the default) or in reverse
    :return: output of scan, quoted from :func:`jax.lax.scan` docs:
        "pair of type (c, [b]) where the first element represents the final loop
        carry value and the second element represents the stacked outputs of the
        second output of f when scanned over the leading axis of the inputs".
    """
    # if there are no active Messengers, we just run and return it as expected:
    if not _PYRO_STACK:
        (length, rng_key, carry), (pytree_trace, ys) = scan_wrapper(
            f, init, xs, length=length, reverse=reverse)
    else:
        # Otherwise, we initialize a message...
        initial_msg = {
            'type': 'control_flow',
            'fn': scan_wrapper,
            'args': (f, init, xs, length, reverse),
            'kwargs': {'rng_key': None, 'substitute_stack': []},
            'value': None,
        }

        # ...and use apply_stack to send it to the Messengers
        msg = apply_stack(initial_msg)
        (length, rng_key, carry), (pytree_trace, ys) = msg['value']

        if not msg["kwargs"].get("enum", False):
            for msg in pytree_trace.trace.values():
                apply_stack(msg)
        else:
            from numpyro.contrib.funsor import to_funsor
            from numpyro.contrib.funsor.enum_messenger import LocalNamedMessenger

            for msg in pytree_trace.trace.values():
                with LocalNamedMessenger():
                    dim_to_name = msg["infer"].get("dim_to_name")
                    to_funsor(msg["value"],
                              dim_to_name=OrderedDict([(k, dim_to_name[k]) for k in sorted(dim_to_name)]))
                    apply_stack(msg)

    return carry, ys
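A minimal usage sketch (an illustrative addition, assuming a NumPyro install of roughly this vintage): tracing a model built on scan should record stacked per-step values for sample sites inside the scan body.

    import numpyro
    import numpyro.distributions as dist
    from numpyro.contrib.control_flow import scan

    def model(T=5):
        def transition(x_prev, _):
            x = numpyro.sample('x', dist.Normal(x_prev, 1.))
            return x, x
        x0 = numpyro.sample('x_0', dist.Normal(0., 1.))
        _, xs = scan(transition, x0, None, length=T)  # xs=None, explicit length
        return xs

    with numpyro.handlers.seed(rng_seed=0):
        tr = numpyro.handlers.trace(model).get_trace()
    assert tr['x']['value'].shape == (5,)  # scanned site is stacked over time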
44.939828
116
0.605011
2,129
15,684
4.310005
0.196806
0.018963
0.007847
0.009154
0.221774
0.170445
0.133936
0.106037
0.092524
0.080209
0
0.009539
0.291507
15,684
348
117
45.068966
0.816235
0.389378
0
0.16
0
0
0.072103
0
0
0
0
0.002874
0.005714
1
0.057143
false
0
0.062857
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42e0214aa8abe8fa4ef98083bd64acd6f94ca90
1,245
py
Python
e2xgrader/preprocessors/overwritecells.py
divindevaiah/e2xgrader
19eb4662e4eee5ddef673097517e4bd4fb469e62
[ "MIT" ]
2
2021-10-02T10:48:47.000Z
2022-03-02T14:00:48.000Z
e2xgrader/preprocessors/overwritecells.py
divindevaiah/e2xgrader
19eb4662e4eee5ddef673097517e4bd4fb469e62
[ "MIT" ]
70
2020-10-23T16:42:01.000Z
2022-03-14T16:33:54.000Z
e2xgrader/preprocessors/overwritecells.py
divindevaiah/e2xgrader
19eb4662e4eee5ddef673097517e4bd4fb469e62
[ "MIT" ]
10
2020-11-22T16:36:16.000Z
2022-03-02T15:51:24.000Z
import json

from nbformat.notebooknode import NotebookNode
from nbconvert.exporters.exporter import ResourcesDict
from typing import Tuple
from nbgrader.api import MissingEntry
from nbgrader.preprocessors import OverwriteCells as NbgraderOverwriteCells

from ..utils.extra_cells import is_singlechoice, is_multiplechoice


class OverwriteCells(NbgraderOverwriteCells):
    def preprocess_cell(
        self, cell: NotebookNode, resources: ResourcesDict, cell_index: int
    ) -> Tuple[NotebookNode, ResourcesDict]:
        if not (is_singlechoice(cell) or is_multiplechoice(cell)):
            return super().preprocess_cell(cell, resources, cell_index)

        grade_id = cell.metadata.get("nbgrader", {}).get("grade_id", None)
        if grade_id is None:
            return cell, resources
        try:
            source_cell = self.gradebook.find_source_cell(
                grade_id, self.notebook_id, self.assignment_id
            )
        except MissingEntry:
            self.log.warning(f"Cell {grade_id} does not exist in database")
            del cell.metadata.nbgrader["grade_id"]
            return cell, resources

        cell.metadata.extended_cell.source = json.loads(source_cell.source)
        return cell, resources
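A hedged wiring sketch (an addition for illustration; the exact trait names vary by nbgrader version): preprocessors like this one are normally swapped in through nbconvert/nbgrader configuration rather than instantiated by hand, e.g. in a config file:

    # hypothetical nbgrader_config.py snippet
    c.Exporter.preprocessors = ['e2xgrader.preprocessors.OverwriteCells']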
35.571429
75
0.706024
142
1,245
6.035211
0.415493
0.049008
0.066511
0
0
0
0
0
0
0
0
0
0.219277
1,245
34
76
36.617647
0.881687
0
0
0.115385
0
0
0.053012
0
0
0
0
0
0
1
0.038462
false
0
0.269231
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42ea50cd75ed3588bee01251935be095b9cd852
9,261
py
Python
tools/pdf2txt.py
ehtec/pdfminer.six
5b1823f25ab998e904fc5d81687732580f23e3b9
[ "MIT" ]
null
null
null
tools/pdf2txt.py
ehtec/pdfminer.six
5b1823f25ab998e904fc5d81687732580f23e3b9
[ "MIT" ]
1
2022-01-31T22:58:07.000Z
2022-01-31T22:58:07.000Z
tools/pdf2txt.py
phantomcyber/pdfminer.six
e35a9319a6ae5d310f08f07a5edf16aadc529c1e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
"""A command line tool for extracting text and images from PDF and
output it to plain text, html, xml or tags."""
import argparse
import logging
import sys
from typing import Any, Container, Iterable, List, Optional

import pdfminer.high_level
from pdfminer.layout import LAParams
from pdfminer.utils import AnyIO

logging.basicConfig()

OUTPUT_TYPES = ((".htm", "html"),
                (".html", "html"),
                (".xml", "xml"),
                (".tag", "tag"))


def float_or_disabled(x: str) -> Optional[float]:
    if x.lower().strip() == "disabled":
        return None
    try:
        return float(x)
    except ValueError:
        raise argparse.ArgumentTypeError("invalid float value: {}".format(x))


def extract_text(
    files: Iterable[str] = [],
    outfile: str = '-',
    laparams: Optional[LAParams] = None,
    output_type: str = 'text',
    codec: str = 'utf-8',
    strip_control: bool = False,
    maxpages: int = 0,
    page_numbers: Optional[Container[int]] = None,
    password: str = "",
    scale: float = 1.0,
    rotation: int = 0,
    layoutmode: str = 'normal',
    output_dir: Optional[str] = None,
    debug: bool = False,
    disable_caching: bool = False,
    **kwargs: Any
) -> AnyIO:
    if not files:
        raise ValueError("Must provide files to work upon!")

    if output_type == "text" and outfile != "-":
        for override, alttype in OUTPUT_TYPES:
            if outfile.endswith(override):
                output_type = alttype

    if outfile == "-":
        outfp: AnyIO = sys.stdout
        if sys.stdout.encoding is not None:
            codec = 'utf-8'
    else:
        outfp = open(outfile, "wb")

    for fname in files:
        with open(fname, "rb") as fp:
            pdfminer.high_level.extract_text_to_fp(fp, **locals())
    return outfp


def parse_args(args: Optional[List[str]]) -> argparse.Namespace:
    parser = argparse.ArgumentParser(description=__doc__, add_help=True)
    parser.add_argument(
        "files", type=str, default=None, nargs="+",
        help="One or more paths to PDF files.")
    parser.add_argument(
        "--version", "-v", action="version",
        version="pdfminer.six v{}".format(pdfminer.__version__))
    parser.add_argument(
        "--debug", "-d", default=False, action="store_true",
        help="Use debug logging level.")
    parser.add_argument(
        "--disable-caching", "-C", default=False, action="store_true",
        help="If caching or resources, such as fonts, should be disabled.")

    parse_params = parser.add_argument_group(
        'Parser', description='Used during PDF parsing')
    parse_params.add_argument(
        "--page-numbers", type=int, default=None, nargs="+",
        help="A space-separated list of page numbers to parse.")
    parse_params.add_argument(
        "--pagenos", "-p", type=str,
        help="A comma-separated list of page numbers to parse. "
             "Included for legacy applications, use --page-numbers "
             "for more idiomatic argument entry.")
    parse_params.add_argument(
        "--maxpages", "-m", type=int, default=0,
        help="The maximum number of pages to parse.")
    parse_params.add_argument(
        "--password", "-P", type=str, default="",
        help="The password to use for decrypting PDF file.")
    parse_params.add_argument(
        "--rotation", "-R", default=0, type=int,
        help="The number of degrees to rotate the PDF "
             "before other types of processing.")

    la_params = LAParams()  # will be used for defaults
    la_param_group = parser.add_argument_group(
        'Layout analysis', description='Used during layout analysis.')
    la_param_group.add_argument(
        "--no-laparams", "-n", default=False, action="store_true",
        help="If layout analysis parameters should be ignored.")
    la_param_group.add_argument(
        "--detect-vertical", "-V", default=la_params.detect_vertical,
        action="store_true",
        help="If vertical text should be considered during layout analysis")
    la_param_group.add_argument(
        "--line-overlap", type=float, default=la_params.line_overlap,
        help='If two characters have more overlap than this they '
             'are considered to be on the same line. The overlap is specified '
             'relative to the minimum height of both characters.')
    la_param_group.add_argument(
        "--char-margin", "-M", type=float, default=la_params.char_margin,
        help="If two characters are closer together than this margin they "
             "are considered to be part of the same line. The margin is "
             "specified relative to the width of the character.")
    la_param_group.add_argument(
        "--word-margin", "-W", type=float, default=la_params.word_margin,
        help="If two characters on the same line are further apart than this "
             "margin then they are considered to be two separate words, and "
             "an intermediate space will be added for readability. The margin "
             "is specified relative to the width of the character.")
    la_param_group.add_argument(
        "--line-margin", "-L", type=float, default=la_params.line_margin,
        help="If two lines are close together they are considered to "
             "be part of the same paragraph. The margin is specified "
             "relative to the height of a line.")
    la_param_group.add_argument(
        "--boxes-flow", "-F", type=float_or_disabled,
        default=la_params.boxes_flow,
        help="Specifies how much a horizontal and vertical position of a "
             "text matters when determining the order of lines. The value "
             "should be within the range of -1.0 (only horizontal position "
             "matters) to +1.0 (only vertical position matters). You can also "
             "pass `disabled` to disable advanced layout analysis, and "
             "instead return text based on the position of the bottom left "
             "corner of the text box.")
    la_param_group.add_argument(
        "--all-texts", "-A", default=la_params.all_texts, action="store_true",
        help="If layout analysis should be performed on text in figures.")

    output_params = parser.add_argument_group(
        'Output', description='Used during output generation.')
    output_params.add_argument(
        "--outfile", "-o", type=str, default="-",
        help="Path to file where output is written. "
             "Or \"-\" (default) to write to stdout.")
    output_params.add_argument(
        "--output_type", "-t", type=str, default="text",
        help="Type of output to generate {text,html,xml,tag}.")
    output_params.add_argument(
        "--codec", "-c", type=str, default="utf-8",
        help="Text encoding to use in output file.")
    output_params.add_argument(
        "--output-dir", "-O", default=None,
        help="The output directory to put extracted images in. If not given, "
             "images are not extracted.")
    output_params.add_argument(
        "--layoutmode", "-Y", default="normal", type=str,
        help="Type of layout to use when generating html "
             "{normal,exact,loose}. If normal, each line is "
             "positioned separately in the html. If exact, "
             "each character is positioned separately in "
             "the html. If loose, same result as normal "
             "but with an additional newline after each "
             "text line. Only used when output_type is html.")
    output_params.add_argument(
        "--scale", "-s", type=float, default=1.0,
        help="The amount of zoom to use when generating html file. "
             "Only used when output_type is html.")
    output_params.add_argument(
        "--strip-control", "-S", default=False, action="store_true",
        help="Remove control statement from text. "
             "Only used when output_type is xml.")

    parsed_args = parser.parse_args(args=args)

    # Propagate parsed layout parameters to LAParams object
    if parsed_args.no_laparams:
        parsed_args.laparams = None
    else:
        parsed_args.laparams = LAParams(
            line_overlap=parsed_args.line_overlap,
            char_margin=parsed_args.char_margin,
            line_margin=parsed_args.line_margin,
            word_margin=parsed_args.word_margin,
            boxes_flow=parsed_args.boxes_flow,
            detect_vertical=parsed_args.detect_vertical,
            all_texts=parsed_args.all_texts,
        )

    if parsed_args.page_numbers:
        parsed_args.page_numbers = {x - 1 for x in parsed_args.page_numbers}

    if parsed_args.pagenos:
        parsed_args.page_numbers = {int(x) - 1 for x in parsed_args.pagenos.split(",")}

    if parsed_args.output_type == "text" and parsed_args.outfile != "-":
        for override, alttype in OUTPUT_TYPES:
            if parsed_args.outfile.endswith(override):
                parsed_args.output_type = alttype

    return parsed_args


def main(args: Optional[List[str]] = None) -> int:
    parsed_args = parse_args(args)
    outfp = extract_text(**vars(parsed_args))
    outfp.close()
    return 0


if __name__ == '__main__':
    sys.exit(main())
41.34375
85
0.632005
1,188
9,261
4.781145
0.239899
0.052289
0.035915
0.021127
0.250352
0.173415
0.129577
0.085563
0.056338
0.044366
0
0.002758
0.25602
9,261
223
86
41.529148
0.821626
0.022568
0
0.145078
0
0
0.341442
0.002322
0
0
0
0
0
1
0.020725
false
0.020725
0.036269
0
0.082902
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42eca67de3f090707cbdfd6324c3cd84ee5458f
2,757
py
Python
nython/nythonize.py
agungnasik57/nython
cf499fe20f86e2685671495bd941b411fa066813
[ "MIT" ]
53
2020-02-11T15:10:23.000Z
2021-10-05T12:47:14.000Z
nython/nythonize.py
agungnasik57/nython
cf499fe20f86e2685671495bd941b411fa066813
[ "MIT" ]
null
null
null
nython/nythonize.py
agungnasik57/nython
cf499fe20f86e2685671495bd941b411fa066813
[ "MIT" ]
4
2020-02-12T07:03:06.000Z
2020-08-15T14:53:39.000Z
"""Compile Nim libraries as Python Extension Modules. If you want your namespace to coexist with your pthon code, name this ponim.nim and then your import will look like `from ponim.nim import adder` and `from ponim import subtractor`. There must be a way to smooth that out in the __init__.py file somehow. Note that the file must be in the included source code dir. Currently it is easiest to just put this in with your python code. """ from os import listdir, mkdir from os.path import join, expanduser from setuptools import Extension from shutil import copyfile, rmtree from typing import Sequence, Dict, List import subprocess import sys import pathlib # class NimLib(TypedDict): # """Wrapper around a lib name and path for nim cdoe""" # name: str # path: str def nythonize(nimbase: str, modules: Sequence[Dict[str, str]]) -> List[Extension]: """Compile a Nim library as a Python Extension Module. `nimbase` is the path to `nimbase.h` on your system, which is needed for Python to compile gene Nim generated C code. This builds a set of Extenstions, which are then passed back to setuptools. """ extensions = [] # Create a top level working dir rmtree(join("build", "nim_build"), ignore_errors=True) pathlib.Path(join("build", "nim_build")).mkdir(parents=True) for module in modules: module_dir = join("build", "nim_build", f"{module['name']}_build") rmtree(module_dir, ignore_errors=True) mkdir(module_dir) subprocess.run( [ "nim", "compileToC", "--compileOnly", "-d:release", "-d:ssl", "--app:lib", "--opt:speed", "--gc:markAndSweep", f"--nimcache:{module_dir}", module["path"], ], check=True, stderr=sys.stdout.buffer, ) copyfile( nimbase, join(module_dir, "nimbase.h"), ) sources = [] for c_source_file in listdir(module_dir): if c_source_file.endswith(".c"): sources.append(join(module_dir, c_source_file)) extensions.append( Extension( name=module["name"], sources=sources, extra_compile_args=[ "-flto", "-ffast-math", "-march=native", "-mtune=native", "-O3", "-fno-ident", "-fsingle-precision-constant", ], extra_link_args=["-s"], include_dirs=[module_dir], ) ) return extensions
31.689655
82
0.564382
321
2,757
4.757009
0.461059
0.047151
0.023576
0.033399
0
0
0
0
0
0
0
0.000545
0.334421
2,757
86
83
32.05814
0.831608
0.299601
0
0.035088
0
0
0.141728
0.037935
0
0
0
0
0
1
0.017544
false
0
0.140351
0
0.175439
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f42fc38f6dae49e6659d55730c3133cb884a1c0e
3,591
py
Python
tests/contrib/test_util.py
lixinso/pyro
ca0d6417bed3882a47cb8cbb01b36f403ee903d5
[ "MIT" ]
10
2020-03-18T14:41:25.000Z
2021-07-04T08:49:57.000Z
tests/contrib/test_util.py
lixinso/pyro
ca0d6417bed3882a47cb8cbb01b36f403ee903d5
[ "MIT" ]
19
2018-10-30T13:45:31.000Z
2019-09-27T14:16:57.000Z
tests/contrib/test_util.py
lixinso/pyro
ca0d6417bed3882a47cb8cbb01b36f403ee903d5
[ "MIT" ]
5
2020-06-21T23:40:35.000Z
2021-11-09T16:18:42.000Z
from collections import OrderedDict

import pytest
import torch

import pyro.distributions as dist
from pyro.contrib.util import (
    get_indices, tensor_to_dict, rmv, rvv, lexpand, rexpand, rdiag, rtril, hessian
)
from tests.common import assert_equal


def test_get_indices_sizes():
    sizes = OrderedDict([("a", 2), ("b", 2), ("c", 2)])
    assert_equal(get_indices(["b"], sizes=sizes), torch.tensor([2, 3]))
    assert_equal(get_indices(["b", "c"], sizes=sizes), torch.tensor([2, 3, 4, 5]))
    tensors = OrderedDict([("a", torch.ones(2)), ("b", torch.ones(2)), ("c", torch.ones(2))])
    assert_equal(get_indices(["b"], tensors=tensors), torch.tensor([2, 3]))
    assert_equal(get_indices(["b", "c"], tensors=tensors), torch.tensor([2, 3, 4, 5]))


def test_tensor_to_dict():
    sizes = OrderedDict([("a", 2), ("b", 2), ("c", 2)])
    vector = torch.tensor([1., 2, 3, 4, 5, 6])
    assert_equal(tensor_to_dict(sizes, vector), {"a": torch.tensor([1., 2.]),
                                                 "b": torch.tensor([3., 4.]),
                                                 "c": torch.tensor([5., 6.])})
    assert_equal(tensor_to_dict(sizes, vector, subset=["b"]),
                 {"b": torch.tensor([3., 4.])})


@pytest.mark.parametrize("A,b", [
    (torch.tensor([[1., 2.], [2., -3.]]), torch.tensor([-1., 2.]))
])
def test_rmv(A, b):
    assert_equal(rmv(A, b), A.mv(b), prec=1e-8)
    batched_A = lexpand(A, 5, 4)
    batched_b = lexpand(b, 5, 4)
    expected_Ab = lexpand(A.mv(b), 5, 4)
    assert_equal(rmv(batched_A, batched_b), expected_Ab, prec=1e-8)


@pytest.mark.parametrize("a,b", [
    (torch.tensor([1., 2.]), torch.tensor([-1., 2.]))
])
def test_rvv(a, b):
    assert_equal(rvv(a, b), torch.dot(a, b), prec=1e-8)
    batched_a = lexpand(a, 5, 4)
    batched_b = lexpand(b, 5, 4)
    expected_ab = lexpand(torch.dot(a, b), 5, 4)
    assert_equal(rvv(batched_a, batched_b), expected_ab, prec=1e-8)


def test_lexpand():
    A = torch.tensor([[1., 2.], [-2., 0]])
    assert_equal(lexpand(A), A, prec=1e-8)
    assert_equal(lexpand(A, 4), A.expand(4, 2, 2), prec=1e-8)
    assert_equal(lexpand(A, 4, 2), A.expand(4, 2, 2, 2), prec=1e-8)


def test_rexpand():
    A = torch.tensor([[1., 2.], [-2., 0]])
    assert_equal(rexpand(A), A, prec=1e-8)
    assert_equal(rexpand(A, 4), A.unsqueeze(-1).expand(2, 2, 4), prec=1e-8)
    assert_equal(rexpand(A, 4, 2), A.unsqueeze(-1).unsqueeze(-1).expand(2, 2, 4, 2), prec=1e-8)


def test_rtril():
    A = torch.tensor([[1., 2.], [-2., 0]])
    assert_equal(rtril(A), torch.tril(A), prec=1e-8)
    expanded = lexpand(A, 5, 4)
    expected = lexpand(torch.tril(A), 5, 4)
    assert_equal(rtril(expanded), expected, prec=1e-8)


def test_rdiag():
    v = torch.tensor([1., 2., -1.])
    assert_equal(rdiag(v), torch.diag(v), prec=1e-8)
    expanded = lexpand(v, 5, 4)
    expected = lexpand(torch.diag(v), 5, 4)
    assert_equal(rdiag(expanded), expected, prec=1e-8)


def test_hessian_mvn():
    tmp = torch.randn(3, 10)
    cov = torch.matmul(tmp, tmp.t())
    mvn = dist.MultivariateNormal(cov.new_zeros(3), cov)
    x = torch.randn(3, requires_grad=True)
    y = mvn.log_prob(x)
    assert_equal(hessian(y, x), -mvn.precision_matrix)


def test_hessian_multi_variables():
    x = torch.randn(3, requires_grad=True)
    z = torch.randn(3, requires_grad=True)
    y = (x ** 2 * z + z ** 3).sum()
    H = hessian(y, (x, z))
    Hxx = (2 * z).diag()
    Hxz = (2 * x).diag()
    Hzz = (6 * z).diag()
    target_H = torch.cat([torch.cat([Hxx, Hxz]), torch.cat([Hxz, Hzz])], dim=1)
    assert_equal(H, target_H)
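Why test_hessian_mvn expects the negated precision matrix (an explanatory addition): for a zero-mean multivariate normal, log p(x) = -0.5 * x^T P x + const with P = Sigma^{-1}, so the Hessian of log p(x) in x is -P. A standalone check of that identity with plain autograd (assumes only PyTorch):

    import torch
    P = torch.tensor([[2., 0.5], [0.5, 1.]])  # a symmetric positive-definite "precision"
    x = torch.randn(2, requires_grad=True)
    y = -0.5 * x @ P @ x
    g, = torch.autograd.grad(y, x, create_graph=True)
    H = torch.stack([torch.autograd.grad(g[i], x, retain_graph=True)[0] for i in range(2)])
    assert torch.allclose(H, -P)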
34.528846
95
0.588972
579
3,591
3.530225
0.158895
0.123777
0.047945
0.063601
0.55137
0.4682
0.355186
0.311644
0.23728
0.094912
0
0.050087
0.199387
3,591
103
96
34.864078
0.66087
0
0
0.1375
0
0
0.00724
0
0
0
0
0
0.2875
1
0.125
false
0
0.075
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4313551859e5b967cf0a91de7f015a788b3e06f
20,473
py
Python
Diffnet++/class/DataModule.py
mIXs222/diffnet
1f580332254a5113ed7b88b9b2e0aa467344e94d
[ "MIT" ]
null
null
null
Diffnet++/class/DataModule.py
mIXs222/diffnet
1f580332254a5113ed7b88b9b2e0aa467344e94d
[ "MIT" ]
null
null
null
Diffnet++/class/DataModule.py
mIXs222/diffnet
1f580332254a5113ed7b88b9b2e0aa467344e94d
[ "MIT" ]
null
null
null
from __future__ import division
from collections import defaultdict
import numpy as np
from time import time
import random

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# import tensorflow as tf


class DataModule():
    def __init__(self, conf, filename):
        self.conf = conf
        self.data_dict = {}
        self.terminal_flag = 1
        self.filename = filename
        self.index = 0

    ####### Initialize Procedures #######
    def prepareModelSupplement(self, model):
        data_dict = {}
        if 'CONSUMED_ITEMS_SPARSE_MATRIX' in model.supply_set:
            self.generateConsumedItemsSparseMatrix()
            #self.arrangePositiveData()
            data_dict['CONSUMED_ITEMS_INDICES_INPUT'] = self.consumed_items_indices_list
            data_dict['CONSUMED_ITEMS_VALUES_INPUT'] = self.consumed_items_values_list
            data_dict['CONSUMED_ITEMS_VALUES_WEIGHT_AVG_INPUT'] = self.consumed_items_values_weight_avg_list
            data_dict['CONSUMED_ITEMS_NUM_INPUT'] = self.consumed_item_num_list
            data_dict['CONSUMED_ITEMS_NUM_DICT_INPUT'] = self.user_item_num_dict
            data_dict['USER_ITEM_SPARSITY_DICT'] = self.user_item_sparsity_dict
        if 'SOCIAL_NEIGHBORS_SPARSE_MATRIX' in model.supply_set:
            self.readSocialNeighbors()
            self.generateSocialNeighborsSparseMatrix()
            data_dict['SOCIAL_NEIGHBORS_INDICES_INPUT'] = self.social_neighbors_indices_list
            data_dict['SOCIAL_NEIGHBORS_VALUES_INPUT'] = self.social_neighbors_values_list
            data_dict['SOCIAL_NEIGHBORS_VALUES_WEIGHT_AVG_INPUT'] = self.social_neighbors_values_weight_avg_list
            data_dict['SOCIAL_NEIGHBORS_NUM_INPUT'] = self.social_neighbor_num_list
            data_dict['SOCIAL_NEIGHBORS_NUM_DICT_INPUT'] = self.social_neighbors_num_dict
            data_dict['USER_USER_SPARSITY_DICT'] = self.user_user_sparsity_dict
        if 'ITEM_CUSTOMER_SPARSE_MATRIX' in model.supply_set:
            self.generateConsumedItemsSparseMatrixForItemUser()
            data_dict['ITEM_CUSTOMER_INDICES_INPUT'] = self.item_customer_indices_list
            data_dict['ITEM_CUSTOMER_VALUES_INPUT'] = self.item_customer_values_list
            data_dict['ITEM_CUSTOMER_VALUES_WEIGHT_AVG_INPUT'] = self.item_customer_values_weight_avg_list
            data_dict['ITEM_CUSTOMER_NUM_INPUT'] = self.item_customer_num_list
            data_dict['ITEM_USER_NUM_DICT_INPUT'] = self.item_user_num_dict
        return data_dict

    def initializeRankingTrain(self):
        self.readData()
        self.arrangePositiveData()
        self.arrangePositiveDataForItemUser()
        self.generateTrainNegative()

    def initializeRankingVT(self):
        self.readData()
        self.arrangePositiveData()
        self.arrangePositiveDataForItemUser()
        self.generateTrainNegative()

    def initalizeRankingEva(self):
        self.readData()
        self.getEvaPositiveBatch()
        self.generateEvaNegative()

    def linkedMap(self):
        self.data_dict['USER_LIST'] = self.user_list
        self.data_dict['ITEM_LIST'] = self.item_list
        self.data_dict['LABEL_LIST'] = self.labels_list

    def linkedRankingEvaMap(self):
        self.data_dict['EVA_USER_LIST'] = self.eva_user_list
        self.data_dict['EVA_ITEM_LIST'] = self.eva_item_list

    ####### Data Loading #######
    def readData(self):
        f = open(self.filename)
        total_user_list = set()
        hash_data = defaultdict(int)
        for _, line in enumerate(f):
            arr = line.split("\t")
            hash_data[(int(arr[0]), int(arr[1]))] = 1
            total_user_list.add(int(arr[0]))
        self.total_user_list = list(total_user_list)
        self.hash_data = hash_data

    def arrangePositiveData(self):
        positive_data = defaultdict(set)
        user_item_num_dict = defaultdict(set)
        user_item_num_for_sparsity_dict = defaultdict(set)
        total_data = set()
        hash_data = self.hash_data
        for (u, i) in hash_data:
            total_data.add((u, i))
            positive_data[u].add(i)
        user_list = sorted(list(positive_data.keys()))
        for u in range(self.conf.num_users):
            user_item_num_dict[u] = len(positive_data[u]) + 1
            user_item_num_for_sparsity_dict[u] = len(positive_data[u])
        self.positive_data = positive_data
        self.user_item_num_dict = user_item_num_dict
        self.user_item_num_for_sparsity_dict = user_item_num_for_sparsity_dict
        self.total_data = len(total_data)

    def Sparsity_analysis_for_user_item_network(self):
        hash_data_for_user_item = self.hash_data
        sparisty_user_item_dict = {}

    def arrangePositiveDataForItemUser(self):
        positive_data_for_item_user = defaultdict(set)
        item_user_num_dict = defaultdict(set)
        total_data_for_item_user = set()
        hash_data_for_item_user = self.hash_data
        for (u, i) in hash_data_for_item_user:
            total_data_for_item_user.add((i, u))
            positive_data_for_item_user[i].add(u)
        item_list = sorted(list(positive_data_for_item_user.keys()))
        for i in range(self.conf.num_items):
            item_user_num_dict[i] = len(positive_data_for_item_user[i]) + 1
        self.item_user_num_dict = item_user_num_dict
        self.positive_data_for_item_user = positive_data_for_item_user
        self.total_data_for_item_user = len(total_data_for_item_user)

    # ----------------------
    # This function generates the train/val/test negative samples
    def generateTrainNegative(self):
        num_items = self.conf.num_items
        num_negatives = self.conf.num_negatives
        negative_data = defaultdict(set)
        total_data = set()
        hash_data = self.hash_data
        for (u, i) in hash_data:
            total_data.add((u, i))
            for _ in range(num_negatives):
                j = np.random.randint(num_items)
                while (u, j) in hash_data:
                    j = np.random.randint(num_items)
                negative_data[u].add(j)
                total_data.add((u, j))
        self.negative_data = negative_data
        self.terminal_flag = 1

    # ----------------------
    # This function is designed for the val/test set, to compute the loss
    def getVTRankingOneBatch(self):
        positive_data = self.positive_data
        negative_data = self.negative_data
        total_user_list = self.total_user_list
        user_list = []
        item_list = []
        labels_list = []
        for u in total_user_list:
            user_list.extend([u] * len(positive_data[u]))
            item_list.extend(positive_data[u])
            labels_list.extend([1] * len(positive_data[u]))
            user_list.extend([u] * len(negative_data[u]))
            item_list.extend(negative_data[u])
            labels_list.extend([0] * len(negative_data[u]))
        self.user_list = np.reshape(user_list, [-1, 1])
        self.item_list = np.reshape(item_list, [-1, 1])
        self.labels_list = np.reshape(labels_list, [-1, 1])

    # ----------------------
    # This function is designed for the training process
    def getTrainRankingBatch(self):
        positive_data = self.positive_data
        negative_data = self.negative_data
        total_user_list = self.total_user_list
        index = self.index
        batch_size = self.conf.training_batch_size
        user_list, item_list, labels_list = [], [], []
        if index + batch_size < len(total_user_list):
            target_user_list = total_user_list[index:index + batch_size]
            self.index = index + batch_size
        else:
            target_user_list = total_user_list[index:len(total_user_list)]
            self.index = 0
            self.terminal_flag = 0
        for u in target_user_list:
            user_list.extend([u] * len(positive_data[u]))
            item_list.extend(list(positive_data[u]))
            labels_list.extend([1] * len(positive_data[u]))
            user_list.extend([u] * len(negative_data[u]))
            item_list.extend(list(negative_data[u]))
            labels_list.extend([0] * len(negative_data[u]))
        self.user_list = np.reshape(user_list, [-1, 1])
        self.item_list = np.reshape(item_list, [-1, 1])
        self.labels_list = np.reshape(labels_list, [-1, 1])

    # ----------------------
    # This function is designed for the positive data
    def getEvaPositiveBatch(self):
        hash_data = self.hash_data
        user_list = []
        item_list = []
        index_dict = defaultdict(list)
        index = 0
        for (u, i) in hash_data:
            user_list.append(u)
            item_list.append(i)
            index_dict[u].append(index)
            index = index + 1
        self.eva_user_list = np.reshape(user_list, [-1, 1])
        self.eva_item_list = np.reshape(item_list, [-1, 1])
        self.eva_index_dict = index_dict

    # ----------------------
    # This function is designed for generating negative data
    def generateEvaNegative(self):
        hash_data = self.hash_data
        total_user_list = self.total_user_list
        num_evaluate = self.conf.num_evaluate
        num_items = self.conf.num_items
        eva_negative_data = defaultdict(list)
        for u in total_user_list:
            for _ in range(num_evaluate):
                j = np.random.randint(num_items)
                while (u, j) in hash_data:
                    j = np.random.randint(num_items)
                eva_negative_data[u].append(j)
        self.eva_negative_data = eva_negative_data

    # ----------------------
    # This function generates the negative batch in rating evaluation
    def getEvaRankingBatch(self):
        batch_size = self.conf.evaluate_batch_size
        num_evaluate = self.conf.num_evaluate
        eva_negative_data = self.eva_negative_data
        total_user_list = self.total_user_list
        index = self.index
        terminal_flag = 1
        total_users = len(total_user_list)
        user_list = []
        item_list = []
        if index + batch_size < total_users:
            batch_user_list = total_user_list[index:index + batch_size]
            self.index = index + batch_size
        else:
            terminal_flag = 0
            batch_user_list = total_user_list[index:total_users]
            self.index = 0
        for u in batch_user_list:
            user_list.extend([u] * num_evaluate)
            item_list.extend(eva_negative_data[u])
        self.eva_user_list = np.reshape(user_list, [-1, 1])
        self.eva_item_list = np.reshape(item_list, [-1, 1])
        return batch_user_list, terminal_flag

    # ----------------------
    # Read social network information
    def readSocialNeighbors(self, friends_flag=1):
        social_neighbors = defaultdict(set)
        social_neighbors_num_dict = defaultdict(set)
        links_file = open(self.conf.links_filename)
        for _, line in enumerate(links_file):
            tmp = line.split('\t')
            u1, u2 = int(tmp[0]), int(tmp[1])
            social_neighbors[u1].add(u2)
            if friends_flag == 1:
                social_neighbors[u2].add(u1)
        user_list = sorted(list(social_neighbors.keys()))
        for u in range(self.conf.num_users):
            social_neighbors_num_dict[u] = len(social_neighbors[u]) + 1
        self.social_neighbors_num_dict = social_neighbors_num_dict
        self.social_neighbors = social_neighbors

    # Note: this redefines arrangePositiveData above; Python keeps this later version.
    def arrangePositiveData(self):
        positive_data = defaultdict(set)
        user_item_num_dict = defaultdict(set)
        total_data = set()
        hash_data = self.hash_data
        for (u, i) in hash_data:
            total_data.add((u, i))
            positive_data[u].add(i)
        user_list = sorted(list(positive_data.keys()))
        for u in range(self.conf.num_users):
            user_item_num_dict[u] = len(positive_data[u]) + 1
        self.positive_data = positive_data
        self.user_item_num_dict = user_item_num_dict
        self.total_data = len(total_data)

    # ----------------------
    # Generate social neighbors sparse matrix indices and values
    def generateSocialNeighborsSparseMatrix(self):
        social_neighbors = self.social_neighbors
        social_neighbors_num_dict = self.social_neighbors_num_dict  # weight avg
        social_neighbors_indices_list = []
        social_neighbors_values_list = []
        social_neighbors_values_weight_avg_list = []
        social_neighbor_num_list = []
        social_neighbors_dict = defaultdict(list)
        user_user_num_for_sparsity_dict = defaultdict(set)
        user_user_sparsity_dict = {}
        user_user_sparsity_dict['0-4'] = []
        user_user_sparsity_dict['4-8'] = []
        user_user_sparsity_dict['8-16'] = []
        user_user_sparsity_dict['16-32'] = []
        user_user_sparsity_dict['32-64'] = []
        user_user_sparsity_dict['64-'] = []
        for u in range(self.conf.num_users):
            user_user_num_for_sparsity_dict[u] = len(social_neighbors[u])
        for u in social_neighbors:
            social_neighbors_dict[u] = sorted(social_neighbors[u])
        user_list = sorted(list(social_neighbors.keys()))
        # node att
        for user in range(self.conf.num_users):
            if user in social_neighbors_dict:
                social_neighbor_num_list.append(len(social_neighbors_dict[user]))
            else:
                social_neighbor_num_list.append(1)
        for user in user_list:
            for friend in social_neighbors_dict[user]:
                social_neighbors_indices_list.append([user, friend])
                social_neighbors_values_list.append(1.0 / len(social_neighbors_dict[user]))
                social_neighbors_values_weight_avg_list.append(
                    1.0 / (np.sqrt(social_neighbors_num_dict[user]) * np.sqrt(social_neighbors_num_dict[friend])))  # weight avg
        for u in range(self.conf.num_users):
            cur_user_neighbors_num = user_user_num_for_sparsity_dict[u]
            if (cur_user_neighbors_num >= 0) & (cur_user_neighbors_num < 4):
                user_user_sparsity_dict['0-4'].append(u)
            elif (cur_user_neighbors_num >= 4) & (cur_user_neighbors_num < 8):
                user_user_sparsity_dict['4-8'].append(u)
            elif (cur_user_neighbors_num >= 8) & (cur_user_neighbors_num < 16):
                user_user_sparsity_dict['8-16'].append(u)
            elif (cur_user_neighbors_num >= 16) & (cur_user_neighbors_num < 32):
                user_user_sparsity_dict['16-32'].append(u)
            elif (cur_user_neighbors_num >= 32) & (cur_user_neighbors_num < 64):
                user_user_sparsity_dict['32-64'].append(u)
            elif cur_user_neighbors_num >= 64:
                user_user_sparsity_dict['64-'].append(u)
        self.user_user_sparsity_dict = user_user_sparsity_dict
        self.social_neighbors_indices_list = np.array(social_neighbors_indices_list).astype(np.int64)
        self.social_neighbors_values_list = np.array(social_neighbors_values_list).astype(np.float32)
        self.social_neighbors_values_weight_avg_list = np.array(social_neighbors_values_weight_avg_list).astype(np.float32)  # weight avg
        self.social_neighbor_num_list = np.array(social_neighbor_num_list).astype(np.int64)
        #self.social_neighbors_values_list = tf.Variable(tf.random_normal([len(self.social_neighbors_indices_list)], stddev=0.01))

    # ----------------------
    # Generate consumed items sparse matrix indices and values
    def generateConsumedItemsSparseMatrix(self):
        positive_data = self.positive_data
        consumed_items_indices_list = []
        consumed_items_values_list = []
        consumed_items_values_weight_avg_list = []
        consumed_item_num_list = []
        consumed_items_dict = defaultdict(list)
        user_item_num_for_sparsity_dict = defaultdict(set)
        user_item_sparsity_dict = {}
        user_item_sparsity_dict['0-4'] = []
        user_item_sparsity_dict['4-8'] = []
        user_item_sparsity_dict['8-16'] = []
        user_item_sparsity_dict['16-32'] = []
        user_item_sparsity_dict['32-64'] = []
        user_item_sparsity_dict['64-'] = []
        consumed_items_num_dict = self.user_item_num_dict  # weight avg
        #social_neighbors_num_dict = self.social_neighbors_num_dict  # weight avg
        item_user_num_dict = self.item_user_num_dict  # weight avg
        for u in positive_data:
            consumed_items_dict[u] = sorted(positive_data[u])
        user_list = sorted(list(positive_data.keys()))
        for u in range(self.conf.num_users):
            user_item_num_for_sparsity_dict[u] = len(positive_data[u])
        for user in range(self.conf.num_users):
            if user in consumed_items_dict:
                consumed_item_num_list.append(len(consumed_items_dict[user]))
            else:
                consumed_item_num_list.append(1)
        for u in user_list:
            for i in consumed_items_dict[u]:
                consumed_items_indices_list.append([u, i])
                consumed_items_values_list.append(1.0 / len(consumed_items_dict[u]))
                consumed_items_values_weight_avg_list.append(
                    1.0 / (np.sqrt(consumed_items_num_dict[u]) * np.sqrt(item_user_num_dict[i])))  # weight avg
        for u in range(self.conf.num_users):
            cur_user_consumed_item_num = user_item_num_for_sparsity_dict[u]
            if (cur_user_consumed_item_num >= 0) & (cur_user_consumed_item_num < 4):
                user_item_sparsity_dict['0-4'].append(u)
            elif (cur_user_consumed_item_num >= 4) & (cur_user_consumed_item_num < 8):
                user_item_sparsity_dict['4-8'].append(u)
            elif (cur_user_consumed_item_num >= 8) & (cur_user_consumed_item_num < 16):
                user_item_sparsity_dict['8-16'].append(u)
            elif (cur_user_consumed_item_num >= 16) & (cur_user_consumed_item_num < 32):
                user_item_sparsity_dict['16-32'].append(u)
            elif (cur_user_consumed_item_num >= 32) & (cur_user_consumed_item_num < 64):
                user_item_sparsity_dict['32-64'].append(u)
            elif cur_user_consumed_item_num >= 64:
                user_item_sparsity_dict['64-'].append(u)
        self.user_item_sparsity_dict = user_item_sparsity_dict
        self.consumed_items_indices_list = np.array(consumed_items_indices_list).astype(np.int64)
        self.consumed_items_values_list = np.array(consumed_items_values_list).astype(np.float32)
        self.consumed_items_values_weight_avg_list = np.array(consumed_items_values_weight_avg_list).astype(np.float32)  # weight avg
        self.consumed_item_num_list = np.array(consumed_item_num_list).astype(np.int64)

    def generateConsumedItemsSparseMatrixForItemUser(self):
        positive_data_for_item_user = self.positive_data_for_item_user
        item_customer_indices_list = []
        item_customer_values_list = []
        item_customer_values_weight_avg_list = []
        item_customer_num_list = []
        item_customer_dict = defaultdict(list)
        consumed_items_num_dict = self.user_item_num_dict  # weight avg
        #social_neighbors_num_dict = self.social_neighbors_num_dict  # weight avg
        item_user_num_dict = self.item_user_num_dict  # weight avg
        for i in positive_data_for_item_user:
            item_customer_dict[i] = sorted(positive_data_for_item_user[i])
        item_list = sorted(list(positive_data_for_item_user.keys()))
        for item in range(self.conf.num_items):
            if item in item_customer_dict:
                item_customer_num_list.append(len(item_customer_dict[item]))
            else:
                item_customer_num_list.append(1)
        for i in item_list:
            for u in item_customer_dict[i]:
                item_customer_indices_list.append([i, u])
                item_customer_values_list.append(1.0 / len(item_customer_dict[i]))
                item_customer_values_weight_avg_list.append(
                    1.0 / (np.sqrt(consumed_items_num_dict[u]) * np.sqrt(item_user_num_dict[i])))
        self.item_customer_indices_list = np.array(item_customer_indices_list).astype(np.int64)
        self.item_customer_values_list = np.array(item_customer_values_list).astype(np.float32)
        self.item_customer_num_list = np.array(item_customer_num_list).astype(np.int64)
        self.item_customer_values_weight_avg_list = np.array(item_customer_values_weight_avg_list).astype(np.float32)
43.652452
166
0.652176
2,654
20,473
4.623587
0.061417
0.037161
0.022248
0.027708
0.708092
0.588705
0.413414
0.3395
0.30788
0.259637
0
0.012846
0.247155
20,473
468
167
43.745727
0.7833
0.05583
0
0.324251
0
0
0.037377
0.02959
0
0
0
0
0
1
0.059946
false
0
0.016349
0
0.084469
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4315741709ca1828a0cd87b2111a7735ecd6a23
2,656
py
Python
src/models/VanillaTransformer.py
iosurodri/annotated-transformer
e5a7e27067d08c09f51b57bbf2824fbcd80ae4d9
[ "MIT" ]
null
null
null
src/models/VanillaTransformer.py
iosurodri/annotated-transformer
e5a7e27067d08c09f51b57bbf2824fbcd80ae4d9
[ "MIT" ]
null
null
null
src/models/VanillaTransformer.py
iosurodri/annotated-transformer
e5a7e27067d08c09f51b57bbf2824fbcd80ae4d9
[ "MIT" ]
null
null
null
import copy

import torch.nn as nn
import torch.nn.functional as F

from src.layers.layers import Encoder, EncoderLayer, Decoder, DecoderLayer, PositionwiseFeedForward
from src.layers.preprocessing import Embeddings, PositionalEncoding
from src.layers.attention import MultiHeadedAttention


### Generic EncoderDecoder structure:

class EncoderDecoder(nn.Module):
    """
    A standard Encoder-Decoder architecture. Base for this and many
    other models.
    """
    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        "Take in and process masked src and target sequences."
        encoded_src = self.encode(src, src_mask)
        return self.decode(encoded_src, src_mask, tgt, tgt_mask)

    def encode(self, src, src_mask):
        embedded_src = self.src_embed(src)
        return self.encoder(embedded_src, src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        embedded_tgt = self.tgt_embed(tgt)
        return self.decoder(embedded_tgt, memory, src_mask, tgt_mask)


class Generator(nn.Module):
    "Define standard linear + softmax generation step."
    def __init__(self, d_model, vocab):
        super(Generator, self).__init__()
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        return F.log_softmax(self.proj(x), dim=-1)


def make_model(src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1, alpha=0.5):
    "Helper: Construct a model from hyperparameters."
    c = copy.deepcopy
    attn = MultiHeadedAttention(h, d_model, alpha=alpha)
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    position = PositionalEncoding(d_model, dropout)
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
        Generator(d_model, tgt_vocab)
    )

    # This was important from their code.
    # Initialize parameters with Glorot / fan_avg.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model


if __name__ == '__main__':
    # Small example model
    tmp_model = make_model(10, 10, 2)
    print(tmp_model)
35.413333
99
0.6875
354
2,656
4.949153
0.322034
0.037671
0.022831
0.017123
0.079338
0.018265
0
0
0
0
0
0.009542
0.210843
2,656
74
100
35.891892
0.826336
0.137425
0
0
0
0
0.064543
0
0
0
0
0
0
1
0.134615
false
0
0.134615
0.019231
0.403846
0.019231
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43380760e72e46d79cbcf3d20f37e8eb8257947
3,215
py
Python
hermetrics/damerau_levenshtein.py
SoldAI/hermetrics
5e07a4f40376779015ef2f5b964d7ac060ed6e25
[ "MIT" ]
3
2020-01-18T02:37:49.000Z
2022-01-27T19:24:15.000Z
hermetrics/damerau_levenshtein.py
SoldAI/hermetrics
5e07a4f40376779015ef2f5b964d7ac060ed6e25
[ "MIT" ]
null
null
null
hermetrics/damerau_levenshtein.py
SoldAI/hermetrics
5e07a4f40376779015ef2f5b964d7ac060ed6e25
[ "MIT" ]
2
2020-01-26T20:40:19.000Z
2021-08-11T12:05:01.000Z
from .levenshtein import Levenshtein


class DamerauLevenshtein(Levenshtein):

    def __init__(self, name='Damerau-Levenshtein'):
        super().__init__(name=name)

    def distance(self, source, target, cost=(1, 1, 1, 1)):
        """Damerau-Levenshtein distance with costs for deletion, insertion,
        substitution and transposition."""
        s_len = len(source)
        t_len = len(target)
        if type(cost) == int or type(cost) == float:
            del_cost = ins_cost = sub_cost = tra_cost = cost
        else:
            del_cost, ins_cost, sub_cost, tra_cost = cost
        # Be sure to exceed the maximum value
        #INF = float('inf')
        UPPER = max(del_cost, ins_cost, sub_cost, tra_cost) * (s_len + t_len)
        # Initialize matrix of size (s_len + 2) x (t_len + 2)
        D = [[UPPER for j in range(t_len + 2)]]
        D += [[UPPER] + [j * ins_cost for j in range(t_len + 1)]]
        D += [[UPPER, i] + [0] * t_len for i in range(1, s_len + 1)]
        # Holds the last row where each symbol was encountered
        last_row = {}
        for i in range(1, s_len + 1):
            # Current symbol in source
            s_symbol = source[i - 1]
            # Column of the last match on this row
            last_match_col = 0
            for j in range(1, t_len + 1):
                # Current symbol in target
                t_symbol = target[j - 1]
                # Last row with a matching character
                last_match_row = last_row.get(t_symbol, 0)
                # Cost of substitution
                opt_sub_cost = 0 if s_symbol == t_symbol else sub_cost
                # Compute the different options
                deletion = D[i][j + 1] + del_cost
                insertion = D[i + 1][j] + ins_cost
                substitution = D[i][j] + opt_sub_cost
                # Cost before transposition
                # + cost of operations between transposed letters
                # + cost of transposition
                # transposition = D[last_match_row][last_match_col] + \
                #                 (i-last_match_row-1) * del_cost + \
                #                 (j-last_match_col-1) * ins_cost + \
                #                 tra_cost
                transposition = D[last_match_row][last_match_col] + \
                    max((i - last_match_row - 1) * del_cost,
                        (j - last_match_col - 1) * ins_cost) + tra_cost
                D[i + 1][j + 1] = min(deletion, insertion, substitution, transposition)
                if opt_sub_cost == 0:
                    last_match_col = j
            last_row[s_symbol] = i
        return D[-1][-1]

    def max_distance(self, source, target, cost=(1, 1, 1, 1)):
        """Damerau-Levenshtein maximum distance value (same as Levenshtein
        to account for the difference in operations)."""
        if type(cost) == int or type(cost) == float:
            lev_cost = cost
        else:
            lev_cost = cost[:3]
        return super().max_distance(source, target, lev_cost)


if(__name__ == '__main__'):
    print("Damerau-Levenshtein distance")
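A brief usage sketch of the class above; the strings are made up, and the import path assumes the hermetrics package layout shown in this record's header.

# Hypothetical usage sketch for DamerauLevenshtein (illustrative values only).
from hermetrics.damerau_levenshtein import DamerauLevenshtein

dl = DamerauLevenshtein()
print(dl.distance("abcd", "acbd"))                      # 1: one transposition at unit costs
print(dl.distance("abcd", "acbd", cost=(1, 1, 1, 2)))   # transposition now costs 2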
40.696203
118
0.51353
392
3,215
3.977041
0.239796
0.063502
0.046183
0.02694
0.345093
0.311738
0.292495
0.292495
0.168056
0.127004
0
0.018339
0.389425
3,215
78
119
41.217949
0.775853
0.257543
0
0.095238
0
0
0.023305
0
0
0
0
0
0
1
0.071429
false
0
0.02381
0
0.166667
0.02381
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f434676fc528e9c88694b6e2adf610fc78d5e377
13,130
py
Python
etna/analysis/outliers/hist_outliers.py
Carlosbogo/etna
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
[ "Apache-2.0" ]
1
2021-11-11T21:18:42.000Z
2021-11-11T21:18:42.000Z
etna/analysis/outliers/hist_outliers.py
Carlosbogo/etna
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
[ "Apache-2.0" ]
null
null
null
etna/analysis/outliers/hist_outliers.py
Carlosbogo/etna
b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94
[ "Apache-2.0" ]
null
null
null
import typing
from copy import deepcopy
from typing import TYPE_CHECKING
from typing import List

import numba
import numpy as np
import pandas as pd

if TYPE_CHECKING:
    from etna.datasets import TSDataset


@numba.jit(nopython=True)
def optimal_sse(left: int, right: int, p: np.ndarray, pp: np.ndarray) -> float:
    """
    Count the approximation error by 1 bin from left to right elements.

    Parameters
    ----------
    left:
        left border
    right:
        right border
    p:
        array of sums of elements, p[i] - sum from first to i elements
    pp:
        array of sums of squares of elements, pp[i] - sum of squares from first to i elements

    Returns
    -------
    result: float
        approximation error
    """
    if left == 0:
        avg = p[right]
        return pp[right] - avg ** 2 / (right - left + 1)
    avg = p[right] - p[left - 1]
    return pp[right] - pp[left - 1] - avg ** 2 / (right - left + 1)


@numba.jit(nopython=True)
def adjust_estimation(i: int, k: int, sse: np.ndarray, sse_one_bin: np.ndarray) -> float:
    """
    Count sse_one_bin[i][k] using binary search.

    Parameters
    ----------
    i:
        left border of series
    k:
        number of bins
    sse:
        array of approximation errors
    sse_one_bin:
        array of approximation errors with one bin

    Returns
    -------
    result: float
        calculated sse_one_bin[i][k]
    """
    now_evaluated = sse[i - 1][k - 1]
    first_evaluated = sse[i - 1][k - 1]
    idx_prev = np.inf
    idx_now = 0

    left = 0
    while idx_now != idx_prev:
        right = i
        idx_prev = idx_now
        while right - left > 1:
            if sse_one_bin[(left + right) // 2][i] > now_evaluated:
                left = (left + right) // 2
            else:
                right = (left + right) // 2
        idx_now = left
        now_evaluated = first_evaluated - sse[idx_now][k - 1]

    now_min = np.inf
    for j in range(idx_now, i):
        now = sse[j][k - 1] + sse_one_bin[j + 1][i]
        now_min = min(now_min, now)
    return now_min


@numba.jit(nopython=True)
def v_optimal_hist(series: np.ndarray, bins_number: int, p: np.ndarray, pp: np.ndarray) -> np.ndarray:
    """
    Count an approximation error of a series with [1, bins_number] bins.

    http://www.vldb.org/conf/1998/p275.pdf

    Parameters
    ----------
    series:
        array to count an approximation error with bins_number bins
    bins_number:
        number of bins
    p:
        array of sums of elements, p[i] - sum from 0th to i elements
    pp:
        array of sums of squares of elements, pp[i] - sum of squares from 0th to i elements

    Returns
    -------
    error: np.ndarray
        approximation error of a series with [1, bins_number] bins
    """
    sse = np.zeros((len(series), bins_number))
    for i in range(len(series)):
        sse[i][0] = optimal_sse(0, i, p, pp)

    sse_one_bin = np.zeros((len(series), len(series)))
    for i in range(len(series)):
        for j in range(i, len(series)):
            sse_one_bin[i][j] = optimal_sse(i, j, p, pp)

    for tmp_bins_number in range(1, bins_number):
        for i in range(tmp_bins_number, len(series)):
            sse[i][tmp_bins_number] = adjust_estimation(i, tmp_bins_number, sse, sse_one_bin)
    return sse


def compute_f(series: np.ndarray, k: int, p: np.ndarray, pp: np.ndarray) -> typing.Tuple[np.ndarray, list]:
    """
    Compute F. F[a][b][k] - minimum approximation error on series[a:b+1] with k outliers.

    http://www.vldb.org/conf/1999/P9.pdf

    Parameters
    ----------
    series:
        array to count F
    k:
        number of outliers
    p:
        array of sums of elements, p[i] - sum from 0th to i elements
    pp:
        array of sums of squares of elements, pp[i] - sum of squares from 0th to i elements

    Returns
    -------
    result: tuple
        array F, outliers_indices
    """
    f = np.zeros((len(series), len(series), k + 1))
    s: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
    ss: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]
    outliers_indices: list = [[[[] for i in range(k + 1)] for j in range(len(series))] for s in range(len(series))]

    for right_border in range(0, len(series)):
        f[0][right_border][0] = optimal_sse(0, right_border, p, pp)
        s[0][right_border][0] = [p[right_border]]
        ss[0][right_border][0] = [pp[right_border]]

    for left_border in range(1, len(series)):
        for right_border in range(left_border, len(series)):
            f[left_border][right_border][0] = optimal_sse(left_border, right_border, p, pp)
            s[left_border][right_border][0] = [p[right_border] - p[left_border - 1]]
            ss[left_border][right_border][0] = [pp[right_border] - pp[left_border - 1]]

    for left_border in range(0, len(series)):
        for right_border in range(left_border, min(len(series), left_border + k)):
            s[left_border][right_border][right_border - left_border + 1] = [0]
            ss[left_border][right_border][right_border - left_border + 1] = [0]
            outliers_indices[left_border][right_border][right_border - left_border + 1] = [
                list(np.arange(left_border, right_border + 1))
            ]

    for left_border in range(len(series)):
        for right_border in range(left_border + 1, len(series)):
            for outlier_number in range(1, min(right_border - left_border + 1, k + 1)):
                f1 = f[left_border][right_border - 1][outlier_number - 1]
                tmp_ss = []
                tmp_s = []
                f2 = []
                now_min = np.inf
                now_outliers_indices = []
                where = 0
                for i in range(len(ss[left_border][right_border - 1][outlier_number])):
                    tmp_ss.append(ss[left_border][right_border - 1][outlier_number][i] + series[right_border] ** 2)
                    tmp_s.append(s[left_border][right_border - 1][outlier_number][i] + series[right_border])
                    now_outliers_indices.append(
                        deepcopy(outliers_indices[left_border][right_border - 1][outlier_number][i])
                    )
                    f2.append(tmp_ss[-1] - tmp_s[-1] ** 2 / (right_border - left_border + 1 - outlier_number))
                    if f2[-1] < now_min:
                        now_min = f2[-1]
                        where = i

                if f1 < now_min:
                    f[left_border][right_border][outlier_number] = f1
                    s[left_border][right_border][outlier_number] = deepcopy(
                        s[left_border][right_border - 1][outlier_number - 1]
                    )
                    ss[left_border][right_border][outlier_number] = deepcopy(
                        ss[left_border][right_border - 1][outlier_number - 1]
                    )
                    outliers_indices[left_border][right_border][outlier_number] = deepcopy(
                        outliers_indices[left_border][right_border - 1][outlier_number - 1]
                    )
                    if len(outliers_indices[left_border][right_border][outlier_number]):
                        for i in range(len(outliers_indices[left_border][right_border][outlier_number])):
                            outliers_indices[left_border][right_border][outlier_number][i].append(right_border)
                    else:
                        outliers_indices[left_border][right_border][outlier_number].append([right_border])
                elif f1 > now_min:
                    f[left_border][right_border][outlier_number] = f2[where]
                    s[left_border][right_border][outlier_number] = tmp_s
                    ss[left_border][right_border][outlier_number] = tmp_ss
                    outliers_indices[left_border][right_border][outlier_number] = now_outliers_indices
                else:
                    f[left_border][right_border][outlier_number] = f1
                    tmp_s.extend(s[left_border][right_border - 1][outlier_number - 1])
                    tmp_ss.extend(ss[left_border][right_border - 1][outlier_number - 1])
                    s[left_border][right_border][outlier_number] = tmp_s
                    ss[left_border][right_border][outlier_number] = tmp_ss

                    tmp = deepcopy(outliers_indices[left_border][right_border - 1][outlier_number - 1])
                    if len(tmp):
                        for i in range(len(tmp)):
                            tmp[i].append(right_border)
                    else:
                        tmp = [[right_border]]
                    outliers_indices[left_border][right_border][outlier_number].extend(now_outliers_indices)
                    outliers_indices[left_border][right_border][outlier_number].extend(deepcopy(tmp))
    return f, outliers_indices


def hist(series: np.ndarray, bins_number: int) -> np.ndarray:
    """
    Compute outliers indices according to hist rule.

    http://www.vldb.org/conf/1999/P9.pdf

    Parameters
    ----------
    series:
        array to count F
    bins_number:
        number of bins

    Returns
    -------
    indices: np.ndarray
        outliers indices
    """
    approximation_error = np.zeros((len(series), bins_number + 1, bins_number))
    anomalies: list = [[[[] for i in range(bins_number)] for j in range(bins_number + 1)] for s in range(len(series))]

    p, pp = np.empty_like(series), np.empty_like(series)
    p[0] = series[0]
    pp[0] = series[0] ** 2
    for i in range(1, len(series)):
        p[i] = p[i - 1] + series[i]
        pp[i] = pp[i - 1] + series[i] ** 2

    f, outliers_indices = compute_f(series, bins_number - 1, p, pp)

    approximation_error[:, 1:, 0] = v_optimal_hist(series, bins_number, p, pp)

    approximation_error[:, 1, :] = f[0]
    for right_border in range(len(series)):
        for outlier_number in range(1, bins_number):
            if len(outliers_indices[0][right_border][outlier_number]):
                anomalies[right_border][1][outlier_number] = deepcopy(
                    outliers_indices[0][right_border][outlier_number][0]
                )

    for right_border in range(1, len(series)):
        for tmp_bins_number in range(2, min(bins_number + 1, right_border + 2)):
            for outlier_number in range(1, min(bins_number, right_border + 2 - tmp_bins_number)):
                # see the formula above
                tmp_approximation_error = approximation_error[:right_border, tmp_bins_number - 1, : outlier_number + 1]
                tmp_f = f[1 : right_border + 1, right_border, : outlier_number + 1][:, ::-1]
                approximation_error[right_border][tmp_bins_number][outlier_number] = np.min(
                    tmp_approximation_error + tmp_f
                )

                where = np.where(
                    tmp_approximation_error + tmp_f
                    == approximation_error[right_border][tmp_bins_number][outlier_number]
                )

                if where[1][0] != outlier_number:
                    anomalies[right_border][tmp_bins_number][outlier_number].extend(
                        deepcopy(outliers_indices[1 + where[0][0]][right_border][outlier_number - where[1][0]][0])
                    )
                anomalies[right_border][tmp_bins_number][outlier_number].extend(
                    deepcopy(anomalies[where[0][0]][tmp_bins_number - 1][where[1][0]])
                )

    count = 0
    now_min = approximation_error[-1][-1][0]
    for outlier_number in range(1, min(approximation_error.shape[1], approximation_error.shape[2])):
        if approximation_error[-1][approximation_error.shape[1] - 1 - outlier_number][outlier_number] <= now_min:
            count = outlier_number
            now_min = approximation_error[-1][approximation_error.shape[1] - 1 - outlier_number][outlier_number]
    return np.array(sorted(anomalies[-1][approximation_error.shape[1] - 1 - count][count]))


def get_anomalies_hist(
    ts: "TSDataset", in_column: str = "target", bins_number: int = 10
) -> typing.Dict[str, List[pd.Timestamp]]:
    """
    Get point outliers in time series using histogram model.

    Outliers are all points that, when removed, result in a histogram with a lower
    approximation error, even with the number of bins less than the number of outliers.

    Parameters
    ----------
    ts:
        TSDataset with timeseries data
    in_column:
        name of the column in which the anomaly is searching
    bins_number:
        number of bins

    Returns
    -------
    dict of outliers: typing.Dict[str, typing.List[pd.Timestamp]]
        dict of outliers in format {segment: [outliers_timestamps]}
    """
    outliers_per_segment = {}
    segments = ts.segments
    for seg in segments:
        segment_df = ts.df[seg].reset_index()
        values = segment_df[in_column].values
        timestamp = segment_df["timestamp"].values

        anomalies = hist(values, bins_number)

        outliers_per_segment[seg] = [timestamp[i] for i in anomalies]
    return outliers_per_segment
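A minimal sketch of the core `hist` routine above on a synthetic series; the values are made up and the chosen `bins_number` is arbitrary (assumes numba and numpy are installed).

import numpy as np

# Synthetic series with one obvious spike at index 3.
series = np.array([1.0, 1.1, 0.9, 10.0, 1.0, 1.2, 0.95, 1.05])
print(hist(series, bins_number=3))  # indices the histogram model flags as outliers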
39.667674
119
0.591394
1,763
13,130
4.201929
0.093023
0.111366
0.089498
0.102052
0.593413
0.49973
0.423731
0.364606
0.31844
0.225027
0
0.019022
0.291318
13,130
330
120
39.787879
0.777109
0.176999
0
0.11413
0
0
0.002311
0
0
0
0
0
0
1
0.032609
false
0
0.043478
0
0.11413
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43505730d577b131a0ebe06e14640a6a2175f31
2,094
py
Python
aws/securityGroup.py
emanueleleyland/sabd-project2
387b33443b87e78635d8d6c9a03faadbc90ae9da
[ "BSD-2-Clause" ]
null
null
null
aws/securityGroup.py
emanueleleyland/sabd-project2
387b33443b87e78635d8d6c9a03faadbc90ae9da
[ "BSD-2-Clause" ]
null
null
null
aws/securityGroup.py
emanueleleyland/sabd-project2
387b33443b87e78635d8d6c9a03faadbc90ae9da
[ "BSD-2-Clause" ]
null
null
null
def createKafkaSecurityGroup(ec2, vpc):
    sec_group_kafka = ec2.create_security_group(
        GroupName='kafka', Description='kafka sec group', VpcId=vpc.id)
    sec_group_kafka.authorize_ingress(
        IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 9092, 'ToPort': 9092,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
    )
    print(sec_group_kafka.id)
    return sec_group_kafka


def createZookeeperSecurityGroup(ec2, vpc):
    sec_group_zookeeper = ec2.create_security_group(
        GroupName='zookeeper', Description='zookeeper', VpcId=vpc.id)
    sec_group_zookeeper.authorize_ingress(
        IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 2181, 'ToPort': 2181,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 2888, 'ToPort': 2888,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 3888, 'ToPort': 3888,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
    )
    print(sec_group_zookeeper.id)
    return sec_group_zookeeper


def create_redis_security_group(ec2, vpc):
    sec_group_redis = ec2.create_security_group(
        GroupName='redis', Description='redis', VpcId=vpc.id)
    sec_group_redis.authorize_ingress(
        IpPermissions=[{'IpProtocol': 'icmp', 'FromPort': -1, 'ToPort': -1,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 22, 'ToPort': 22,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]},
                       {'IpProtocol': 'tcp', 'FromPort': 6379, 'ToPort': 6379,
                        'IpRanges': [{'CidrIp': '0.0.0.0/0'}]}]
    )
    print(sec_group_redis.id)
    return sec_group_redis
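A hypothetical sketch of how these helpers might be wired to a boto3 EC2 resource; it assumes AWS credentials and a default region are configured, and the VPC CIDR block is illustrative.

# Usage sketch (assumptions: configured credentials/region; example CIDR).
import boto3

ec2 = boto3.resource('ec2')
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
vpc.wait_until_available()

kafka_sg = createKafkaSecurityGroup(ec2, vpc)
zk_sg = createZookeeperSecurityGroup(ec2, vpc)
redis_sg = create_redis_security_group(ec2, vpc)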
53.692308
118
0.575931
247
2,094
4.736842
0.145749
0.075214
0.084615
0.075214
0.65812
0.532479
0.532479
0.532479
0.532479
0.532479
0
0.07199
0.210602
2,094
39
119
53.692308
0.635814
0
0
0.1875
0
0
0.286874
0
0
0
0
0
0
1
0.09375
false
0
0
0
0.1875
0.09375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43612b155ef29350dd3f083a77ca91ae4d8fa46
7,537
py
Python
inconnu/character/update/parse.py
tiltowait/inconnu
6cca5fed520899d159537701b695c94222d8dc45
[ "MIT" ]
4
2021-09-06T20:18:13.000Z
2022-02-05T17:08:44.000Z
inconnu/character/update/parse.py
tiltowait/inconnu
6cca5fed520899d159537701b695c94222d8dc45
[ "MIT" ]
7
2021-09-13T00:46:57.000Z
2022-01-11T06:38:50.000Z
inconnu/character/update/parse.py
tiltowait/inconnu
6cca5fed520899d159537701b695c94222d8dc45
[ "MIT" ]
2
2021-11-27T22:24:53.000Z
2022-03-16T21:05:00.000Z
"""character/update/parse.py - Defines an interface for updating character traits.""" # pylint: disable=too-many-arguments import re import discord from discord_ui.components import LinkButton from . import paramupdate from ..display import display from ... import common, constants from ...log import Log from ...vchar import VChar __MATCHES = {} __KEYS = { "name": "The character's name", "health": "The character's max Health", "willpower": "The character's max Willpower", "humanity": "The character's Humanity", "splat": "The type of character: `vampire`, `mortal`, or `ghoul`", "sh": "+/- Superficial Health damage", "ah": "+/- Aggravated Health damage", "sw": "+/- Superficial Willpower damage", "aw": "+/- Aggravated Willpower damage", "stains": "+/- Stains", "unspent_xp": "+/- Unspent XP", "lifetime_xp": "+/- Total Lifetime XP", "hunger": "+/- The character's Hunger", "potency": "+/- The character's Blood Potency" } __HELP_URL = "https://www.inconnu-bot.com/#/character-tracking?id=tracker-updates" async def update( ctx, parameters: str, character=None, color=None, update_message=None, player=None ): """ Process the user's arguments. Allow the user to omit a character if they have only one. """ args = re.sub(r":", r"=", parameters) # Some people think colons work ... args = re.sub(r"(\w)\s*([+-])\s*(\w)", r"\g<1>=\g<2>\g<3>", args) # Stop the sh+3 madness args = re.sub(r"\s*([+-])\s*=\s*", r"=\g<1>", args) # Let +/-= work, for the CS nerds args = re.sub(r"\s*=\s*([+-])\s*", r"=\g<1>", args) # Remove gaps between keys and values args = list(args.split()) # To allow element removal if len(args) == 0: await update_help(ctx) return try: owner = await common.player_lookup(ctx, player) tip = f"`/character update` `parameters:{parameters}` `character:CHARACTER`" character = await common.fetch_character(ctx, character, tip, __HELP_URL, owner=owner) parameters = __parse_arguments(*args) updates = [] for parameter, new_value in parameters.items(): update_msg = __update_character(character, parameter, new_value) updates.append(update_msg) Log.log("update", user=ctx.author.id, guild=ctx.guild.id, charid=character.id, syntax=" ".join(args) ) # Ignore generated output if we got a custom message if update_message is None: update_message = "\n".join(updates) await display(ctx, character, color=color, owner=player, message=update_message) except (SyntaxError, ValueError) as err: Log.log("update_error", user=ctx.author.id, guild=ctx.guild.id, charid=character.id, syntax=" ".join(args) ) await update_help(ctx, err) except LookupError as err: await common.present_error(ctx, err, help_url=__HELP_URL) except common.FetchError: pass def __parse_arguments(*arguments): """ Parse the user's arguments. Raises ValueErrors and KeyErrors on exceptions. """ if len(arguments) == 0: raise ValueError("You must supply some parameters!") parameters = {} for argument in arguments: split = argument.split("=") key = split[0].lower() if len(split) != 2: err = "Parameters must be in `key = value` pairs." if key not in __KEYS: err += f" Also, `{key}` is not a valid option." 
raise SyntaxError(err) if key in parameters: raise ValueError(f"You cannot use `{key}` more than once.") if key not in __MATCHES: raise ValueError(f"Unknown parameter: `{key}`.") key = __MATCHES[key] # Get the canonical key value = split[1] if len(value) == 0: raise ValueError(f"No value given for `{key}`.") parameters[key] = value # Don't do any validation here return parameters def __update_character(character: VChar, param: str, value: str) -> str: """ Update one of a character's parameters. Args: character (VChar): The character being updated param (str): The parameter to update value (str): The parameter's new value Raises ValueError if the parameter's value is invalid. """ return getattr(paramupdate, f"update_{param}")(character, value) async def update_help(ctx, err=None, hidden=True): """Display a help message that details the available keys.""" embed = discord.Embed( title="Character Tracking", ) embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.display_avatar) if err is not None: embed.add_field(name="Error", value=str(err), inline=False) inst = "To update a character, use `/character update` with one or more `KEY=VALUE` pairs." embed.add_field(name="Instructions", value=inst, inline=False) parameters = [f"***{key}:*** {val}" for key, val in __KEYS.items()] parameters = "\n".join(parameters) embed.add_field(name="Keys", value=parameters, inline=False) embed.add_field( name="Example", value="Character takes 4 Superficial Health damage:```/character update parameters:sh+4```" ) embed.set_footer(text="You may modify more than one tracker at a time.") documentation = LinkButton( "http://www.inconnu-bot.com/#/character-tracking?id=tracker-updates", label="Full Documentation" ) support = LinkButton(constants.SUPPORT_URL, "Support") await ctx.respond(embed=embed, components=[documentation, support], hidden=hidden) # We do flexible matching for the keys. Many of these are the same as RoD's # keys, while others have been observed in syntax error logs. This should be # a little more user-friendly. def __setup_matches(): """Register all the update keys.""" __register_keys("name") __register_keys("health", "hp") __register_keys("willpower", "wp", "w") __register_keys("humanity", "hm") __register_keys("splat", "type") __register_keys( "sh", "sd", "shp", "suphp", "suph", "supd", "superficialhealth", "superficialdamage" ) __register_keys("ah", "ad", "ahp", "agghp", "aggd", "aggh", "agghealth", "aggdamage") __register_keys("sw", "swp", "supwp", "supw", "superficialwillpower") __register_keys("aw", "awp", "aggwp", "aggw", "aggwillpower") __register_keys("stains", "stain", "s") __register_keys( "current_xp", "xp_current", "current_exp", "exp_current", "currentxp", "currentexp", "xpcurrent", "expcurrent", "cxp", "unspent_xp", "xp_unspent", "unspent_exp", "exp_unspent", "unspentxp", "unspentexp", "xpunspent", "expunspent", "uxp" ) __register_keys( "total_xp", "xp_total", "total_exp", "exp_total", "totalxp", "totalexp", "xptotal", "exptotal", "txp", "lifetimexp", "xplifetime", "explifetime", "lxp", "lifetime_xp", "life_time_xp" ) __register_keys("hunger", "h") __register_keys("potency", "bp", "p") def __register_keys(canonical, *alternates): """Register an update key along with some alternates.""" __MATCHES[canonical] = canonical for alternate in alternates: if alternate in __MATCHES: raise KeyError(f"{alternate} is already an update parameter.") __MATCHES[alternate] = canonical __setup_matches()
34.573394
99
0.629959
929
7,537
4.965554
0.326157
0.03902
0.016909
0.008671
0.056362
0.056362
0.056362
0.056362
0.056362
0.035118
0
0.002405
0.227677
7,537
217
100
34.732719
0.79007
0.12432
0
0.074324
0
0.006757
0.289561
0.007243
0
0
0
0
0
1
0.027027
false
0.006757
0.054054
0
0.101351
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43697d11efae6dda37ec02c7a022ad4d3dc4330
11,009
py
Python
formation.py
graham-kim/pygremlin-graph-visualiser
65cb4d4fb71c8dde46ff1a36a40adcbdf233448c
[ "MIT" ]
null
null
null
formation.py
graham-kim/pygremlin-graph-visualiser
65cb4d4fb71c8dde46ff1a36a40adcbdf233448c
[ "MIT" ]
39
2020-07-25T10:58:19.000Z
2020-08-28T15:02:12.000Z
formation.py
graham-kim/pygremlin-graph-visualiser
65cb4d4fb71c8dde46ff1a36a40adcbdf233448c
[ "MIT" ]
null
null
null
import sys
import os
sys.path.append(os.path.dirname(__file__))

import numpy as np
import typing as tp

import angles
from model import Node, Link, Label
from spec import ArrowDraw, NodeSpec


class FormationManager:
    def __init__(self):
        self._nodes = {}
        self._links = []
        self._labels = []

    @property
    def nodes(self) -> tp.List[Node]:
        return [n for n in self._nodes.values()]

    @property
    def links(self) -> tp.List[Link]:
        return self._links

    @property
    def labels(self) -> tp.List[Label]:
        return self._labels

    def _id_if_str(self, node: tp.Tuple[str, int]) -> int:
        if isinstance(node, int):
            return node
        else:
            return self.id_of(node)

    def text_of(self, node_id: int) -> str:
        if not isinstance(node_id, int):
            raise TypeError("Expected node_id to be int: {}".format(node_id))
        return self._nodes[node_id].text

    def pos_of(self, node_id: tp.Tuple[str, int]) -> np.array:
        node_id = self._id_if_str(node_id)
        return np.array(self._nodes[node_id].pos)

    def pos_perp_to(self, from_id: int, to_id: int, shift_breadth: int, to_left: bool) -> np.array:
        from_vec2 = np.array(self._nodes[from_id].pos)
        to_vec2 = np.array(self._nodes[to_id].pos)
        rel_vec2 = to_vec2 - from_vec2
        flipped_y_unit_rel = angles.flip_y(angles.unit(rel_vec2))

        if to_left:
            rotated_dir = angles.flip_y(
                angles.rotate_vector_to_left_by_90_deg(flipped_y_unit_rel))
        else:
            rotated_dir = angles.flip_y(
                angles.rotate_vector_to_right_by_90_deg(flipped_y_unit_rel))

        return (from_vec2 + rel_vec2 / 2 + rotated_dir * shift_breadth).astype(int)

    def id_of(self, text: str) -> int:
        if not isinstance(text, str):
            raise TypeError("{} should be a string".format(text))
        ans = []
        for key in self._nodes.keys():
            if text == self._nodes[key].text:
                ans.append(key)
        if len(ans) == 0:
            raise ValueError("No node has this text: {}".format(text))
        elif len(ans) == 1:
            return ans[0]
        else:
            raise ValueError("More than one node has the text {}: {}".format(text, ans))

    def add_node(self, text: str, pos: tp.Tuple[int, int], colour: str = "green",
                 multibox: bool = False) -> int:
        new_node = Node(text, pos, colour, multibox)
        new_id = id(new_node)
        self._nodes[new_id] = new_node
        return new_id

    def add_label(self, text: str, pos: tp.Tuple[int, int], colour: str = "red"):
        self._labels.append(Label(text, pos, colour))

    def add_link(self, from_id: tp.Tuple[str, int], to_id: tp.Tuple[str, int], colour: str = "black",
                 arrow_draw: ArrowDraw = ArrowDraw.FWD_ARROW, link_2_col: tp.Optional[str] = None):
        self._links.append(
            Link(self._id_if_str(from_id), self._id_if_str(to_id), colour, arrow_draw, link_2_col))

    def add_dual_link(self, from_id: tp.Tuple[str, int], to_id: tp.Tuple[str, int],
                      colour: str = "black", second_colour: str = "black"):
        self.add_link(from_id, to_id, colour, ArrowDraw.DUAL_LINK, second_colour)

    def add_linked_node(self, from_id: tp.Tuple[str, int], pos: tp.Tuple[int, int],
                        spec: NodeSpec) -> int:
        new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
        self.add_link(from_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
        return new_id

    def add_daisy_chain_links(self, nodes: tp.List[tp.Tuple[str, int]],
                              arrow_draw: ArrowDraw = ArrowDraw.FWD_ARROW,
                              link_col: str = "black", link_2_col: tp.Optional[str] = None):
        if not isinstance(nodes, list):
            raise TypeError("Expected a list for nodes: {}".format(nodes))
        if len(nodes) < 2:
            raise ValueError("Expected at least 2 nodes, got {}".format(len(nodes)))

        for i, item in enumerate(nodes[1:]):
            prev_node = self._id_if_str(nodes[i])  # i is already the previous index
            this_node = self._id_if_str(item)
            self.add_link(prev_node, this_node, link_col, arrow_draw, link_2_col)

    def add_depth_line_of_linked_nodes(self, start_id: tp.Tuple[str, int], dir: tp.Tuple[int, int],
                                       link_length: int,
                                       node_specs: tp.List[tp.Optional[NodeSpec]]
                                       ) -> tp.List[int]:
        added_ids = []
        start_id = self._id_if_str(start_id)
        start_pos = angles.vec2(self._nodes[start_id].pos)
        unit_dir = angles.unit(dir)
        count = 1
        from_id = start_id
        for spec in node_specs:
            if spec is not None:
                pos = start_pos + unit_dir * link_length * count
                new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
                if spec.link_draw == ArrowDraw.BACK_ARROW:
                    self.add_link(new_id, from_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
                elif spec.link_draw != ArrowDraw.NO_LINK:
                    self.add_link(from_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
                added_ids.append(new_id)
                from_id = new_id
            count += 1
        return added_ids

    def add_rail_of_nodes(self, start_coord: tp.Tuple[int, int], dir: tp.Tuple[int, int],
                          link_length: int,
                          node_specs: tp.List[tp.Optional[NodeSpec]]
                          ) -> tp.List[int]:
        num_specs = len(node_specs)
        if num_specs < 2:
            raise ValueError("node_specs must have at least 2 elements")
        if node_specs[0] is None or node_specs[-1] is None:
            raise ValueError("The first and last item of node_specs must not be None")

        first_id = self.add_node(node_specs[0].text, start_coord,
                                 node_specs[0].node_col, node_specs[0].multibox)
        added_ids = [first_id]
        new_ids = self.add_depth_line_of_linked_nodes(first_id, dir, link_length, node_specs[1:])
        added_ids.extend(new_ids)
        return added_ids

    def add_breadth_line_of_sibling_nodes(self, parent_id: tp.Tuple[str, int],
                                          start_coord: tp.Tuple[int, int],
                                          end_coord: tp.Tuple[int, int],
                                          node_specs: tp.List[tp.Optional[NodeSpec]]
                                          ) -> tp.List[int]:
        num_specs = len(node_specs)
        parent_id = self._id_if_str(parent_id)
        if num_specs < 2:
            raise ValueError("node_specs must have at least 2 elements")
        if node_specs[0] is None or node_specs[-1] is None:
            raise ValueError("The first and last item of node_specs must not be None")

        added_ids = []
        start_vec2 = angles.vec2(start_coord)
        end_vec2 = angles.vec2(end_coord)
        rel_vec2 = end_vec2 - start_vec2
        count = 0
        for spec in node_specs:
            if spec is not None:
                pos = start_vec2 + rel_vec2 * count / (num_specs - 1)
                new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
                if spec.link_draw == ArrowDraw.BACK_ARROW:
                    self.add_link(new_id, parent_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
                elif spec.link_draw != ArrowDraw.NO_LINK:
                    self.add_link(parent_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
                added_ids.append(new_id)
            count += 1
        return added_ids

    def add_breadth_line_centered_on(self, parent_id: tp.Tuple[str, int],
                                     center_coord: tp.Tuple[int, int],
                                     link_length: int,
                                     node_specs: tp.List[tp.Optional[NodeSpec]]
                                     ) -> tp.List[int]:
        num_specs = len(node_specs)
        if num_specs < 2:
            raise ValueError("node_specs must have at least 2 elements")

        parent_pos = self.pos_of(parent_id)
        rel_vec2 = angles.vec2(center_coord) - parent_pos
        rotated_vec2 = angles.flip_y(
            angles.rotate_vector_to_left_by_90_deg(
                angles.flip_y(angles.unit(rel_vec2))))

        half_total_length = link_length * float(num_specs - 1) / 2.0
        start_coord = center_coord + rotated_vec2 * half_total_length
        end_coord = center_coord - rotated_vec2 * half_total_length
        return self.add_breadth_line_of_sibling_nodes(parent_id, start_coord, end_coord, node_specs)

    def add_arc_of_sibling_nodes(self, parent_id: tp.Tuple[str, int], radius: int,
                                 start_dir_coord: tp.Tuple[int, int],
                                 end_dir_coord: tp.Tuple[int, int], clockwise: bool,
                                 node_specs: tp.List[tp.Optional[NodeSpec]]
                                 ) -> tp.List[int]:
        parent_id = self._id_if_str(parent_id)
        num_specs = len(node_specs)
        if num_specs < 2:
            raise ValueError("node_specs must have at least 2 elements")
        if node_specs[0] is None or node_specs[-1] is None:
            raise ValueError("The first and last item of node_specs must not be None")

        added_ids = []
        parent_pos = self._nodes[parent_id].pos
        parent_vec2 = angles.vec2(parent_pos)
        start_vec2 = angles.vec2(start_dir_coord) - parent_vec2
        end_vec2 = angles.vec2(end_dir_coord) - parent_vec2

        start_bear_rad = angles.get_bearing_rad_of(angles.flip_y(start_vec2))
        end_bear_rad = angles.get_bearing_rad_of(angles.flip_y(end_vec2))
        bear_diff_rad = angles.normalise_angle(end_bear_rad - start_bear_rad)
        if clockwise:
            bear_diff_rad = angles.flip_angle(bear_diff_rad)

        count = 0
        for spec in node_specs:
            if spec is not None:
                rotate_anticlockwise_by = bear_diff_rad * count / (num_specs - 1)
                if clockwise:
                    rotate_anticlockwise_by *= -1
                dir_vec = angles.flip_y(
                    angles.get_unit_vector_after_rotating(
                        angles.flip_y(start_vec2), rotate_anticlockwise_by))
                pos = parent_pos + dir_vec * radius
                new_id = self.add_node(spec.text, pos, spec.node_col, spec.multibox)
                if spec.link_draw == ArrowDraw.BACK_ARROW:
                    self.add_link(new_id, parent_id, spec.link_col, ArrowDraw.FWD_ARROW, None)
                elif spec.link_draw != ArrowDraw.NO_LINK:
                    self.add_link(parent_id, new_id, spec.link_col, spec.link_draw, spec.link_2_col)
                added_ids.append(new_id)
            count += 1
        return added_ids
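A minimal usage sketch of the class above, assuming `model.Node` and `model.Link` behave as the methods expect; the node texts and coordinates are made up.

fm = FormationManager()
a = fm.add_node("start", (100, 100))
b = fm.add_node("end", (300, 100))
fm.add_link(a, b)             # defaults: black, forward arrow
print(fm.pos_of("end"))       # lookup by text -> array([300, 100])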
42.670543
121
0.592788
1,539
11,009
3.946719
0.107862
0.044452
0.019756
0.025683
0.573757
0.517781
0.482549
0.424432
0.401712
0.387224
0
0.011257
0.306022
11,009
257
122
42.836576
0.78377
0.002816
0
0.39604
0
0
0.047923
0
0
0
0
0
0
1
0.09901
false
0
0.034653
0.014851
0.217822
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f439222f5a9cee3a82981ad6666b33d56810e907
3,571
py
Python
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py
SamanFekri/BookRecommendation
07dfa875154af39546cb263d4407339ce26d47e8
[ "MIT" ]
null
null
null
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py
SamanFekri/BookRecommendation
07dfa875154af39546cb263d4407339ce26d47e8
[ "MIT" ]
null
null
null
Code_Hybrid_SLIMBPR_CBF_RP3Beta.py
SamanFekri/BookRecommendation
07dfa875154af39546cb263d4407339ce26d47e8
[ "MIT" ]
null
null
null
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import scipy.sparse as sps
import time

RM_train = pd.read_csv('./input/data_train.csv')
R_test = pd.read_csv('./input/data_target_users_test.csv')
URM = pd.read_csv('./input/data_train.csv')
ICM = pd.read_csv('./input/data_ICM_title_abstract.csv')

##### URM
URM_tuples = [tuple(x) for x in URM.to_numpy()]

userList, itemList, ratingList = zip(*URM_tuples)

userList = list(userList)
userList = np.array(userList, dtype=np.int64)
itemList = list(itemList)
itemList = np.array(itemList, dtype=np.int64)
ratingList = list(ratingList)  # not needed
ratingList = np.array(ratingList, dtype=np.int64)  # not needed

URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()

#### ICM
ICM_tuples = [tuple(x) for x in ICM.to_numpy()]

itemList_icm, featureList_icm, scoreList_icm = zip(*ICM_tuples)

itemList_icm = list(itemList_icm)
itemList_icm = np.array(itemList_icm, dtype=np.int64)
featureList_icm = list(featureList_icm)
featureList_icm = np.array(featureList_icm, dtype=np.int64)
scoreList_icm = list(scoreList_icm)
scoreList_icm = np.array(scoreList_icm, dtype=np.float64)

ICM_all = sps.coo_matrix((scoreList_icm, (itemList_icm, featureList_icm)))

#### Test
userTestList = [x for x in R_test.to_numpy()]
userTestList = zip(*userTestList)
userTestList = [list(a) for a in userTestList][0]

#### make validation and test
from Base.Evaluation.Evaluator import EvaluatorHoldout
from Data_manager.split_functions.split_train_validation_random_holdout import split_train_in_two_percentage_global_sample

URM_train, URM_test = split_train_in_two_percentage_global_sample(URM_all, train_percentage=0.80)
URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage=0.80)

evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[10])
evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[10])

### hybrid recommender
### Using TF-IDF
ICM_all = ICM_all.tocsr()
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
items_per_feature = np.ediff1d(ICM_all.indptr) + 1
# print(items_per_feature)
IDF = np.array(np.log(num_tot_items / items_per_feature))

from scipy.sparse import diags
ICM_idf = ICM_all.copy()
ICM_idf = diags(IDF) * ICM_idf

############## top pop
item_popularity = np.ediff1d(URM_all.tocsc().indptr)
popular_items = np.argsort(item_popularity)
popular_items = np.flip(popular_items, axis=0)
popular_items = popular_items[0:10]

###########
from HybridRecommender import HybridRecommender

recommender = HybridRecommender(URM_all)
recommender.fit([0.2, 0.3, 0.2], ICM_idf)
recoms = recommender.recommend(userTestList, cutoff=10)

recomList = []
for i in range(len(recoms)):
    user_id = userTestList[i]
    start_pos = URM_train.indptr[user_id]
    end_pos = URM_train.indptr[user_id + 1]
    if start_pos == end_pos:
        # Cold user: fall back to the most popular items
        recomList.append(' '.join(str(e) for e in popular_items))
    else:
        recomList.append(' '.join(str(e) for e in recoms[i]))

# print(recomList)
res = {"user_id": userTestList, "item_list": recomList}
result = pd.DataFrame(res, columns=['user_id', 'item_list'])
result.to_csv('outputs/hybrid_slim_cbf_rp3v1.csv', index=False, header=True)
31.324561
122
0.758891
543
3,571
4.74954
0.309392
0.019
0.017449
0.021714
0.138813
0.124855
0.093059
0.072896
0.034897
0
0
0.013659
0.118454
3,571
113
123
31.60177
0.805591
0.133296
0
0
0
0
0.059308
0.048105
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43a93adbb44a173a83f3be2da8ae94b9ee5a0d3
989
py
Python
dodge/config.py
MoyTW/7DRL2016_Rewrite
99e092dcb8797a25caa3c8a989a574efae19e4d4
[ "MIT" ]
2
2020-05-10T02:16:28.000Z
2021-04-05T21:54:10.000Z
dodge/config.py
MoyTW/7DRL2016_Rewrite
99e092dcb8797a25caa3c8a989a574efae19e4d4
[ "MIT" ]
null
null
null
dodge/config.py
MoyTW/7DRL2016_Rewrite
99e092dcb8797a25caa3c8a989a574efae19e4d4
[ "MIT" ]
null
null
null
import json


class Config(object):
    def __init__(self, file_location):
        with open(file_location, 'r') as f:
            config = json.load(f)
        self.SCREEN_WIDTH = int(config["SCREEN_WIDTH"])
        self.SCREEN_HEIGHT = int(config["SCREEN_HEIGHT"])
        self.MAP_WIDTH = int(config["MAP_WIDTH"])
        self.MAP_HEIGHT = int(config["MAP_HEIGHT"])
        self.PANEL_HEIGHT = int(config["PANEL_HEIGHT"])
        self.FULL_SCREEN = bool(config["FULL_SCREEN"])
        self.CAMERA_WIDTH = int(config["CAMERA_WIDTH"])
        self.CAMERA_HEIGHT = int(config["CAMERA_HEIGHT"])
        self.VISION_RADIUS = int(config["VISION_RADIUS"])
        self.FOV_ALGO = int(config["FOV_ALGO"])
        self.FOV_LIGHT_WALLS = bool(config["FOV_LIGHT_WALLS"])
        self.HP_BAR_WIDTH = int(config["HP_BAR_WIDTH"])

        # Derived values
        self.PANEL_Y = self.SCREEN_HEIGHT - self.PANEL_HEIGHT
        # etc etc etc
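A usage sketch with a hypothetical config file (every field value below is made up; the real game config will differ):

import json
import tempfile

sample = {"SCREEN_WIDTH": 80, "SCREEN_HEIGHT": 50, "MAP_WIDTH": 80, "MAP_HEIGHT": 43,
          "PANEL_HEIGHT": 7, "FULL_SCREEN": False, "CAMERA_WIDTH": 80, "CAMERA_HEIGHT": 43,
          "VISION_RADIUS": 10, "FOV_ALGO": 0, "FOV_LIGHT_WALLS": True, "HP_BAR_WIDTH": 20}

# Write the sample config to a temp file, then load it through Config (as defined above).
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(sample, f)

cfg = Config(f.name)
print(cfg.PANEL_Y)   # derived value: 50 - 7 = 43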
41.208333
66
0.605662
123
989
4.569106
0.300813
0.160142
0.099644
0.074733
0
0
0
0
0
0
0
0
0.273003
989
23
67
43
0.781641
0.026289
0
0
0
0
0.146875
0
0
0
0
0
0
1
0.055556
false
0
0.055556
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43be3dcb74991918120ac726f26bac6d8cff63f
524
py
Python
incal_lib/create_dataframe.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
incal_lib/create_dataframe.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
incal_lib/create_dataframe.py
barel-mishal/InCal_lib
3aa63ebccf2ed3277fac55049c88178541cbb94b
[ "MIT" ]
null
null
null
import pandas as pd
import numpy as np


def create_calr_example_df(n_rows, start_date):
    '''Create an example DataFrame with a minute-frequency DatetimeIndex
    and two features for each of two subjects.'''
    np.random.seed(20)
    array = np.random.rand(n_rows)
    cumulative = np.cumsum(array)
    d = {
        'feature1_subject_1': array,
        'feature1_subject_2': array,
        'feature2_subject_1': cumulative,
        'feature2_subject_2': cumulative * 2
    }
    idx = pd.date_range(start_date, periods=n_rows, freq="MIN",
                        name='Date_Time_1')
    return pd.DataFrame(data=d, index=idx)
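A minimal usage sketch (the arguments are made up):

df = create_calr_example_df(n_rows=3, start_date="2021-01-01")
print(df)
# Expect a 3-row frame indexed by Date_Time_1 at one-minute steps
# (2021-01-01 00:00, 00:01, 00:02) with the four feature columns above.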
24.952381
55
0.622137
71
524
4.323944
0.549296
0.04886
0
0
0
0
0
0
0
0
0
0.030928
0.259542
524
20
56
26.2
0.760309
0
0
0
0
0
0.167969
0
0
0
0
0
0
1
0.066667
false
0
0.133333
0
0.266667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43cde7e64305b95ccb8abd4674e469455ce57e1
4,663
py
Python
HybridSN/DataLoadAndOperate.py
lms-07/HybridSN
7580d67a5879d5b53ced75a653d4f198a8aefde2
[ "MIT" ]
null
null
null
HybridSN/DataLoadAndOperate.py
lms-07/HybridSN
7580d67a5879d5b53ced75a653d4f198a8aefde2
[ "MIT" ]
null
null
null
HybridSN/DataLoadAndOperate.py
lms-07/HybridSN
7580d67a5879d5b53ced75a653d4f198a8aefde2
[ "MIT" ]
null
null
null
import os
import numpy as np
import scipy.io as sio
import tifffile
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split


# Load dataset
def loadData(name, data_path):
    if name == 'IP':
        data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
    elif name == 'SA':
        data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
    elif name == 'PU':
        data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
        labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
    elif name == 'HU13':
        # dict_keys(['__header__', '__version__', '__globals__', 'Houston'])
        # dict_values([b'MATLAB 5.0 MAT-file, Platform: PCWIN64, Created on: Wed Jul 17 16:45:01 2019', '1.0', [], array()])
        # data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))
        # labels = sio.loadmat(os.path.join(data_path, 'Houston_gt.mat'))
        data = sio.loadmat(os.path.join(data_path, 'Houston.mat'))['Houston']
        labels = sio.loadmat(os.path.join(data_path, 'Houston_gt.mat'))['Houston_gt']
    elif name == 'KSC':
        data = sio.loadmat(os.path.join(data_path, 'KSC.mat'))['KSC']
        labels = sio.loadmat(os.path.join(data_path, 'KSC_gt.mat'))['KSC_gt']
    return data, labels


# Use the tifffile package to read the hyperspectral image.
# Load the .tiff data set and convert it to .mat data.
def loadTifDataTomat(data_path, save_DataPath, name):
    if name == 'HU13':
        totalTif = tifffile.imread(os.path.join(data_path, '2013_IEEE_GRSS_DF_Contest_CASI.tif'))
        trainTif = tifffile.imread(os.path.join(data_path, 'train_roi.tif'))
        valTif = tifffile.imread(os.path.join(data_path, 'val_roi.tif'))
        print(totalTif.shape, trainTif.shape, valTif.shape)
        # spectral.imshow(totalTif)
        # spectral.imshow(trainTif)
        sio.savemat(os.path.join(save_DataPath, "totalTifHouston13.mat"), {'totalTifHouston13': totalTif})
        sio.savemat(os.path.join(save_DataPath, "trainTifHouston13.mat"), {'trainTifHouston13': trainTif})
        sio.savemat(os.path.join(save_DataPath, "valTifHouston13.mat"), {'valTifHouston13': valTif})


def loadTifMat(data_path, name):
    if name == 'HU13':
        data = sio.loadmat(os.path.join(data_path, 'totalTifHouston13.mat'))['totalTifHouston13']
        train = sio.loadmat(os.path.join(data_path, 'trainTifHouston13.mat'))['trainTifHouston13']
        val = sio.loadmat(os.path.join(data_path, 'valTifHouston13.mat'))['valTifHouston13']
        return data, train, val


### Use PCA to remove the spectral redundancy:
### reduce the spectral dimension from high-dimensional to low-dimensional.
def applyPCA(X, numComponents=75):
    newX = np.reshape(X, (-1, X.shape[2]))
    pca = PCA(n_components=numComponents, whiten=True)
    newX = pca.fit_transform(newX)
    newX = np.reshape(newX, (X.shape[0], X.shape[1], numComponents))
    return newX, pca


### Padding zeros
def padWithZeros(X, margin=2):
    newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))
    x_offset = margin
    y_offset = margin
    newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X
    return newX


### Create data cubes (3D patches).
def createImageCubes(X, y, windowSize=5, removeZeroLabels=True):
    margin = int((windowSize - 1) / 2)
    zeroPaddedX = padWithZeros(X, margin=margin)
    # split patches
    patchesData = np.zeros((X.shape[0] * X.shape[1], windowSize, windowSize, X.shape[2]))
    patchesLabels = np.zeros((X.shape[0] * X.shape[1]))
    patchIndex = 0
    for r in range(margin, zeroPaddedX.shape[0] - margin):
        for c in range(margin, zeroPaddedX.shape[1] - margin):
            patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
            patchesData[patchIndex, :, :, :] = patch
            patchesLabels[patchIndex] = y[r - margin, c - margin]
            patchIndex = patchIndex + 1
    if removeZeroLabels:
        patchesData = patchesData[patchesLabels > 0, :, :, :]
        patchesLabels = patchesLabels[patchesLabels > 0]
        patchesLabels -= 1
    return patchesData, patchesLabels


# Dataset split.
def splitTrainTestSet(X, y, testRatio, randomState=345):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=testRatio,
                                                        random_state=randomState,
                                                        stratify=y)
    return X_train, X_test, y_train, y_test
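A minimal end-to-end sketch of the PCA/patch/split pipeline on random data (all shapes are made up; real cubes come from loadData, and the stratified split assumes at least two labeled pixels per class):

import numpy as np

X = np.random.rand(8, 8, 30)                 # toy 8x8 scene, 30 spectral bands
y = np.random.randint(0, 4, size=(8, 8))     # label 0 = unlabeled background

X_pca, _ = applyPCA(X, numComponents=10)     # 30 bands -> 10 components
cubes, labels = createImageCubes(X_pca, y, windowSize=5)
print(cubes.shape)                           # (n_labeled_pixels, 5, 5, 10)
X_tr, X_te, y_tr, y_te = splitTrainTestSet(cubes, labels, testRatio=0.3)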
43.579439
123
0.671885
633
4,663
4.810427
0.249605
0.055172
0.068966
0.082759
0.321839
0.298194
0.293596
0.233169
0.102463
0.0578
0
0.022763
0.180356
4,663
107
124
43.579439
0.77394
0.139181
0
0.028169
0
0
0.137525
0.046844
0
0
0
0
0
1
0.098592
false
0
0.084507
0
0.267606
0.014085
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43e3816708a9a04921f14baa15850bfa0137251
1,873
py
Python
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
alipay/aop/api/domain/AlipayOpenIotmbsDooropenresultSyncModel.py
antopen/alipay-sdk-python-all
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *


class AlipayOpenIotmbsDooropenresultSyncModel(object):

    def __init__(self):
        self._dev_id = None
        self._door_state = None
        self._project_id = None

    @property
    def dev_id(self):
        return self._dev_id

    @dev_id.setter
    def dev_id(self, value):
        self._dev_id = value

    @property
    def door_state(self):
        return self._door_state

    @door_state.setter
    def door_state(self, value):
        self._door_state = value

    @property
    def project_id(self):
        return self._project_id

    @project_id.setter
    def project_id(self, value):
        self._project_id = value

    def to_alipay_dict(self):
        params = dict()
        if self.dev_id:
            if hasattr(self.dev_id, 'to_alipay_dict'):
                params['dev_id'] = self.dev_id.to_alipay_dict()
            else:
                params['dev_id'] = self.dev_id
        if self.door_state:
            if hasattr(self.door_state, 'to_alipay_dict'):
                params['door_state'] = self.door_state.to_alipay_dict()
            else:
                params['door_state'] = self.door_state
        if self.project_id:
            if hasattr(self.project_id, 'to_alipay_dict'):
                params['project_id'] = self.project_id.to_alipay_dict()
            else:
                params['project_id'] = self.project_id
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = AlipayOpenIotmbsDooropenresultSyncModel()
        if 'dev_id' in d:
            o.dev_id = d['dev_id']
        if 'door_state' in d:
            o.door_state = d['door_state']
        if 'project_id' in d:
            o.project_id = d['project_id']
        return o
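A round-trip sketch of the dict serialization the model provides (field values are made up):

m = AlipayOpenIotmbsDooropenresultSyncModel()
m.dev_id = "device-001"
m.door_state = "open"
m.project_id = "proj-42"

d = m.to_alipay_dict()
print(d)   # {'dev_id': 'device-001', 'door_state': 'open', 'project_id': 'proj-42'}

m2 = AlipayOpenIotmbsDooropenresultSyncModel.from_alipay_dict(d)
print(m2.door_state)   # 'open'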
26.380282
71
0.584624
238
1,873
4.289916
0.176471
0.073457
0.061704
0.054848
0.265426
0.261508
0
0
0
0
0
0.000781
0.31607
1,873
70
72
26.757143
0.796253
0.022424
0
0.109091
0
0
0.079912
0
0
0
0
0
0
1
0.163636
false
0
0.036364
0.054545
0.327273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f43f0adac87483d74d65bc876a1b45c40eb3778c
958
py
Python
setup.py
ghost58400/marlin-binary-protocol
fb93603866ecfce84e887c159bbbb9f9d2f01f17
[ "MIT" ]
null
null
null
setup.py
ghost58400/marlin-binary-protocol
fb93603866ecfce84e887c159bbbb9f9d2f01f17
[ "MIT" ]
null
null
null
setup.py
ghost58400/marlin-binary-protocol
fb93603866ecfce84e887c159bbbb9f9d2f01f17
[ "MIT" ]
null
null
null
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="marlin_binary_protocol",
    version="0.0.7",
    author="Charles Willis",
    author_email="charleswillis3@users.noreply.github.com",
    description="Transfer files with Marlin 2.0 firmware using Marlin Binary Protocol Mark II",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/charleswillis3/marlin-binary-protocol",
    packages=setuptools.find_packages(),
    install_requires=["heatshrink2>=0.9", "pyserial>=3.4",
                      "backports.time_perf_counter; python_version < '3.3'"],
    classifiers=[
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4',
)
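The environment marker on the backports dependency above can be evaluated with the `packaging` library (assumed to be installed; it is not part of this setup.py):

from packaging.markers import Marker

marker = Marker("python_version < '3.3'")
print(marker.evaluate())   # False on any modern interpreter, so pip skips
                           # the backports.time_perf_counter dependency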
38.32
114
0.662839
119
958
5.210084
0.579832
0.096774
0.096774
0.096774
0
0
0
0
0
0
0
0.037688
0.169102
958
24
115
39.916667
0.741206
0
0
0
0
0.045455
0.535491
0.092902
0
0
0
0
0
1
0
false
0
0.045455
0
0.045455
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44057beff2cbba250db617a96a21c14300e3ae1
18,028
py
Python
taut_euler_class.py
henryseg/Veering
50ebdcd5bde582726aefdd564c43e17890651282
[ "CC0-1.0" ]
2
2020-08-17T21:38:16.000Z
2021-08-29T21:38:43.000Z
taut_euler_class.py
henryseg/Veering
50ebdcd5bde582726aefdd564c43e17890651282
[ "CC0-1.0" ]
null
null
null
taut_euler_class.py
henryseg/Veering
50ebdcd5bde582726aefdd564c43e17890651282
[ "CC0-1.0" ]
null
null
null
#
# taut_euler_class.py
#

from file_io import parse_data_file, write_data_file
from taut import liberal, isosig_to_tri_angle
from transverse_taut import is_transverse_taut
from sage.matrix.constructor import Matrix
from sage.modules.free_module_element import vector
from sage.arith.misc import gcd
from sage.arith.functions import lcm

# Goal - given a transverse taut triangulation, decide if the associated
# "absolute" euler class is torsion or not.  If it is torsion, determine
# its order.

# Contents and overview:
# 1. References.
#
# 2. Background.
#
# 3. Helper functions.
#
# 4. Truncate.  We build the correct "truncated" cell structure \calT'
#    from (M, \calT) and give generators for the cochain groups
#    C^k(\calT', \ZZ) (k = 1, 2).
#
# 5. Representative.  We find a two-cocycle E \in Z^2(\calT', \ZZ) that
#    represents E(\calT) \in H^2(M, \ZZ).
#
# 6. Coboundary.  We find the matrix for the coboundary operator \delta^1.
#
# 7. Linear algebra.  We solve the linear problem to decide if E is a
#    coboundary - that is, if E lies in B^2(\calT', \ZZ) - that is, if E
#    is in the image of \delta^1.
#
# 8. Remarks.
#
# 9. Calling code.

# 1. References.
#
# Culler, Dunfield - Orderability and Dehn filling
# Ghys - Groups acting on the circle
# Thurston - A norm for the homology of three-manifolds
# Candel, Conlon - Foliations, chapter four

# 2. Background:

# Suppose that (M, \calT) is a transverse taut triangulation.  Then
# \calT^{2} is the "horizontal branched surface".  This carries various
# laminations, which extend to foliations on M.  All of these have the
# same Euler class, which we will denote E(\calT) \in H^2(M, \ZZ).
# Suppose that \calF is a carried foliation and let UT\calF be the unit
# tangent bundle over \calF.  The Euler class E vanishes exactly when
# UT\calF has a section; that is, when the unit tangent bundle is
# trivialisable.

# Recall:
# Suppose that X is an F-bundle over B.  We have
#
#        i
#  F -------> X <--.
#             |    |
#             |    |
#            p|    |s
#             |    |
#             v    |
#             B ---'
#
# So s \from B \to X is a \emph{section} if p \circ s = Id_B.

# 3. Helper functions


def diagonal(D):
    return [D[i][i] for i in range(min(D.dimensions()))]


# 4. Truncate.

# Suppose that M is a connected, cusped, oriented three-manifold.  Let
# C = C(M) \geq 1 be the number of cusps of M.  Suppose that \calT is a
# transverse taut ideal triangulation of M.  Let T = T(\calT) \geq 1 be
# the number of tetrahedra of \calT.

# We use Regina to number and orient the edges \{e_i\}_{i = 0}^{T-1}, the
# faces \{f_i\}_{i = 0}^{2T-1}, and the tetrahedra \{t_i\}_{i = 0}^{T-1}
# of \calT.  We call all of these \emph{ideal} cells.  Note that the
# transverse structure also gives us co-orientations of the e_i and the
# f_i, called "upwards".

# We remove a small open neighbourhood of all ideal vertices of all model
# tetrahedra.  This gives the \emph{truncated} cell structure \calT'.
# The remains of the ideal cells are called \emph{truncated} cells; we
# abuse and reuse the notations e_i and f_i for these.  The truncated
# cells inherit orientations and co-orientations.  The new cells are
# called \emph{peripheral} cells.  We number these as follows:

# e_{ij} is the peripheral edge cutting vertex v_j off of ideal face f_i
# f_{ij} is the peripheral face cutting vertex v_j off of ideal tetrahedron t_i

# Note that every truncated face is combinatorially a hexagon.  The
# boundary of this hexagon contains three truncated edges alternating
# with three peripheral edges.  We orient each peripheral edge e_{ij} so
# that the orientation of e_{ij} agrees with the orientation induced by
# \bdy f_i.  We orient each peripheral face f_{ij} anti-clockwise, as
# viewed from infinity (that is, from outside of M).  Also, we equip
# e_{ij} and f_{ij} with co-orientations pointing out of M, called
# "outward".
#
#              e_{i0}
#               ---
#         e_2 /     \ e_1
#            /       \
#           /   f_i   \
#           \         /
#     e_{i1} --------- e_{i2}
#               e_0

# For an edge e or a face f we use e^* and f^* to denote the dual in
# C^1(\calT', \ZZ) or C^2(\calT', \ZZ).  Thus \{e^*_i\} \cup \{e^*_{ij}\}
# generates C^1(\calT', \ZZ) while \{f^*_i\} \cup \{f^*_{ij}\} generates
# C^2(\calT', \ZZ).

# For more pictures, see
# /Veering_code/NotesPictures/euler_notes_from_nathan.jpg

# 5. Representative

# We now construct a two-cocycle E \in Z^2(\calT', \ZZ).  For every
# peripheral face f we take
#
#     E(f) = 0.

# \begin{remark}
# To see that this is correct, let \calF be any foliation of M,
# transverse to the boundary.  Suppose that f is the given peripheral
# triangle.  We have a section of the restriction of UT\calF to \bdy f;
# namely the outward field.  This extends over f to give a section of
# UT\calF restricted to f.  So there is no obstruction to the extension.
# See below for a more precise discussion in terms of "Poincar\'e-Hopf
# index".
# \end{remark}

# Now suppose that f is a truncated face.  Suppose that e_0, e_1, e_2 are
# its three truncated edges.  Recall that these are all oriented.  Let
# AC(f) be the number of the edges e_0, e_1, e_2 that are oriented
# anti-clockwise (that is, agree with their induced orientation coming
# from f).  We take
#
#     E(f) = AC(f) - 2

# If we flip the transverse direction: AC(f') = 3 - AC(f), so
# E(f') = AC(f') - 2 = 1 - AC(f) = 2 - AC(f) - 1 = -E(f) - 1.

# \begin{remark}
# Here is one way to remember (and explain!) this rule.  Suppose that f
# is the given truncated face.  Suppose that s is a section of UTf | \bdy f.
# Then index(s) is the total rotation of s with respect to the tangent
# field, _plus_ one.  This can be rephrased in terms of the index of
# tangent vector fields extending s over all of f.

# Our choices of orientations of edges determine a section of UTf | \bdy f.
# Since all of the boundary edges e_{ij} of f are oriented the same way,
# we choose a standard framing there; Nathan tells us to just use the
# outward pointing section on all of the e_{ij}.  Our choice of section
# on e_0 (say) has to (a) depend only on the orientation of e_0 and (b)
# has to be outward at the endpoints of e_0.  The simplest choice is the
# section that rotates by +\pi with respect to the tangent along \bdy f_i,
# as we move forward along e_0.  So s points _back_ at the beginning of
# e_0, points _right_ in the middle of e_0, and points _forwards_ at the
# end of e_0.  The total rotation of the resulting field (with respect
# to the tangent field) is AC(f) - 3.  Thus E(f) = AC(f) - 2 is the
# index.  You can check this works by drawing the four possible pictures
# and computing the index of any extension of s over f.
# \end{remark}

# Claim: \delta^2 E = 0.  That is, E is a cocycle.

# Proof of claim: Fix a truncated tetrahedron t and fix some orientation
# of its truncated edges.  A direct calculation shows that
# \delta E (t) = E \bdy t = 0.  Likewise, a direct computation shows
# that switching the orientation of a single edge leaves E \bdy t
# unchanged.  QED.

### It would be nice to have a less computational proof!


def euler_cocycle(tri, angle):
    """
    Given a regina triangulation "tri", with oriented edges, and a
    transverse taut angle structure "angle", returns the associated
    two-cocycle E representing the Euler class E(tri).
    """
    assert is_transverse_taut(tri, angle)
    face_coorientations = is_transverse_taut(tri, angle, return_type="face_coorientations")
    # E will be a _row_ vector, because it eats column vectors.
    E = []
    # First deal with the truncated faces
    for face in tri.faces(2):  # 2 = dimension
        # First we compute the number of Regina oriented edges that agree
        # with the Regina orientation on face
        AC = 0
        for i in range(3):
            perm = face.faceMapping(1, i)
            # print perm[0], perm[1]
            if perm[1] == ((perm[0] + 1) % 3):
                # the edge and face orientations agree so,
                AC = AC + 1
        # print "AC", AC
        # Now we condition on whether or not Regina and angle agree on
        # the (co-)orientation of the face.
        if face_coorientations[face.index()] == 1:
            E.append(AC - 2)
        else:
            E.append(1 - AC)
    # Now deal with the peripheral faces
    for tet in tri.tetrahedra():
        for j in range(4):
            E.append(0)
    return E


# 6. Coboundary

# Suppose that e is a truncated edge.  Let LF be the set of truncated
# faces to the left of e and let RF be the set of faces to the right.
# Then
#
#     \delta e^* = \sum_{f \in LF} f^* - \sum_{f \in RF} f^*.

# Suppose that e is a peripheral edge.  So there is a unique truncated
# face f meeting e.  Note that f is to the left of e.  There are also a
# pair of boundary faces meeting e: say f' _above_ e and f'' _below_ e.
# Then
#
#     \delta e^* = f^* + (f')^* - (f'')^*.


def coboundary(tri, angle):
    """
    Given a triangulation "tri" (T), with oriented edges, and a
    transverse taut angle structure "angle", returns the co-boundary
    operator delta^1 \from C^1(T', ZZ) \to C^2(T', ZZ), as a matrix, for
    the truncated triangulation T'.  Note that, strictly speaking, we
    don't need to use "angle" for this, but we use it to determine
    orientation on faces for the Euler class, so we might as well use it
    again here.
    """
    # \delta^1 takes row vectors (functions on edges) and spits out row
    # vectors (functions on faces).  So, if c is a one-cochain then
    # c \cdot \delta is a two-cochain.
    delta = []
    assert is_transverse_taut(tri, angle)
    tet_vert_coorientations = is_transverse_taut(tri, angle, return_type="tet_vert_coorientations")
    face_coorientations = is_transverse_taut(tri, angle, return_type="face_coorientations")

    for edge in tri.edges():
        # A row for every truncated edge
        row = []
        for face in tri.triangles():
            # A row entry for every truncated face
            count = 0
            for i in range(3):
                if face.edge(i) == edge:
                    perm = face.faceMapping(1, i)
                    if perm[1] == ((perm[0] + 1) % 3):
                        # the edge and face orientations agree so,
                        count += 1
                    else:
                        count -= 1
            row.append(count * face_coorientations[face.index()])
            # +1 if face is to the left of the edge, -1 if face is to the
            # right of the edge, using Regina's edge orientation when
            # viewed from above (using the transverse taut notion of up)
            #
            #            ,'|
            #          ,'  |
            #        ,'    |
            #      ,' CCW  |   gets a +1
            #      `.      ^
            #        `.    |
            #          `.  |
            #            `.|
        for tet in tri.simplices():
            for i in range(4):
                row.append(0)
        delta.append(row)

    for face in tri.triangles():
        face_embeddings = []
        for j in range(2):
            face_embeddings.append(face.embedding(j))
        for i in range(3):  # vertices of the face
            # A row for every peripheral edge
            row = []
            for face2 in tri.triangles():
                # A row entry for every truncated face
                if face2 == face:
                    row.append(1)
                else:
                    row.append(0)
            for tet in tri.simplices():
                for k in range(4):
                    # A row entry for every peripheral face
                    count = 0
                    for j in range(2):
                        if (tet == face_embeddings[j].simplex()) and (face_embeddings[j].vertices()[i] == k):
                            # the tetrahedron is on the jth side of the
                            # face and the ith vertex of face is the kth
                            # vertex of tet
                            face_num_in_tet = face_embeddings[j].vertices()[3]
                            count -= tet_vert_coorientations[tet.index()][face_num_in_tet]
                            # tet_vert_coorientations is +1 if the
                            # coorientation on the face points out of the
                            # tetrahedron, and we want count += 1 if the
                            # peripheral face is above the peripheral edge
                    row.append(count)
            delta.append(row)
    return delta


# 7. Linear algebra

# We ask: is there a one-cocycle C \in C^1(\calT', \ZZ) so that
# \delta C = E?  If so, then [E] = E(\calT) is zero in H^2, as desired.
# This is a linear algebra problem, so can be solved by, say, sage.


def order_of_euler_class(delta, E):
    """
    Given the coboundary operator delta and an Euler two-cocycle E,
    returns k if [E] is k--torsion.  By convention, returns zero if [E]
    is non-torsion.  Note that the trivial element is 1--torsion.
    """

    delta = Matrix(delta)
    E = vector(E)

    # Note that E is a coboundary if there is a one-cocycle C solving
    #
    #     E = C*delta
    #
    # We can find C (if it exists at all) using Smith normal form.

    D, U, V = delta.smith_form()
    assert D == U * delta * V

    # So we are trying to solve
    #
    #     C*delta = C*U.inverse()*D*V.inverse() = E
    #
    # for a one-cochain C.  Multiply by V to get
    #
    #     C*delta*V = C*U.inverse()*D = E*V
    #
    # Now set
    #
    #     B = C*U.inverse(), and so B*U = C
    #
    # and rewrite to get
    #
    #     B*U*delta*V = B*D = E*V
    #
    # So define E' by:

    Ep = E * V

    # Finally we attempt to solve B * D = Ep.  Note that D is diagonal:
    # so if we can solve all of the equations
    #
    #     B[i] * D[i][i] == Ep[i]
    #
    # with B[i] integers, then [E] = 0 in cohomology.

    diag = diagonal(D)

    if any((diag[i] == 0 and Ep[i] != 0) for i in range(len(Ep))):
        return 0

    # All zeros are at the end in Smith normal form.  Since we've passed
    # the above we can now remove them (guarding against the case where
    # there are none).
    if 0 in diag:
        first_zero = diag.index(0)
        diag = diag[:first_zero]
        Ep = Ep[:first_zero]

    # Since diag[i] is (now) never zero we can divide to get the
    # fractions Ep[i]/diag[i] and then find the scaling that makes them
    # simultaneously integral.
    denoms = [diag[i] / gcd(Ep[i], diag[i]) for i in range(len(Ep))]
    return lcm(denoms)


# 8. Remarks

# a) Here is a nice trick that proves [E] = 0 in some cases.  Suppose
# that \gamma is an oriented path in \bdy M.  Suppose that \gamma is
# transverse to the one-skeleton of \calT'.  We form a one-cocycle
# D_\gamma by adding up the boundary edges that \gamma crosses, with
# sign.  The sign is positive if \gamma crosses from below to above, and
# negative otherwise.  Note that \delta D_\gamma vanishes on all
# boundary faces.

# b) Marc Lackenby says that we should take the paths that go up through
# the centres of tetrahedra and take the Poincare dual.  BUT I think
# this is not what we want...  Marc is thinking of the relative Euler
# class as discussed on page 390 of his paper "Taut ideal triangulations
# of three-manifolds".  The relative Euler class lives in H^2(M, \bdy M),
# so is Poincare dual to an element of H_1(M), represented by a
# collection of loops.

# c) [2019-03-31] It seems that, for transverse veering triangulations
# in the 16 census, the Euler class is always zero or two-torsion.  Note
# that there are manifolds M in the census where H^2(M, \ZZ) has
# positive rank...  What about odd torsion?

# Question: If the veering triangulation is edge-orientable, does the
# Euler class vanish?

# Answer: Yes.  Here is a version of a discussion with Nathan
# [2020-04-03] - he says the following:

# Suppose that F is a foliation carried by the horizontal branched
# surface.  Let UTF be the unit tangent bundle to F.  We think of e(UTF)
# as being the obstruction to UTF having a section.  Let G be the
# foliation carried by the upper (aka green) branched surface.  If G is
# transversely orientable (aka edge-orientability of the veering
# triangulation) then G \cap F gives the desired section, and e(UTF) = 0.
# Note that G \cap F gives, for every point, a pair of points in the
# unit tangent circle.  So let PUTF be the projective unit tangent
# bundle to F.  This definitely has a section, so e(PUTF) = 0.  Now, the
# bundle UTF is a double cover of the bundle PUTF.

# Claim: The euler class is multiplicative with respect to covers (in
# both senses).

# With the claim in hand, we have
#
#     2 * e(UTF) = e(PUTF) = 0
#
# We deduce that e(UTF) is either zero or two-torsion.

# 9. Calling code


@liberal
def order_of_euler_class_wrapper(tri, angle):
    """
    Returns the order of the euler class.
    """
    return order_of_euler_class(coboundary(tri, angle), euler_cocycle(tri, angle))


def compute_order_of_euler_classes(file_in, number=None, file_out=None):
    data_in = parse_data_file(file_in)
    data_in = [line.split(" ") for line in data_in]
    if number != None:
        data_in = data_in[:number]
    data_out = []
    evil = []
    for i, line in enumerate(data_in):
        if i % 50 == 0:
            print(((1.0 * i) / (1.0 * len(data_in)), len(data_out)))
        sig = line[0]
        tri, angle = isosig_to_tri_angle(sig)
        # angle = [int(letter) for letter in angle_s]
        curr_euler = order_of_euler_class(coboundary(tri, angle), euler_cocycle(tri, angle))
        if curr_euler == 0:  # by convention, zero means [E] is non-torsion
            evil.append(sig)
            print(sig + " has non-torsion Euler class!!!!")
        elif curr_euler == 1:
            # order is one so [E] = 0.  Boring.
            pass
        else:
            line_out = [sig, str(curr_euler)]
            line_out.extend(line[1:])
            data_out.append(line_out)
    if file_out != None:
        write_data_file(data_out, file_out)
    print(("list of evil:", evil))
    return data_out
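The Smith-normal-form torsion computation in order_of_euler_class can be checked without Sage. A minimal sketch on a hand-made diagonal (the elementary divisors and cocycle values below are invented for illustration):

from math import gcd
from functools import reduce

def lcm2(a, b):
    return a * b // gcd(a, b)

# Hypothetical Smith normal form diagonal of delta and transformed cocycle Ep = E*V.
diag = [1, 1, 2, 0]   # elementary divisors; trailing zeros are the free part
Ep   = [3, 5, 1, 0]

# Non-torsion: Ep is nonzero where the divisor is zero.
if any(d == 0 and e != 0 for d, e in zip(diag, Ep)):
    print(0)
else:
    pairs = [(d, e) for d, e in zip(diag, Ep) if d != 0]
    # Smallest k making every k*e/d integral is the lcm of the d/gcd(e, d).
    denoms = [d // gcd(e, d) for d, e in pairs]
    print(reduce(lcm2, denoms, 1))   # here: lcm(1, 1, 2) = 2, i.e. [E] is 2-torsion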
35.279843
109
0.625194
2,873
18,028
3.859729
0.194222
0.005681
0.008206
0.006944
0.147534
0.103165
0.069438
0.058797
0.054468
0.050501
0
0.012892
0.27718
18,028
510
110
35.34902
0.838078
0.684713
0
0.258929
0
0
0.022256
0.004338
0
0
0
0
0.026786
1
0.053571
false
0.008929
0.0625
0.008929
0.178571
0.026786
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f442619ffa1142c65bd44ce29ca3a9c6c0e0aea7
5,153
py
Python
preprocess/utils/liftOver_vcf.py
Rongtingting/xcltk
2e86207c45a1caa7f905a89e1c121c3c203eab2d
[ "Apache-2.0" ]
null
null
null
preprocess/utils/liftOver_vcf.py
Rongtingting/xcltk
2e86207c45a1caa7f905a89e1c121c3c203eab2d
[ "Apache-2.0" ]
null
null
null
preprocess/utils/liftOver_vcf.py
Rongtingting/xcltk
2e86207c45a1caa7f905a89e1c121c3c203eab2d
[ "Apache-2.0" ]
2
2021-01-26T02:07:32.000Z
2021-02-03T03:56:55.000Z
# forked from https://github.com/single-cell-genetics/cellSNP
## A python wrap of UCSC liftOver function for vcf file
## UCSC liftOver binary and hg19 to hg38 chain file:
## https://genome.ucsc.edu/cgi-bin/hgLiftOver
## http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/liftOver
## http://hgdownload.soe.ucsc.edu/goldenPath/hg19/liftOver/hg19ToHg38.over.chain.gz

import sys
import gzip
import subprocess
from optparse import OptionParser

LIFTOVER_INFO = '##INFO=<ID=OLD,Number=1,Type=Integer,'
LIFTOVER_INFO += 'Description="position before liftover">\n'


def vcf_to_bed(vcf_file, out_file, chr_in=True):
    if vcf_file[-3:] == ".gz":
        is_gzip = True
        fid_in = gzip.open(vcf_file, "r")
    else:
        is_gzip = False
        fid_in = open(vcf_file, "r")

    fid_out = open(out_file, "w")
    for line in fid_in:
        if is_gzip:
            line = line.decode('utf-8')
        if line.startswith("#") == False:
            line_val = line.rstrip().split("\t")[:8]
            if chr_in and line_val[0].startswith("chr") == False:
                line_val[0] = "chr" + line_val[0]
            line_val[2] = str(int(line_val[1]) + 1)
            fid_out.writelines("\t".join(line_val[:3]) + "\n")

    fid_in.close()
    fid_out.close()
    return None


def update_vcf(vcf_file, bed_new, bed_unmap, out_file):
    ## unmapped lines
    unmap_pos = []
    _fid = open(bed_unmap, "r")
    for line in _fid:
        if not line.startswith("#"):
            _pos_id = "_".join(line.rstrip().split("\t")[:2])
            unmap_pos.append(_pos_id)
    _fid.close()

    if vcf_file[-3:] == ".gz":
        is_gzip = True
        fid_in = gzip.open(vcf_file, "r")
    else:
        is_gzip = False
        fid_in = open(vcf_file, "r")

    cnt1 = 0
    idx_unmap = 0
    fid_bed = open(bed_new, "r")
    fid_out = open(out_file, "w")
    for line in fid_in:
        if is_gzip:
            line = line.decode('utf-8')
        if line.startswith("#"):
            if line.startswith("#CHROM"):
                fid_out.writelines(LIFTOVER_INFO)
            fid_out.writelines(line)
        else:
            line_val = line.rstrip().split("\t")
            if idx_unmap < len(unmap_pos):
                _pos_id = "_".join(line_val[:2])
                if line_val[0].startswith("chr") == False:
                    _pos_id = "chr" + _pos_id
                if _pos_id == unmap_pos[idx_unmap]:
                    idx_unmap += 1
                    continue
            cnt1 += 1
            bed_line = fid_bed.readline()
            line_val[7] = "OLD=" + line_val[1] + ";" + line_val[7]
            line_val[1] = bed_line.rstrip().split("\t")[1]
            fid_out.writelines("\t".join(line_val) + "\n")

    print(cnt1, idx_unmap)
    fid_in.close()
    fid_bed.close()
    fid_out.close()
    return None


def main():
    import warnings
    warnings.filterwarnings('error')

    # parse command line options
    parser = OptionParser()
    parser.add_option("--chainFile", "-c", dest="chain_file", default=None,
                      help=("Chain file, full path."))
    parser.add_option("--inFile", "-i", dest="in_file", default=None,
                      help=("Input vcf file, full path."))
    parser.add_option("--outFile", "-o", dest="out_file", default=None,
                      help=("Output VCF file, full path."))
    parser.add_option("--liftOverPath", "-P", dest="liftOver_path", default=None,
                      help=("liftOver_path if it is not in PATH variable."))

    (options, args) = parser.parse_args()
    if len(sys.argv[1:]) == 0:
        print("liftOver-vcf: a wrap of UCSC liftOver for VCF file.\n")
        print("use -h or --help for help on argument.")
        sys.exit(1)

    in_file = options.in_file
    bed_file = options.in_file.split(".vcf")[0] + ".bed"
    new_bed_file = options.out_file.split(".vcf")[0] + ".bed"
    unmap_bed_file = options.out_file.split(".vcf")[0] + ".unmap.bed"

    ## generate bed file
    print("converting vcf to bed file ... ")
    vcf_to_bed(in_file, bed_file)

    ## UCSC liftOver on bed file
    chain_file = options.chain_file
    if options.liftOver_path is None:
        liftOver = "liftOver"
    else:
        # check if path exists
        liftOver = options.liftOver_path
    print("liftOver bed file ... ")
    bashCommand = "%s %s %s %s %s" % (liftOver, bed_file, chain_file,
                                      new_bed_file, unmap_bed_file)
    # print(bashCommand)
    pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    pro.communicate()[0]

    ## update vcf file
    out_file = options.out_file
    if out_file[-3:] == ".gz":
        out_file = out_file[:-3]
    print("updating vcf file ... ")
    update_vcf(in_file, new_bed_file, unmap_bed_file, out_file)

    print("gzip vcf file ... ")
    import shutil
    if shutil.which("bgzip") is not None:
        bashCommand = "bgzip -f %s" % (out_file)
    else:
        bashCommand = "gzip -f %s" % (out_file)
    pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
    pro.communicate()[0]

    return None


if __name__ == "__main__":
    main()
33.901316
83
0.581603
702
5,153
4.05698
0.229345
0.036868
0.015449
0.016854
0.305126
0.298455
0.254565
0.194874
0.153441
0.153441
0
0.013856
0.271686
5,153
151
84
34.125828
0.745004
0.09509
0
0.285714
0
0
0.135991
0.0125
0
0
0
0
0
1
0.02521
false
0
0.05042
0
0.10084
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f444f9703d175494884baaba0472ab27a4d9a8a1
75,692
py
Python
sapmon/payload/provider/sapnetweaver.py
gummadirajesh/AzureMonitorForSAPSolutions
9f8e9dbd38141b5de4782d40556c4368f6ad8d0b
[ "MIT" ]
null
null
null
sapmon/payload/provider/sapnetweaver.py
gummadirajesh/AzureMonitorForSAPSolutions
9f8e9dbd38141b5de4782d40556c4368f6ad8d0b
[ "MIT" ]
null
null
null
sapmon/payload/provider/sapnetweaver.py
gummadirajesh/AzureMonitorForSAPSolutions
9f8e9dbd38141b5de4782d40556c4368f6ad8d0b
[ "MIT" ]
null
null
null
# Python modules import json import logging from datetime import datetime, timedelta, timezone from time import time from typing import Any, Callable import re import requests from requests import Session from threading import Lock # SOAP Client modules from zeep import Client from zeep import helpers from zeep.transports import Transport from zeep.exceptions import Fault # Payload modules from const import * from helper.azure import AzureStorageAccount from helper.context import * from helper.tools import * from provider.base import ProviderInstance, ProviderCheck from netweaver.metricclientfactory import NetWeaverMetricClient, MetricClientFactory from netweaver.rfcsdkinstaller import PATH_RFC_SDK_INSTALL, SapRfcSdkInstaller from typing import Dict # Suppress SSLError warning due to missing SAP server certificate import urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) # wait time in between attempts to re-download and install RFC SDK package if we have a download blob # URL defined and previous install attempt was not successful MINIMUM_RFC_INSTALL_RETRY_INTERVAL = timedelta(minutes=30) # timeout to use for all SOAP WSDL fetch and other API calls SOAP_API_TIMEOUT_SECS = 5 # soap client cache expiration, after which amount of time both successful + failed soap client instantiation attempts will be refreshed SOAP_CLIENT_CACHE_EXPIRATIION = timedelta(minutes=10) class sapNetweaverProviderInstance(ProviderInstance): # static / class variables to enforce singleton behavior around rfc sdk installation attempts across all # instances of SAP Netweaver provider _isRfcInstalled = None _rfcInstallerLock = Lock() def __init__(self, tracer: logging.Logger, ctx: Context, providerInstance: Dict[str, str], skipContent: bool = False, **kwargs) -> None: self.sapSid = None self.sapHostName = None self.sapInstanceNr = None self.sapSubdomain = None # RFC SDK call settings self.sapUsername = None self.sapPassword = None self.sapClientId = None self.sapRfcSdkBlobUrl = None self.sapLogonGroup = None # provider instance flag for whether RFC calls should be enabled for this specific Netweaver provider instance self._areRfcCallsEnabled = None # cache WSDL SOAP clients so we can re-use them across checks for the same provider and cut down off-box calls self._soapClientCache = {} # the RFC SDK does not allow client to specify a timeout and in fact appears to have a connection timeout of 60 secs. # In cases where RFC calls timeout due to some misconfiguration, multiple retries can lead to metric gaps of several minutes. # We are limiting retries here because it is extremely rare for SOAP or RFC call to fail on first attempt and succeed on retry, # as most of these failures are due to persistent issues. Better to not waste limited time budget. 
retrySettings = { "retries": 1, "delayInSeconds": 1, "backoffMultiplier": 2 } super().__init__(tracer, ctx, providerInstance, retrySettings, skipContent, **kwargs) """ parse provider properties and get sid, host name and instance number """ def parseProperties(self) -> bool: self.sapSid = self.metadata.get("sapSid", "") if not self.sapSid: self.tracer.error("%s sapSid cannot be empty", self.fullName) return False # provider level common logging prefix self.logTag = "[%s][%s]" % (self.fullName, self.sapSid) self.sapHostName = self.providerProperties.get("sapHostName", None) if not self.sapHostName: self.tracer.error("%s sapHostName cannot be empty", self.logTag) return False instanceNr = self.providerProperties.get("sapInstanceNr", None) if instanceNr is None: # 0 is an acceptable value for Instance Number self.tracer.error("%s sapInstanceNr cannot be empty", self.logTag) return False if not type(instanceNr) is int or instanceNr < 0 or instanceNr > 98: self.tracer.error("%s sapInstanceNr can only be between 00 and 98 but %s was passed", self.logTag, str(instanceNr)) return False self.sapInstanceNr = str(instanceNr).zfill(2) self.sapSubdomain = self.providerProperties.get("sapSubdomain", "") self.sapUsername = self.providerProperties.get('sapUsername', None) self.sapPassword = self.providerProperties.get('sapPassword', None) self.sapClientId = self.providerProperties.get('sapClientId', None) self.sapLogonGroup = self.providerProperties.get('sapLogonGroup',None) self.sapRfcSdkBlobUrl = self.providerProperties.get('sapRfcSdkBlobUrl', None) # if user did not specify password directly via UI, check to see if they instead # provided link to Key Vault secret if not self.sapPassword: sapPasswordKeyVaultUrl = self.providerProperties.get("sapPasswordKeyVaultUrl", None) if sapPasswordKeyVaultUrl: self.tracer.info("%s sapPassword key vault URL specified, attempting to fetch from %s", self.logTag, sapPasswordKeyVaultUrl) try: keyVaultUrlPatternMatch = re.match(REGEX_EXTERNAL_KEYVAULT_URL, sapPasswordKeyVaultUrl, re.IGNORECASE) keyVaultName = keyVaultUrlPatternMatch.group(1) secretName = keyVaultUrlPatternMatch.group(2) except Exception as e: self.tracer.error("%s invalid sapPassword Key Vault secret url format: %s", self.logTag, sapPasswordKeyVaultUrl) return False try: kv = AzureKeyVault(self.tracer, keyVaultName, self.ctx.msiClientId) self.sapPassword = kv.getSecret(secretName, None).value if not self.sapPassword: raise Exception("failed to read sapPassword secret") except Exception as e: self.tracer.error("%s error fetching sapPassword secret from keyVault url: %s, %s", self.logTag, sapPasswordKeyVaultUrl, e) return False return True def _getHttpPortFromInstanceNr(self, instanceNr: str) -> str: return '5%s13' % instanceNr # As per SAP documentation, default http port is of the form 5<NR>13 def _getHttpsPortFromInstanceNr(self, instanceNr: str) -> str: return '5%s14' % instanceNr # As per SAP documentation, default https port is of the form 5<NR>14 def getMessageServerPortFromInstanceNr(self, instanceNr: str) -> str: return '81%s' % instanceNr # As per SAP documentation, default http port is of the form 81<NR> def getFullyQualifiedDomainName(self, hostname: str) -> str: if self.sapSubdomain: return hostname + "." 
+ self.sapSubdomain else: return hostname """ will first attempt to create SOAP client for hostname using the HTTPS port derived from the SAP instance number, and if that does not succeed will then try to create client using the derived HTTP port (if neither hostname or instance are specified, will default to the primary hostname/instance that the provider was initialized with from properties) """ def getDefaultClient(self, hostname: str = None, instance: str = None) -> Client: if not hostname: hostname = self.sapHostName if not instance: instance = self.sapInstanceNr httpsPort = self._getHttpsPortFromInstanceNr(instance) httpPort = self._getHttpPortFromInstanceNr(instance) portList = [(httpsPort,"https"),(httpPort,"http")] exceptionDetails = None startTime = time() for port,protocol in portList: startTime = time() self.tracer.info("%s attempting to fetch default client for hostname=%s on %s port %s", self.logTag, hostname, protocol, port) try: client = self.getClient(hostname, httpProtocol=protocol, port=port) return client except Exception as e: exceptionDetails = e self.tracer.info("%s error fetching default client hostname=%s on %s port %s: %s [%d ms]", self.logTag, self.sapHostName, protocol, port, e, TimeUtils.getElapsedMilliseconds(startTime)) self.tracer.error("[%s] error fetching default client hostname=%s on port %s : %s [%d ms]", self.logTag, self.sapHostName, portList, exceptionDetails, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True) raise exceptionDetails """ attempt to create a SOAP client for the specified hostname using specific protocol and port (for when we already have a known hostconfig for this hostname, and already know whether HTTPS or HTTP should be used) Store successful clients in cache so we don't make unnecessary WSDL fetchs for future API calls to the same instance """ def getClient(self, hostname: str, httpProtocol: str, port: str, useCache: bool = True) -> Client: if not hostname or not httpProtocol or not port: raise Exception("%s cannot create client with empty httpProtocol, hostname or port (%s:%s:%s)" % \ (self.logTag, httpProtocol, hostname, port)) if httpProtocol != "http" and httpProtocol != "https": raise Exception("%s httpProtocol %s is not valid for hostname: %s, port: %s" % \ (self.logTag, httpProtocol, hostname, port)) hostname = self.getFullyQualifiedDomainName(hostname) url = '%s://%s:%s/?wsdl' % (httpProtocol, hostname, port) if (useCache and url in self._soapClientCache): cacheEntry = self._soapClientCache[url] # respect cache expiration; if cache is expired allow client to be refreshed below if (cacheEntry['expirationDateTime'] > datetime.utcnow()): if (cacheEntry['client']): # self.tracer.info("%s using cached SOAP client for wsdl: %s", self.logTag, url) return cacheEntry['client'] else: # previously cached soap client attempt was failure raise Exception("%s cached SOAP client failure for wsdl: %s" % (self.logTag, url)) self.tracer.info("%s connecting to wsdl url: %s", self.logTag, url) startTime = time() client = None try: session = Session() session.verify = False client = Client(url, transport=Transport(session=session, timeout=SOAP_API_TIMEOUT_SECS, operation_timeout=SOAP_API_TIMEOUT_SECS)) self.tracer.info("%s initialized SOAP client url: %s [%d ms]", self.logTag, url, TimeUtils.getElapsedMilliseconds(startTime)) return client except Exception as e: self.tracer.error("%s error fetching wsdl url: %s: %s [%d ms]", self.logTag, url, e, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True) raise e finally: # cache 
successsful and failed soap client attempts to reduce future API calls self._soapClientCache[url] = { 'client': client, 'expirationDateTime': datetime.utcnow() + SOAP_CLIENT_CACHE_EXPIRATIION } def callSoapApi(self, client: Client, apiName: str) -> str: self.tracer.info("%s executing SOAP API: %s for wsdl: %s", self.logTag, apiName, client.wsdl.location) startTime = time() try: method = getattr(client.service, apiName) result = method() self.tracer.info("%s successful SOAP API: %s for wsdl: %s [%d ms]", self.logTag, apiName, client.wsdl.location, TimeUtils.getElapsedMilliseconds(startTime)) return result except Exception as e: self.tracer.error("%s error while calling SOAP API: %s for wsdl: %s: %s [%d ms]", self.logTag, apiName, client.wsdl.location, e, TimeUtils.getElapsedMilliseconds(startTime), exc_info=True) raise e """ return a netweaver RFC client initialized with "MESSAGESERVER" instance we find for this SID. """ def getRfcClient(self, logTag: str) -> NetWeaverMetricClient: # RFC connections against application server instances can be made through 'MESSAGESERVER' instances dispatcherInstance = self.getMessageServerInstance() return MetricClientFactory.getMetricClient(tracer=self.tracer, logTag=logTag, sapHostName=dispatcherInstance['hostname'], sapSysNr=str(dispatcherInstance['instanceNr']), sapSubdomain=self.sapSubdomain, sapSid=self.sapSid, sapClient=str(self.sapClientId), sapLogonGroup = self.sapLogonGroup, sapUsername=self.sapUsername, sapPassword=self.sapPassword) def validate(self) -> bool: logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid) # HACK: Load content json to fetch the list of APIs in the checks self.initContent() try: self._validateSoapClient() except Exception as e: self.tracer.error("%s SOAP API validation failure: %s", logTag, e, exc_info=True) return False try: self._validateRfcClient() except Exception as e: self.tracer.error("%s RFC client validation failure: %s", logTag, e, exc_info=True) return False return True """ iterate through all SOAP API calls and attempt to validate that SOAP API client can be instantiated and expected APIs are callable """ def _validateSoapClient(self) -> None: ### # TODO: this entire function needs to be rethought to me more precise in terms of which instances # are called for which APIs, as some APIs will not work for some function types. ### logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid) # hard-coded list of checks that correspond to SOAP API calls to validate soapApiChecks = ['GetSystemInstanceList', 'GetProcessList', 'ABAPGetWPTable', 'GetQueueStatistic', 'EnqGetStatistic'] self.tracer.info("%s connecting to sap to validate SOAP API connectivity", logTag) try: client = self.getDefaultClient(hostname=self.sapHostName, instance=self.sapInstanceNr) except Exception as e: self.tracer.error("%s error occured while initializing SOAP client to SAP server: %s|%s, %s", logTag, self.sapHostName, self.sapInstanceNr, e, exc_info=True) raise # Ensure that all APIs in the checks are valid and are marked as unprotected. # Some APIs are compatible with only specific instance types and throw a Fault if run against # an incompatible one. # However, here we suppress all errors except Unauthorized since the Monitor phase takes # care of calling the API against the right instance type. As long as we don't get an # Unauthorized error, we know we can safely call them during the Monitor phase. 
isValid = True for check in self.checks: apiName = check.name if (apiName not in soapApiChecks): # this is not a SOAP API check continue method = getattr(client.service, apiName, None) # Returning None when API not found if method is None: self.tracer.error("%s SOAP client failure: api %s does not exist for %s", logTag, apiName, client.wsdl.location) isValid = False else: try: self.callSoapApi(client, apiName) self.tracer.info("%s validated SOAP api %s for %s", logTag, apiName, client.wsdl.location) except Fault as e: if (e.code == "SOAP-ENV:Client" and e.message == "HTTP Error: 'Unauthorized'"): isValid = False self.tracer.error("%s SOAP api %s is protected for %s, %s ", logTag, apiName, client.wsdl.location, e, exc_info=True) else: self.tracer.error("%s suppressing error during validation of SOAP api %s for %s, %s", logTag, apiName, client.wsdl.location, e, exc_info=True) except Exception as e: self.tracer.error("%s suppressing error during validation of SOAP api %s for %s, %s ", logTag, apiName, client.wsdl.location, e, exc_info=True) if (not isValid): raise Exception("%s one or more SOAP APIs failed validation" % (logTag)) """ if customer provided RFC SDK configuration, then validate that all required properties are specified and validate we can establish RFC client connections to APIs we need to call """ def _validateRfcClient(self) -> None: logTag = "[%s][%s][validation]" % (self.fullName, self.sapSid) # are any RFC SDK config properties populated? if (not self.sapUsername or not self.sapPassword or not self.sapClientId or not self.sapRfcSdkBlobUrl): # customer has not chosen to enable RFC SDK, nothing to validate return # are ALL RFC SDK config properties populated? if (not self.sapUsername and not self.sapPassword and not self.sapClientId and not self.sapRfcSdkBlobUrl): # customer specified only partial set of config properties needed to enable RFC, so fail validation raise Exception("must specify all properties to enable RFC metric collection: Username, Password, ClientId, and RfcSdkBlobUrl") if (not self.areRfcMetricsEnabled()): raise Exception("RFC SDK failed to install and is not usable") # initialize a client for the first healthy ABAP/Dispatcher instance we find client = self.getRfcClient(logTag=logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window to lookback 10 minutes to see if any results are available. 
If not that probably # indicates customer has not enabled SMON on their SAP system self.tracer.info("%s attempting to fetch server timestamp from %s", logTag, sapHostnameStr) (startTime, endTime) = client.getQueryWindow(lastRunServerTime=None, minimumRunIntervalSecs=600, logTag=logTag) self.tracer.info("%s attempting to fetch SMON metrics from %s", logTag, sapHostnameStr) result = client.getSmonMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag) self.tracer.info("%s successfully queried SMON metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch SWNC workload metrics from %s", logTag, sapHostnameStr) result = client.getSwncWorkloadMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag) self.tracer.info("%s successfully queried SWNC workload metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch Short Dump metrics from %s", logTag, sapHostnameStr) result = client.getShortDumpsMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag) self.tracer.info("%s successfully queried Short Dump metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch Sys Log metrics from %s", logTag, sapHostnameStr) result = client.getSysLogMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag) self.tracer.info("%s successfully queried Sys Log metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch Failed Updates metrics from %s", logTag, sapHostnameStr) result = client.getFailedUpdatesMetrics(logTag=logTag) self.tracer.info("%s successfully queried Failed Updates metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch Batch Job metrics from %s", logTag, sapHostnameStr) result = client.getBatchJobMetrics(startDateTime=startTime, endDateTime=endTime, logTag=logTag) self.tracer.info("%s successfully queried Batch Job metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch inbound queue metrics from %s", logTag, sapHostnameStr) result = client.getInboundQueuesMetrics(logTag=logTag) self.tracer.info("%s successfully queried inbound queue metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch outbound queue metrics from %s", logTag, sapHostnameStr) result = client.getOutboundQueuesMetrics(logTag=logTag) self.tracer.info("%s successfully queried outbound queue metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s attempting to fetch lock entries metrics from %s", logTag, sapHostnameStr) result = client.getEnqueueReadMetrics(logTag=logTag) self.tracer.info("%s successfully queried lock entries metrics from %s", logTag, sapHostnameStr) self.tracer.info("%s successfully validated all known RFC SDK calls", logTag) """ query SAP SOAP API to return list of all instances in the SID, but if caller specifies that cached results are okay and we have cached instance list with the provider instance, then just return the cached results """ def getInstances(self, filterFeatures: list = None , filterType: str = None, useCache: bool = True) -> list: # Use cached list of instances if available since they should not change within a single monitor run; # but if cache is not available or if caller explicitly asks to skip cache then make the SOAP call if ('hostConfig' in self.state and useCache): # self.tracer.debug("%s using cached list of system instances", self.logTag) return self.filterInstancesByFeature(self.state['hostConfig'], filterFeatures=filterFeatures, filterType=filterType) 
self.tracer.info("%s getting list of system instances", self.logTag) startTime = time() instanceList = [] hosts = self._getHosts() # Use last known hosts to fetch the updated list of hosts # Walk through the known hostnames and stop whenever any of them returns the list of all instances isSuccess = False for host in hosts: hostname, instanceNum, httpProtocol, port = host[0], host[1], host[2], host[3] try: apiName = 'GetSystemInstanceList' # if we have a cached host config with already defined protocol and port, then we can initialize # client directly from that, otherwise we have to instantiate client using ports derived from the instance number # which will try the derived HTTPS port first and then fallback to derived HTTP port if (not httpProtocol or not port): client = self.getDefaultClient(hostname=hostname, instance=instanceNum) else: client = self.getClient(hostname, httpProtocol, port) result = self.callSoapApi(client, apiName) instanceList = self._parseResults(result) # cache latest results in provider state self.state['hostConfig'] = instanceList isSuccess = True break except Exception as e: self.tracer.error("%s could not connect to SAP with hostname: %s and port: %s", self.logTag, hostname, port, exc_info=True) if not isSuccess: raise Exception("%s could not connect to any SAP instances with hosts %s [%d ms]" % \ (self.logTag, hosts, TimeUtils.getElapsedMilliseconds(startTime))) self.tracer.info("%s finished getting all system instances [%d ms]", self.logTag, TimeUtils.getElapsedMilliseconds(startTime)) return self.filterInstancesByFeature(instanceList, filterFeatures=filterFeatures, filterType=filterType) """ fetch cached instance list for this provider and filter down to the list 'ABAP' feature functions that are healthy (ie. have dispstatus attribute of 'SAPControl-GREEN'). Just return first in the list. """ def getActiveDispatcherInstance(self): # Use cached list of instances if available since they don't change that frequently, # and filter down to only healthy dispatcher instances since RFC direct application server connection # only works against dispatchera dispatcherInstances = self.getInstances(filterFeatures=['ABAP'], filterType='include', useCache=True) healthyInstances = [instance for instance in dispatcherInstances if 'GREEN' in instance['dispstatus']] if (len(healthyInstances) == 0): raise Exception("No healthy ABAP/dispatcher instance found for %s" % self.sapSid) # return first healthy instance in list return healthyInstances[0] """ fetch cached instance list for this provider and filter down to the list 'MESSAGESERVER' feature functions return the available message server """ def getMessageServerInstance(self): # Use cached list of instances if available since they don't change that frequently, # and filter down to only healthy dispatcher instances since RFC direct application server connection # only works against dispatchera dispatcherInstances = self.getInstances(filterFeatures=['MESSAGESERVER'], filterType='include', useCache=True) if (len(dispatcherInstances) == 0): raise Exception("No MESSAGESERVER instance found for %s" % self.sapSid) # return first healthy instance in list return dispatcherInstances[0] """ given a list of sap instances and a set of instance features (ie. 
functions) to include or exclude, apply filtering logic and return only those instances that match the filter conditions: 'include' filter type will include any instance that matches any of the feature filters 'exclude' filter type will exclude any instance that matches any of the feature filters """ def filterInstancesByFeature(self, sapInstances: list, filterFeatures: list = None, filterType: str = None) -> list: if (not filterFeatures or len(filterFeatures) == 0 or not sapInstances): return sapInstances self.tracer.info("%s filtering list of system instances based on features: %s", self.logTag, filterFeatures) instances = [(instance, instance['features'].split('|')) for instance in sapInstances] if filterType == "include": # Inclusion filter # Only include instances that match at least one of the filter features filtered_instances = [instance for (instance, instance_features) in instances \ if not set(filterFeatures).isdisjoint(set(instance_features))] elif filterType == "exclude": # Exclusion filter # Only include instance that match none of the filter features filtered_instances = [instance for (instance, instance_features) in instances \ if set(filterFeatures).isdisjoint(set(instance_features))] else: raise Exception("%s filterType '%s' is not supported filter type" % (self.logTag, filterType)) return filtered_instances """ helper method to deserialize result and return as list of dictionary objects """ def _parseResults(self, results: list) -> list: return helpers.serialize_object(results, dict) """ private method to return default provider hostname config (what customer provided at time netweaver provided was added) or a fully fleshed out list of <hostname / instance # / https:Port> tuples based on a previous cached call to getInstances() """ def _getHosts(self) -> list: # Fetch last known list from storage. If storage does not have list, use provided # hostname and instanceNr if 'hostConfig' not in self.state: self.tracer.info("%s no host config persisted yet, using user-provided host name and instance nr", self.logTag) hosts = [(self.sapHostName, self.sapInstanceNr, None, None)] else: self.tracer.info("%s fetching last known host config", self.logTag) currentHostConfig = self.state['hostConfig'] hosts = [(hostConfig['hostname'], hostConfig['instanceNr'], "https" if (hostConfig['httpsPort'] and hostConfig['httpsPort'] != "0") else "http", hostConfig['httpsPort'] if (hostConfig['httpsPort'] and hostConfig['httpsPort'] != "0") else hostConfig['httpPort']) for hostConfig in currentHostConfig] return hosts """ returns flag to indicate whether provider checks should attempt to use RFC SDK client calls to fetch certain metrics. First time may perform fairly expensive checks to validate if RFC SDK is installed anc configured, and may attempt to download user provided blob to install to local system. We only want to attempt this at most once per process, so first caller to this function will pay that cost and the resulting success/failure flag will be cached. """ def areRfcMetricsEnabled(self) -> bool: if self._areRfcCallsEnabled != None: # the flag for whether RFC is usable has already been initialzed, so return return self._areRfcCallsEnabled # there may be 1..N sapNetWeaverProviderInstance instances per sapmon process, and each instance # may choose to enable/disable RFC calls individually, but we should only attempt to install the # RFC SDK at most once per process. 
Use a static/class variable to determine whether an installation # attempt has already been made and whether it succeeded or failed, and do all this inside of # a lock, caching the flag for future checks try: # class singleton lock sapNetweaverProviderInstance._rfcInstallerLock.acquire(blocking=True) # check -> lock -> check if (self._areRfcCallsEnabled != None): # flag was initialized prior to obtaining the lock return self._areRfcCallsEnabled # ensure this provider instance has necessary config settings to enable RFC SDK calls if (not self.sapUsername or not self.sapPassword or not self.sapClientId or not self.sapRfcSdkBlobUrl or not self.sapLogonGroup): self.tracer.info("%s Netweaver RFC calls disabled because missing one or more required " + "config properties: sapUsername, sapPassword, sapClientId, sapLogonGroup and sapRfcSdkBlobUrl", self.logTag) self._areRfcCallsEnabled = False return False # only attempt to install RFC SDK once per process execution if (sapNetweaverProviderInstance._isRfcInstalled == None): sapNetweaverProviderInstance._isRfcInstalled = self._trySetupRfcSdk() self._areRfcCallsEnabled = sapNetweaverProviderInstance._isRfcInstalled return self._areRfcCallsEnabled except Exception as e: self.tracer.error("%s Exception trying to check if rfc sdk metrics are enabled, %s", self.logTag, e, exc_info=True) sapNetweaverProviderInstance._isRfcInstalled = False self._areRfcCallsEnabled = False finally: sapNetweaverProviderInstance._rfcInstallerLock.release() return False """ validate that RFC SDK package has been installed and configured correctly and is usable by pyrfc module. If pyrfc module cannot be imported, then potentially attempt to download RFC SDK blob, install to local system, and configure necessary environment variables and system settings so that the libraries can be successfully loaded by the pyrfc module. Returns flag indicating whether pyrfc module can be imported (ie. whether RFC calls can be enabled) Pre-requisites for RFC SDK installation attempt: 1.) Customer provided config property sapRfcSdkBlobUrl must be non-empty. 2.) python module for "pynwrfc" must be installed 3.) was the last failed SDK installation attempt more than N minutes ago (defined by MINIMUM_RFC_INSTALL_RETRY_INTERVAL) 4.) does the sapRfcSdkBlobUrl provided by customer actually exist in the storage account 5.) was the last_modified timestamp on the sapRfcSdkBlobUrl blob modified since the last failed installation attempt """ def _trySetupRfcSdk(self) -> bool: try: # if no RFC SDK download blob url specified, treat as kill switch to disable any RFC calls if (not self.sapRfcSdkBlobUrl): self.tracer.info("%s No user provided RFC SDK blob url, will not leverage RFC SDK. quitting...", self.logTag) return False installer = SapRfcSdkInstaller(tracer=self.tracer, installPath=PATH_RFC_SDK_INSTALL) # environment variables must be initialized before RFC and pyrfc installation can be validated self.tracer.info("%s initializing RFC SDK environment...", self.logTag) if (not installer.initRfcSdkEnvironment()): self.tracer.error("%s failed to initialize rfc sdk environment pre-requisites", self.logTag) return False # if we are able to successfully import the pyrfc connector module, that means RFC SDK # libraries must be installed and were able to be found by pyrfc package initialization, # so no need to do any further checks.
if (installer.isPyrfcModuleUsable()): # pyrfc package is usable, which means RFC SDK is already installed and environment configured correctly self.tracer.info("%s Pyrfc module is usable, RFC calls will be enabled", self.logTag) return True # if pyrfc module cannot be imported, check to see if it is even installed. Assumption is that # pyrfc module is installed as part of container image, so if it is missing something is wrong and # there is no need to even try to install the RFC SDK if (not installer.isPyrfcModuleInstalled()): self.tracer.error("%s Pyrfc module is not installed, RFC calls will be disabled", self.logTag) return False # check last sdk install attempt time so we can limit how often we retry # to download and install SDK on persistent failures (eg. no more than once every 30 mins) lastSdkInstallAttemptTime = installer.getLastSdkInstallAttemptTime() if (lastSdkInstallAttemptTime > (datetime.now(timezone.utc) - MINIMUM_RFC_INSTALL_RETRY_INTERVAL)): self.tracer.info("%s last RFC SDK install attempt was %s, minimum attempt retry %s, skipping...", self.logTag, lastSdkInstallAttemptTime, MINIMUM_RFC_INSTALL_RETRY_INTERVAL) return False self.tracer.info("%s RFC SDK is not installed, so attempt installation now...", self.logTag) blobStorageAccount = AzureStorageAccount(tracer=self.tracer, sapmonId=self.ctx.sapmonId, msiClientId=self.ctx.msiClientId, subscriptionId=self.ctx.vmInstance["subscriptionId"], resourceGroup=self.ctx.vmInstance["resourceGroupName"]) # first check that rfc sdk download blob exists in Azure Storage account, and if it # exists, also fetch the last_modified timestamp metadata doesPackageExist, packageLastModifiedTime = installer.isRfcSdkAvailableForDownload( blobUrl=self.sapRfcSdkBlobUrl, storageAccount=blobStorageAccount) if (not doesPackageExist): self.tracer.error("%s User provided RFC SDK blob does not exist %s, skipping...", self.logTag, self.sapRfcSdkBlobUrl) return False self.tracer.info("%s user provided RFC SDK blob exists for download %s, lastModified=%s", self.logTag, self.sapRfcSdkBlobUrl, packageLastModifiedTime) # the user provided sdk blob exists, so before we download compare the last_modified timestamp # with the last modified time of the last download attempt. If nothing has changed, # then no need to try and download the package again # TODO: confirm, should we go ahead and try to re-download previously failed packages # once every 30 minutes anyway? just in case failure was something external?
lastInstallPackageModifiedTime = installer.getLastSdkInstallPackageModifiedTime() if (packageLastModifiedTime == lastInstallPackageModifiedTime): self.tracer.info("%s rfc sdk download package has not been modified since last download " + "attempt (last_modified=%s), will not download again", self.logTag, lastInstallPackageModifiedTime) return False self.tracer.info("%s user provided rfc sdk package last_modified (%s) has changed " + "since last install attempt (%s), attempting to re-download and install", self.logTag, packageLastModifiedTime, lastInstallPackageModifiedTime) # try to download user provided RFC SDK blob, install to local system and configure necessary # environment variables and system settings so that it can be usable by pyrfc module if (not installer.downloadAndInstallRfcSdk(blobUrl=self.sapRfcSdkBlobUrl, storageAccount=blobStorageAccount)): self.tracer.error("%s failed to download and install rfc sdk package, RFC calls will not be enabled...", self.logTag) return False # on Linux pyrfc module may not be usable upon first install attempt, as it appears that libraries # unpacked to the LD_LIBRARY_PATH location after the python process starts may not be picked up. # The module should be usable on the next sapmon process run. if (not installer.isPyrfcModuleUsable()): self.tracer.error("%s pyrfc module still not usable after RFC SDK install (might require process restart), " + "RFC calls will not be enabled...", self.logTag) return False self.tracer.info("%s pyrfc module is usable after RFC SDK install, RFC calls will be enabled...", self.logTag) return True except Exception as e: self.tracer.error("%s exception trying to setup and validate RFC SDK, RFC calls will be disabled: %s", self.logTag, e, exc_info=True) return False ########################### class sapNetweaverProviderCheck(ProviderCheck): lastResult = [] # hard-coded set of action names that require RFC SDK to be usable # and can override runtime isEnabled() check if RFC is not usable rfcCheckNames = {'SMON_Metrics', 'SWNC_Workload_Metrics', 'SDF_Short_Dumps_Metrics', 'Sys_Log_Metrics', 'Failed_Updates_Metrics', 'Batch_Jobs_Metrics', 'Inbound_Queues_Metrics', 'Outbound_Queues_Metrics', 'Enqueue_Read_Metrics'} def __init__(self, provider: ProviderInstance, **kwargs ): super().__init__(provider, **kwargs) self.lastRunLocal = None self.lastRunServer = None # provider check common logging prefix self.logTag = "[%s][%s]" % (self.fullName, self.providerInstance.sapSid) """ return flag indicating whether this check instance requires the SAP RFC SDK to be installed and usable """ def doesCheckRequireRfcSdk(self) -> bool: return self.name in sapNetweaverProviderCheck.rfcCheckNames """ override base ProviderCheck implementation to allow RFC metric collection methods to be enabled in the default Provider JSON configuration yet treated as disabled at runtime if RFC SDK is not configured (to reduce log spam) """ def isEnabled(self) -> bool: if not self.state["isEnabled"]: return False # if this check requires RFC and RFC is not installed, then treat as disabled if (self.doesCheckRequireRfcSdk()): if (not self.providerInstance.areRfcMetricsEnabled()): return False return True def _getFormattedTimestamp(self) -> str: return datetime.utcnow().isoformat() def _parseResult(self, result: object) -> list: return [helpers.serialize_object(result, dict)] def _parseResults(self, results: list) -> list: return helpers.serialize_object(results, dict) def _getServerTimestamp(self) -> datetime: self.tracer.info("%s fetching current
timestamp from message server", self.logTag) message_server_instances = self.providerInstance.getInstances(filterFeatures=['MESSAGESERVER'], filterType='include', useCache=True) date = datetime.fromisoformat(self._getFormattedTimestamp()) # Get timestamp from the first message server that returns a valid date for instance in message_server_instances: hostname = instance['hostname'] instanceNr = str(instance['instanceNr']).zfill(2) port = self.providerInstance.getMessageServerPortFromInstanceNr(instanceNr) hostname = self.providerInstance.getFullyQualifiedDomainName(hostname) message_server_endpoint = "http://%s:%s/" % (hostname, port) try: # We only care about the date in the response header, so we ignore the response body # 'Thu, 04 Mar 2021 05:02:12 GMT' # NOTE: we don't need to follow redirects because the redirect response itself 300-3XX # will have the 'date' header as well. In some cases we were following a chain # of redirects that would terminate in a 404, which would not have the 'date' header response = requests.get(message_server_endpoint, allow_redirects=False) if ('date' not in response.headers): raise Exception("no 'date' response header found for response status:%s/%s from:%s" % (response.status_code, response.reason, message_server_endpoint)) date = datetime.strptime(response.headers['date'], '%a, %d %b %Y %H:%M:%S %Z') self.tracer.info("%s received message server %s header: %s, parsed time: %s", self.logTag, message_server_endpoint, response.headers['date'], date) break except Exception as e: self.tracer.info("%s suppressing expected error while fetching server time during HTTP GET request to url %s: %s ", self.logTag, message_server_endpoint, e) return date def _actionGetSystemInstanceList(self) -> None: self.tracer.info("%s refreshing list of system instances", self.logTag) self.lastRunLocal = datetime.utcnow() # when performing the actual provider check action, always fetch a fresh instance list snapshot and refresh the cache instanceList = self.providerInstance.getInstances(useCache=False) self.lastRunServer = self._getServerTimestamp() # Update host config, if new list is fetched # Parse dictionary and add current timestamp and SID to data and log it if len(instanceList) != 0: currentTimestamp = self._getFormattedTimestamp() for instance in instanceList: instance['timestamp'] = currentTimestamp instance['serverTimestamp'] = self.lastRunServer.isoformat() instance['SID'] = self.providerInstance.sapSid instance['subdomain'] = self.providerInstance.sapSubdomain self.lastResult = instanceList # Update internal state if not self.updateState(): raise Exception("%s failed to update state" % self.logTag) self.tracer.info("%s successfully fetched system instance list", self.logTag) def _executeWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str, parser: Callable[[Any], list] = None) -> None: self.tracer.info("[%s] executing web service request: %s" % (self.fullName, apiName)) self.lastRunLocal = datetime.utcnow() # track latency of entire method execution with dependencies startTime = time() if parser is None: parser = self._parseResults # Use cached list of instances if available since they don't change that frequently; else fetch afresh.
# filter down to just the instances we need for this SOAP API type sapInstances = self.providerInstance.getInstances(useCache=True, filterFeatures=filterFeatures, filterType=filterType) self.lastRunServer = self._getServerTimestamp() if len(sapInstances) == 0: self.tracer.info("%s no instances found that support this API: %s", self.logTag, apiName) # Call web service all_results = [] currentTimestamp = self._getFormattedTimestamp() for instance in sapInstances: # default to https unless the httpsPort was not defined, in which case fallback to http httpProtocol = "https" port = instance['httpsPort'] if ((not port) or port == "0"): # fallback to http port instead httpProtocol = "http" port = instance['httpPort'] results = [] try: client = self.providerInstance.getClient(instance['hostname'], httpProtocol, port) results = self.providerInstance.callSoapApi(client, apiName) if(apiName == "GetProcessList"): results = self._sanitizeGetProcessList(results) elif(apiName == "ABAPGetWPTable"): results = self._sanitizeABAPGetWPTable(results) except Exception as e: self.tracer.error("%s unable to call the Soap Api %s - %s://%s:%s, %s", self.logTag, apiName, httpProtocol, instance['hostname'], port, e, exc_info=True) continue if len(results) != 0: parsed_results = parser(results) for result in parsed_results: result['hostname'] = instance['hostname'] result['instanceNr'] = instance['instanceNr'] result['subdomain'] = self.providerInstance.sapSubdomain result['timestamp'] = currentTimestamp result['serverTimestamp'] = self.lastRunServer.isoformat() result['SID'] = self.providerInstance.sapSid all_results.extend(parsed_results) if len(all_results) == 0: self.tracer.info("%s no results found for: %s", self.logTag, apiName) self.lastResult = all_results # Update internal state if not self.updateState(): raise Exception("[%s] failed to update state for web service request: %s [%d ms]" % \ (self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime))) self.tracer.info("%s successfully processed web service request: %s [%d ms]", self.logTag, apiName, TimeUtils.getElapsedMilliseconds(startTime)) def _actionExecuteGenericWebServiceRequest(self, apiName: str, filterFeatures: list, filterType: str) -> None: self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResults) def _actionExecuteEnqGetStatistic(self, apiName: str, filterFeatures: list, filterType: str) -> None: self._executeWebServiceRequest(apiName, filterFeatures, filterType, self._parseResult) """ Method to parse the value based on the key provided and set the values with None value to empty string '' """ def _getKeyValue(self, dictionary, key, apiName): if key not in dictionary: raise ValueError("Result received for api %s does not contain key: %s"% (apiName, key)) if(dictionary[key] == None): dictionary[key] = "" return dictionary[key] """ Method to parse the results from ABAPGetWPTable and set the strings with None value to empty string '' """ def _sanitizeABAPGetWPTable(self, records: list) -> list: apiName = "ABAPGetWPTable" processed_results = list() for record in records: processed_result = { "Action": self._getKeyValue(record, 'Action', apiName), "Client": self._getKeyValue(record, 'Client', apiName), "Cpu": self._getKeyValue(record, 'Cpu', apiName), "Err": self._getKeyValue(record, 'Err', apiName), "No": self._getKeyValue(record, 'No', apiName), "Pid": self._getKeyValue(record, 'Pid', apiName), "Program": self._getKeyValue(record, 'Program', apiName), "Reason": self._getKeyValue(record, 'Reason', apiName), "Sem": 
self._getKeyValue(record, 'Sem', apiName), "Start": self._getKeyValue(record, 'Start', apiName), "Status": self._getKeyValue(record, 'Status', apiName), "Table": self._getKeyValue(record, 'Table', apiName), "Time": self._getKeyValue(record, 'Time', apiName), "Typ": self._getKeyValue(record, 'Typ', apiName), "User": self._getKeyValue(record, 'User', apiName) } processed_results.append(processed_result) return processed_results """ Method to parse the results from GetProcessList and set the strings with None value to empty string '' """ def _sanitizeGetProcessList(self, records: list) -> list: apiName = "GetProcessList" processed_results = list() for record in records: processed_result = { "description": self._getKeyValue(record, 'description', apiName), "dispstatus": self._getKeyValue(record, 'dispstatus', apiName), "elapsedtime": self._getKeyValue(record, 'elapsedtime', apiName), "name": self._getKeyValue(record, 'name', apiName), "pid": self._getKeyValue(record, 'pid', apiName), "starttime": self._getKeyValue(record, 'starttime', apiName), "textstatus": self._getKeyValue(record, 'textstatus', apiName) } processed_results.append(processed_result) return processed_results """ netweaver provider check action to query for SDF/SMON Analysis Run metrics """ def _actionGetSmonAnalysisMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping SMON metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method excecution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window based on our last successful query where results were returned (startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer, minimumRunIntervalSecs=self.frequencySecs, logTag=self.logTag) self.lastResult = client.getSmonMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag) self.tracer.info("%s successfully queried SMON metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) self.lastRunServer = endTime # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch SMON Analysis Run metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for SWNC workload statistics and decorate with ST03 metric calculations """ def _actionGetSwncWorkloadMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not 
self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping SWNC metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method excecution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window based on our last successful query where results were returned (startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer, minimumRunIntervalSecs=self.frequencySecs, logTag=self.logTag) self.lastResult = client.getSwncWorkloadMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag) self.tracer.info("%s successfully queried SWNC workload metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) self.lastRunServer = endTime # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch SWNC workload metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for short dumps """ def _actionGetShortDumpsMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping short dumps metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method excecution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window based on our last successful query where results were returned (startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer, minimumRunIntervalSecs=self.frequencySecs, logTag=self.logTag) self.lastResult = client.getShortDumpsMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag) self.tracer.info("%s successfully queried short dumps metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) self.lastRunServer = endTime # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch short dumps metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for sys logs """ def _actionGetSysLogMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in 
case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping sys logs metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method execution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window based on our last successful query where results were returned (startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer, minimumRunIntervalSecs=self.frequencySecs, logTag=self.logTag) self.lastResult = client.getSysLogMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag) self.tracer.info("%s successfully queried sys log metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) self.lastRunServer = endTime # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch sys logs metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for failed updates metrics """ def _actionGetFailedUpdatesMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping failed updates metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method execution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window based on our last successful query where results were returned (startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer, minimumRunIntervalSecs=self.frequencySecs, logTag=self.logTag) self.lastResult = client.getFailedUpdatesMetrics(logTag=self.logTag) self.tracer.info("%s successfully queried failed updates metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) self.lastRunServer = endTime # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch failed updates metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for batch job metrics """ def _actionGetBatchJobMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure
self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping batch jobs metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method execution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) # get metric query window based on our last successful query where results were returned (startTime, endTime) = client.getQueryWindow(lastRunServerTime=self.lastRunServer, minimumRunIntervalSecs=self.frequencySecs, logTag=self.logTag) self.lastResult = client.getBatchJobMetrics(startDateTime=startTime, endDateTime=endTime, logTag=self.logTag) self.tracer.info("%s successfully queried batch job metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) self.lastRunServer = endTime # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch batch job metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for inbound queues statistics """ def _actionGetInboundQueuesMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping Current Inbound Queues metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method execution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) self.lastResult = client.getInboundQueuesMetrics(logTag=self.logTag) self.tracer.info("%s successfully queried Current Inbound Queues metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch Current Inbound Queues metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for outbound queues statistics """ def _actionGetOutboundQueuesMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific
dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping Current Outbound Queues metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method excecution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) self.lastResult = client.getOutboundQueuesMetrics(logTag=self.logTag) self.tracer.info("%s successfully queried Current Outbound Queues metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch Current Outbound Queues metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise """ netweaver provider check action to query for object lock entries by connecting to ENQUEUE_READ RFC """ def _actionGetEnqueueReadMetrics(self) -> None: # base class will always call generateJsonString(), so we must always be sure to set the lastResult # regardless of success or failure self.lastResult = [] try: # initialize hostname log string here to default of SID in case we cannot identify a specific dispatcher host sapHostnameStr = self.providerInstance.sapSid if (not self.providerInstance.areRfcMetricsEnabled()): self.tracer.info("%s Skipping ENQUEUE_READ metrics because RFC SDK metrics not enabled...", self.logTag) return # track latency of entire method excecution with dependencies latencyStartTime = time() # initialize a client for the first healthy MessageServer instance we find client = self.providerInstance.getRfcClient(logTag=self.logTag) # update logging prefix with the specific instance details of the client sapHostnameStr = "%s|%s" % (client.Hostname, client.InstanceNr) self.lastResult = client.getEnqueueReadMetrics(logTag=self.logTag) self.tracer.info("%s successfully queried ENQUEUE_READ metrics for %s [%d ms]", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime)) self.lastRunLocal = datetime.now(timezone.utc) # only update state on successful query attempt self.updateState() except Exception as e: self.tracer.error("%s exception trying to fetch ENQUEUE_READ metrics for %s [%d ms], error: %s", self.logTag, sapHostnameStr, TimeUtils.getElapsedMilliseconds(latencyStartTime), e, exc_info=True) raise def generateJsonString(self) -> str: self.tracer.info("%s converting result to json string", self.logTag) if self.lastResult is not None and len(self.lastResult) != 0: for result in self.lastResult: result['SAPMON_VERSION'] = PAYLOAD_VERSION result['PROVIDER_INSTANCE'] = self.providerInstance.name result['METADATA'] = self.providerInstance.metadata resultJsonString = json.dumps(self.lastResult, sort_keys=True, indent=4, cls=JsonEncoder) self.tracer.debug("%s resultJson=%s", self.logTag, str(resultJsonString)) return resultJsonString def updateState(self) -> bool: self.tracer.info("%s updating internal state", self.logTag) self.state['lastRunLocal'] = self.lastRunLocal self.state['lastRunServer'] = self.lastRunServer self.tracer.info("%s internal state successfully updated", 
self.logTag) return True
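The include/exclude feature filter above reduces to simple set-disjointness tests. The following standalone sketch mirrors the logic of filterInstancesByFeature; the instance dicts are hypothetical examples, not output from a live SAP system.

# Minimal sketch of the include/exclude feature filter used above.
# The sample instances below are invented; real entries come from GetSystemInstanceList.
def filter_by_feature(instances, features, filter_type):
    pairs = [(inst, set(inst['features'].split('|'))) for inst in instances]
    wanted = set(features)
    if filter_type == 'include':
        # keep instances that share at least one feature with the filter set
        return [inst for (inst, feats) in pairs if not wanted.isdisjoint(feats)]
    if filter_type == 'exclude':
        # keep instances that share no feature with the filter set
        return [inst for (inst, feats) in pairs if wanted.isdisjoint(feats)]
    raise ValueError("unsupported filter type: %s" % filter_type)

sample = [{'hostname': 'app1', 'features': 'ABAP|GATEWAY'},
          {'hostname': 'msg1', 'features': 'MESSAGESERVER|ENQUE'}]
print(filter_by_feature(sample, ['ABAP'], 'include'))   # -> app1 only
print(filter_by_feature(sample, ['ABAP'], 'exclude'))   # -> msg1 only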
51.702186
175
0.624716
7,994
75,692
5.884538
0.113585
0.024659
0.022321
0.023915
0.472885
0.437597
0.409473
0.377734
0.359367
0.337961
0
0.001804
0.304458
75,692
1,463
176
51.737526
0.891713
0.182675
0
0.383051
0
0.00678
0.157579
0.003127
0
0
0
0.001367
0
1
0.051977
false
0.022599
0.024859
0.00904
0.159322
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4480752faba119871fef4e77c8c713728e07b1e
3,294
py
Python
example_usage/example_list_errors.py
oceanprotocol/plecos
ae532df8539e5c327cca57fbc1ea1b1193916cd1
[ "Apache-2.0" ]
1
2019-03-15T14:43:38.000Z
2019-03-15T14:43:38.000Z
example_usage/example_list_errors.py
oceanprotocol/plecos
ae532df8539e5c327cca57fbc1ea1b1193916cd1
[ "Apache-2.0" ]
26
2019-06-04T08:49:42.000Z
2022-02-07T02:06:42.000Z
example_usage/example_list_errors.py
oceanprotocol/Plecos
25b9a3f1698ab2c65ca82ac69ecd1f461c55a581
[ "Apache-2.0" ]
1
2019-03-12T18:31:55.000Z
2019-03-12T18:31:55.000Z
from pathlib import Path
import plecos
import json

print(plecos.__version__)

#%%
path_to_json_local = Path("~/ocn/plecos/plecos/samples/sample_metadata_local.json").expanduser()
path_to_json_remote = Path("~/ocn/plecos/plecos/samples/sample_metadata_remote.json").expanduser()
path_to_broken_json = Path("~/ocn/plecos/plecos/samples/metadata_local_broken.json").expanduser()
path_to_schema_local = Path("~/ocn/plecos/plecos/schemas/metadata_local_v0_3.json").expanduser()
path_to_schema_remote = Path("~/ocn/plecos/plecos/schemas/metadata_remote_v0_3.json").expanduser()

# Select remote or local metadata
LOCAL = True
if LOCAL:
    path_json_file = path_to_json_local
    path_schema_file = path_to_schema_local
    with open(path_to_json_local) as f:
        json_dict = json.load(f)
else:
    path_json_file = path_to_json_remote
    path_schema_file = path_to_schema_remote
    with open(path_to_json_remote) as f:
        json_dict = json.load(f)

print("Json file:", path_json_file)
print("Schema file:", path_schema_file)

#%%
del json_dict['base']['files'][0]['index']
# del json_dict['base']['files'][0]['url']
# json_dict['base']['extra'] = 1
plecos.is_valid_dict(json_dict)

# json_dict['base']['files'][0]['url']
# json_dict['base']['EXTRA ATTRIB!'] = 0
# json_dict['base']['files'][0]['EXTRA_ATTR'] = "????"
# json_dict['base']['price'] = "A string is not allowed!"

errors = plecos.list_errors(json_dict, path_schema_file)
if errors:
    print("ERRORS:")
    for e in errors:
        print(e)
else:
    print("No errors")

raise SystemExit("stopping here")  # the original used a bare `raise`, which errors outside an except block

#%%
json_dict = {
    "base": {
        "name": "10 Monkey Species Small",
        "author": "Mario",
        "license": "CC0: Public Domain",
        "contentType": "jpg/txt",
        "price": 5,
        "categories": ["image"],
        "tags": ["image data", " animals"],
        "type": "dataset",
        "description": "Example description",
        "copyrightHolder": "",
        "encoding": "",
        "compression": "",
        "workExample": "",
        "inLanguage": "en",
        "files": [
            {"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/training.zip"},
            {"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/monkey_labels.txt"},
            {"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/assets/validation.zip"}
        ],
        "links": [
            {"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/sample/sample.zip", "name": "sample.zip", "type": "sample"},
            {"url": "https://github.com/slothkong/CNN_classification_10_monkey_species", "name": "example code", "type": "example code"},
            {"url": "https://s3.amazonaws.com/datacommons-seeding-us-east/10_Monkey_Species_Small/links/discovery/n5151.jpg", "name": "n5151.jpg", "type": "discovery"}
        ],
        "checksum": "0",
    },
}

#%%
path_to_schema_local = Path("~/ocn/Plecos/plecos/schemas/metadata_local_190305.json").expanduser()
errors = plecos.list_errors(json_dict, path_to_schema_local)
if errors:
    print("ERRORS:")
    for e in errors:
        print(e)
else:
    print("No errors")
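For readers without the Ocean Protocol sample files on disk, the sketch below shows the general error-listing pattern using the jsonschema package directly, which is presumably what plecos wraps (an assumption); the inline schema is a toy stand-in for the real metadata schema.

# Hedged sketch: validate a dict against an inline JSON Schema and list all
# errors, analogous to what plecos.list_errors() returns. Toy schema only.
from jsonschema import Draft7Validator

schema = {
    "type": "object",
    "properties": {"price": {"type": "number"}},
    "required": ["price"],
}
doc = {"price": "not a number"}

validator = Draft7Validator(schema)
# iter_errors yields every violation instead of stopping at the first one
for err in sorted(validator.iter_errors(doc), key=lambda e: list(e.path)):
    print(err.message)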
26.564516
120
0.649059
423
3,294
4.79669
0.260047
0.038443
0.047314
0.056185
0.573682
0.490882
0.414983
0.322326
0.322326
0.322326
0
0.017081
0.182453
3,294
124
121
26.564516
0.736354
0.090164
0
0.206522
0
0.054348
0.43388
0.1078
0
0
0
0
0
1
0
false
0
0.032609
0
0.032609
0.097826
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f448729d42d0a606df0321be7509a9b2530f28d6
2,180
py
Python
pangloss/backend.py
CLRafaelR/pangloss
920c509381a8d7831471fc3f22a07e58b53b8c0e
[ "MIT" ]
null
null
null
pangloss/backend.py
CLRafaelR/pangloss
920c509381a8d7831471fc3f22a07e58b53b8c0e
[ "MIT" ]
1
2020-06-11T21:08:30.000Z
2020-09-20T03:36:06.000Z
pangloss/backend.py
CLRafaelR/pangloss
920c509381a8d7831471fc3f22a07e58b53b8c0e
[ "MIT" ]
1
2021-03-11T21:11:34.000Z
2021-03-11T21:11:34.000Z
import re

import panflute as pf
from functools import partial

from pangloss.util import smallcapify, break_plain

# regular expression for label formats
label_re = re.compile(r'\{#ex:(\w+)\}')

gb4e_fmt_labelled = """
\\ex\\label{{ex:{label}}}
\\gll {} \\\\
{} \\\\
\\trans {}
"""

gb4e_fmt = """
\\ex
\\gll {} \\\\
{} \\\\
\\trans {}
"""

def gb4e(lst):
    """
    Convert an example list into a series of gb4e-formatted interlinear
    glosses.

    Because example list references are replaced at parsing by Pandoc, the
    normal syntax of (@foo) cannot be used for labels; instead, a label
    syntax similar to that used for headers (and tables and figures with
    pandoc-crossref) is used, namely a {#ex:foo} inserted after the
    translation, which will be stripped and replaced with a LaTeX label on
    the relevant example.
    """
    latex = "\\begin{exe}\n"
    for li in lst.content:
        lines = break_plain(li.content[0])
        if len(lines) != 3:
            continue
        orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
        gloss = smallcapify(gloss)
        label_match = label_re.search(trans)
        if label_match:
            label = label_match.group(1)
            trans = trans[:label_match.start() - 1]
            latex += gb4e_fmt_labelled.format(orig, gloss, trans, label=label)
        else:
            latex += gb4e_fmt.format(orig, gloss, trans)
    latex += "\\end{exe}"
    return pf.RawBlock(latex, format='latex')

leipzigjs_fmt = """
<div data-gloss>
<p>{}</p>
<p>{}</p>
<p>{}</p>
</div>
"""

def leipzigjs(lst):
    """
    Convert an example list into a series of div's suitable for use with
    Leipzig.js.
    """
    html = ''
    for li in lst.content:
        lines = break_plain(li.content[0])
        if len(lines) != 3:
            continue
        orig, gloss, trans = map(partial(pf.stringify, newlines=False), lines)
        html += leipzigjs_fmt.format(orig, gloss, trans)
    return pf.RawBlock(html, format='html')

# available formats and backends
formats = {
    'latex': {
        'gb4e': gb4e
    },
    'html': {
        'leipzigjs': leipzigjs
    }
}
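A quick standalone illustration of the label extraction and gb4e templating above; the example sentence is made up, and no panflute objects are needed to exercise the regex and format string.

# Demonstrate label_re and the labelled gb4e template on plain strings.
import re

label_re = re.compile(r'\{#ex:(\w+)\}')
gb4e_fmt_labelled = """
\\ex\\label{{ex:{label}}}
\\gll {} \\\\
{} \\\\
\\trans {}
"""

orig = "der Hund schläft"
gloss = "the dog sleeps"
trans = "'The dog is sleeping.' {#ex:dog}"

m = label_re.search(trans)
if m:
    label = m.group(1)
    trans = trans[:m.start() - 1]  # strip the {#ex:...} marker and preceding space
    print(gb4e_fmt_labelled.format(orig, gloss, trans, label=label))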
23.44086
78
0.601835
281
2,180
4.608541
0.41637
0.034749
0.054054
0.046332
0.271815
0.23166
0.23166
0.23166
0.23166
0.176062
0
0.008679
0.260092
2,180
92
79
23.695652
0.794172
0.27156
0
0.357143
0
0
0.145669
0.016404
0
0
0
0
0
1
0.035714
false
0
0.071429
0
0.142857
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44ab2c0f0cd8c386e07e21d67f94743e0fb707b
3,966
py
Python
minecraft_launcher_lib/fabric.py
bopchik/Simple-minecraft-mod-launcher
52e4e8ec351b0bac7eb4fe707f21de8da14b9ac9
[ "BSD-2-Clause" ]
1
2021-06-17T18:19:41.000Z
2021-06-17T18:19:41.000Z
minecraft_launcher_lib/fabric.py
bopchik/Simple-minecraft-mod-launcher
52e4e8ec351b0bac7eb4fe707f21de8da14b9ac9
[ "BSD-2-Clause" ]
null
null
null
minecraft_launcher_lib/fabric.py
bopchik/Simple-minecraft-mod-launcher
52e4e8ec351b0bac7eb4fe707f21de8da14b9ac9
[ "BSD-2-Clause" ]
3
2021-06-17T18:19:44.000Z
2021-06-17T22:18:34.000Z
from .helper import download_file, get_user_agent
from .install import install_minecraft_version
from typing import List, Dict, Union
from xml.dom import minidom
import subprocess
import requests
import tempfile
import random
import os

def get_all_minecraft_versions() -> List[Dict[str, Union[str, bool]]]:
    """
    Returns all available Minecraft versions for fabric
    """
    FABRIC_MINECRAFT_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/game"
    return requests.get(FABRIC_MINECRAFT_VERSIONS_URL, headers={"user-agent": get_user_agent()}).json()

def get_stable_minecraft_versions() -> List[str]:
    """
    Returns a list which only contains the stable Minecraft versions that support fabric
    """
    minecraft_versions = get_all_minecraft_versions()
    stable_versions = []
    for i in minecraft_versions:
        if i["stable"] == True:
            stable_versions.append(i["version"])
    return stable_versions

def get_latest_minecraft_version() -> str:
    """
    Returns the latest unstable Minecraft version that supports fabric. This could be a snapshot.
    """
    minecraft_versions = get_all_minecraft_versions()
    return minecraft_versions[0]["version"]

def get_latest_stable_minecraft_version() -> str:
    """
    Returns the latest stable Minecraft version that supports fabric
    """
    stable_versions = get_stable_minecraft_versions()
    return stable_versions[0]

def is_minecraft_version_supported(version: str) -> bool:
    """
    Checks if a Minecraft version is supported by fabric
    """
    minecraft_versions = get_all_minecraft_versions()
    for i in minecraft_versions:
        if i["version"] == version:
            return True
    return False

def get_all_loader_versions() -> List[Dict[str, Union[str, bool, int]]]:
    """
    Returns all loader versions
    """
    FABRIC_LOADER_VERSIONS_URL = "https://meta.fabricmc.net/v2/versions/loader"
    return requests.get(FABRIC_LOADER_VERSIONS_URL, headers={"user-agent": get_user_agent()}).json()

def get_latest_loader_version() -> str:
    """
    Get the latest loader version
    """
    loader_versions = get_all_loader_versions()
    return loader_versions[0]["version"]

def get_latest_installer_version() -> str:
    """
    Returns the latest installer version
    """
    FABRIC_INSTALLER_MAVEN_URL = "https://maven.fabricmc.net/net/fabricmc/fabric-installer/maven-metadata.xml"
    r = requests.get(FABRIC_INSTALLER_MAVEN_URL, headers={"user-agent": get_user_agent()})
    xml_data = minidom.parseString(r.text)
    release = xml_data.getElementsByTagName("release")
    return release.item(0).lastChild.data

def install_fabric(path: str, minecraft_version: str, loader_version: str = None):
    """
    Install a fabric version
    """
    # Get latest loader version if not given
    if not loader_version:
        loader_version = get_latest_loader_version()
    # Make sure the Minecraft version is installed
    install_minecraft_version(path, minecraft_version)
    # Get installer version
    installer_version = get_latest_installer_version()
    installer_download_url = f"https://maven.fabricmc.net/net/fabricmc/fabric-installer/{installer_version}/fabric-installer-{installer_version}.jar"
    # Generate a temporary path for downloading the installer
    installer_path = os.path.join(tempfile.gettempdir(), f"fabric-installer-{random.randrange(100,10000)}.tmp")
    # Download the installer
    download_file(installer_download_url, installer_path)
    # Run the installer, see https://fabricmc.net/wiki/install#cli_installation
    subprocess.run(["java", "-jar", installer_path, "client", "-dir", path, "-mcversion", minecraft_version, "-loader", loader_version, "-noprofile", "-snapshot"])
    # Delete the installer; we don't need it anymore
    os.remove(installer_path)
    # Install all libs of fabric
    fabric_minecraft_version = f"fabric-loader-{loader_version}-{minecraft_version}"
    install_minecraft_version(path, fabric_minecraft_version)
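The installer-version lookup above boils down to reading the release element out of Maven metadata. This is the same minidom pattern against an inline XML snippet, so it runs offline; the version number shown is invented for illustration.

# Parse a maven-metadata.xml snippet with minidom, as get_latest_installer_version() does.
from xml.dom import minidom

xml_text = """<metadata>
  <versioning>
    <release>0.11.2</release>
  </versioning>
</metadata>"""

doc = minidom.parseString(xml_text)
release = doc.getElementsByTagName("release")
print(release.item(0).lastChild.data)  # -> 0.11.2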
39.66
152
0.739536
504
3,966
5.579365
0.236111
0.085349
0.01707
0.032717
0.29623
0.243599
0.184566
0.121622
0.034851
0.034851
0
0.004189
0.157337
3,966
99
153
40.060606
0.837223
0.199697
0
0.090909
0
0.036364
0.166007
0.033003
0
0
0
0
0
1
0.163636
false
0
0.163636
0
0.490909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44b19520f8c0f088d9bcd431d1e1bf360a73146
2,354
py
Python
Strand Sort.py
Nishkarsh-Tripathi/Sorting-algorithms-
cda25f1a8e7fb5e25e59e69e78f000421b0e4eb0
[ "Apache-2.0" ]
5
2020-03-29T16:26:18.000Z
2020-11-23T15:37:23.000Z
Strand Sort.py
Nishkarsh-Tripathi/Sorting-algorithms
cda25f1a8e7fb5e25e59e69e78f000421b0e4eb0
[ "Apache-2.0" ]
null
null
null
Strand Sort.py
Nishkarsh-Tripathi/Sorting-algorithms
cda25f1a8e7fb5e25e59e69e78f000421b0e4eb0
[ "Apache-2.0" ]
null
null
null
# STRAND SORT

# It is a recursive comparison based sorting technique which sorts in increasing order.
# It works by repeatedly pulling sorted sub-lists out of the list to be sorted and merging them
# with a result array.

# Algorithm:
# Create an empty strand (list) and append the first element to it, popping it from the input array
# Compare this element with the rest of the elements of the input array
# if a greater element is found then pop and append it to the strand, otherwise skip
# Now merge this strand into the final output array
# Recur for remaining items in the strand and input array.


# Utility function to merge two sorted arrays
def merge(arr1, arr2):
    # list to store merged output
    merged_list = []

    # while there are elements in both arrays
    while len(arr1) and len(arr2):
        # the array having the smaller first element gets appended first, as the resultant array must be sorted
        if arr1[0] < arr2[0]:
            merged_list.append(arr1.pop(0))
        else:
            merged_list.append(arr2.pop(0))

    # if either array is exhausted, append the remaining part of the other to
    # the merged sublist
    merged_list += arr1
    merged_list += arr2

    # return the merged list
    return merged_list


# Function to return the strand (sorted sub-list)
def strand(arr):
    # append the first element to the strand
    s = [arr.pop(0)]

    # initialise a pointer
    i = 0

    # while it is less than the length of the array
    # (the original had `while i > len(arr)`, which never loops; `<` is the intended comparison)
    while i < len(arr):
        # compare the input array elements to the last element of the strand
        if arr[i] > s[-1]:
            # if we found a greater element than s[-1] then pop it and append to the strand
            s.append(arr.pop(i))
        else:
            # else increment
            i += 1

    # return the strand
    return s


# Strand Sort function
def strand_sort(arr):
    # initialise the output array with the strand
    output = strand(arr)

    # while there are elements in the array
    while len(arr):
        # merge the strand and previous output list to create a new list
        output = merge(output, strand(arr))

    # return the sorted output
    return output


# Driver Code
arr = [1, 6, 3, 8, 2, 0, 9]
print(strand_sort(arr))

# Time Complexity : O(n^2) [Worst]
#                   O(n*log(n)) [Average]
# Space Complexity : O(n)
# Stable : Yes
# Inplace : No
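As a quick sanity check of the fixed comparison, the result can be compared against Python's built-in sorted() on random inputs (this assumes the merge/strand/strand_sort definitions above are in scope; non-empty lists only, since strand() pops from its input unconditionally):

# Smoke test: strand_sort should agree with sorted() on random data.
import random

for _ in range(100):
    data = [random.randint(0, 50) for _ in range(random.randint(1, 20))]
    assert strand_sort(list(data)) == sorted(data), data
print("strand_sort matches sorted() on 100 random lists")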
26.449438
101
0.657179
363
2,354
4.239669
0.341598
0.045484
0.025341
0.02729
0.059779
0
0
0
0
0
0
0.015808
0.274427
2,354
88
102
26.75
0.885246
0.64486
0
0.076923
0
0
0
0
0
0
0
0
0
1
0.115385
false
0
0
0
0.230769
0.038462
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44cdd7cc2616d5398119b8bf5c750adca9d4192
10,915
py
Python
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py
minhhoang1023/GamestonkTerminal
195dc19b491052df080178c0cc6a9d535a91a704
[ "MIT" ]
null
null
null
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py
minhhoang1023/GamestonkTerminal
195dc19b491052df080178c0cc6a9d535a91a704
[ "MIT" ]
null
null
null
gamestonk_terminal/cryptocurrency/overview/pycoingecko_model.py
minhhoang1023/GamestonkTerminal
195dc19b491052df080178c0cc6a9d535a91a704
[ "MIT" ]
null
null
null
"""CoinGecko model""" __docformat__ = "numpy" # pylint: disable=C0301, E1101 import logging import re from typing import Any, List import numpy as np import pandas as pd from pycoingecko import CoinGeckoAPI from gamestonk_terminal.cryptocurrency.dataframe_helpers import ( create_df_index, long_number_format_with_type_check, replace_underscores_in_column_names, ) from gamestonk_terminal.cryptocurrency.discovery.pycoingecko_model import get_coins from gamestonk_terminal.decorators import log_start_end logger = logging.getLogger(__name__) HOLD_COINS = ["ethereum", "bitcoin"] NEWS_FILTERS = ["Index", "Title", "Author", "Posted"] CATEGORIES_FILTERS = [ "Rank", "Name", "Change_1h", "Change_24h", "Change_7d", "Market_Cap", "Volume_24h", "Coins", ] STABLES_FILTERS = [ "Rank", "Name", "Symbol", "Price", "Change_24h", "Exchanges", "Market_Cap", "Change_30d", ] PRODUCTS_FILTERS = [ "Rank", "Platform", "Identifier", "Supply_Rate", "Borrow_Rate", ] PLATFORMS_FILTERS = ["Rank", "Name", "Category", "Centralized"] EXCHANGES_FILTERS = [ "Rank", "Trust_Score", "Id", "Name", "Country", "Year Established", "Trade_Volume_24h_BTC", ] EXRATES_FILTERS = ["Index", "Name", "Unit", "Value", "Type"] INDEXES_FILTERS = ["Rank", "Name", "Id", "Market", "Last", "MultiAsset"] DERIVATIVES_FILTERS = [ "Rank", "Market", "Symbol", "Price", "Pct_Change_24h", "Contract_Type", "Basis", "Spread", "Funding_Rate", "Volume_24h", ] COINS_COLUMNS = [ "symbol", "name", "current_price", "market_cap", "market_cap_rank", "price_change_percentage_7d_in_currency", "price_change_percentage_24h_in_currency", "total_volume", ] @log_start_end(log=logger) def get_holdings_overview(endpoint: str = "bitcoin") -> List[Any]: """Returns public companies that holds ethereum or bitcoin [Source: CoinGecko] Parameters ---------- endpoint : str "bitcoin" or "ethereum" Returns ------- List: - str: Overall statistics - pandas.DataFrame: Companies holding crypto """ cg = CoinGeckoAPI() data = cg.get_companies_public_treasury_by_coin_id(coin_id=endpoint) stats_str = f"""{len(data["companies"])} companies hold a total of {long_number_format_with_type_check(data["total_holdings"])} {endpoint} ({data["market_cap_dominance"]}% of market cap dominance) with the current value of {long_number_format_with_type_check(int(data["total_value_usd"]))} USD dollars""" # noqa df = pd.json_normalize(data, record_path=["companies"]) df.columns = list( map( lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x, df.columns, ) ) return [stats_str, df] SORT_VALUES = [ "market_cap_desc", "market_cap_asc", "name_desc", "name_asc", "market_cap_change_24h_desc", "market_cap_change_24h_asc", ] @log_start_end(log=logger) def coin_formatter(n): # TODO: can be improved coins = [] re_str = "small/(.*)(.jpg|.png|.JPG|.PNG)" for coin in n: if re.search(re_str, coin): coin_stripped = re.search(re_str, coin).group(1) coins.append(coin_stripped) return ",".join(coins) @log_start_end(log=logger) def get_top_crypto_categories(sort_filter: str = SORT_VALUES[0]) -> pd.DataFrame: """Returns top crypto categories [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Name, Change_1h, Change_7d, Market_Cap, Volume_24h,Coins, Url """ if sort_filter in SORT_VALUES: client = CoinGeckoAPI() data = client.get_coins_categories() df = pd.DataFrame(data) del df["id"] del df["content"] del df["updated_at"] df["top_3_coins"] = df["top_3_coins"].apply(coin_formatter) df.columns = [ replace_underscores_in_column_names(col) if isinstance(col, str) else col for col in df.columns ] return 
df return pd.DataFrame() # TODO: add string with overview @log_start_end(log=logger) def get_stable_coins(top: int = 20) -> pd.DataFrame: """Returns top stable coins [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Name, Symbol, Price, Change_24h, Exchanges, Market_Cap, Change_30d, Url """ df = get_coins(top=top, category="stablecoins") return df[COINS_COLUMNS] @log_start_end(log=logger) def get_exchanges() -> pd.DataFrame: """Get list of top exchanges from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Trust_Score, Id, Name, Country, Year_Established, Trade_Volume_24h_BTC, Url """ client = CoinGeckoAPI() df = pd.DataFrame(client.get_exchanges_list(per_page=250)) df.replace({float(np.NaN): None}, inplace=True) df = df[ [ "trust_score", "id", "name", "country", "year_established", "trade_volume_24h_btc", "url", ] ] df.columns = [ "Trust_Score", "Id", "Name", "Country", "Year_Established", "Trade_Volume_24h_BTC", "Url", ] create_df_index(df, "Rank") return df @log_start_end(log=logger) def get_financial_platforms() -> pd.DataFrame: """Get list of financial platforms from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Name, Category, Centralized, Url """ client = CoinGeckoAPI() df = pd.DataFrame(client.get_finance_platforms()) df.drop("facts", axis=1, inplace=True) create_df_index(df, "rank") df.columns = ["Rank", "Name", "Category", "Centralized", "Url"] return df @log_start_end(log=logger) def get_finance_products() -> pd.DataFrame: """Get list of financial products from CoinGecko API Returns ------- pandas.DataFrame Rank, Platform, Identifier, Supply_Rate, Borrow_Rate """ client = CoinGeckoAPI() df = pd.DataFrame( client.get_finance_products(per_page=250), columns=[ "platform", "identifier", "supply_rate_percentage", "borrow_rate_percentage", ], ) df.columns = ["Platform", "Identifier", "Supply_Rate", "Borrow_Rate"] create_df_index(df, "Rank") return df @log_start_end(log=logger) def get_indexes() -> pd.DataFrame: """Get list of crypto indexes from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Name, Id, Market, Last, MultiAsset """ client = CoinGeckoAPI() df = pd.DataFrame(client.get_indexes(per_page=250)) df.columns = ["Name", "Id", "Market", "Last", "MultiAsset"] create_df_index(df, "Rank") return df @log_start_end(log=logger) def get_derivatives() -> pd.DataFrame: """Get list of crypto derivatives from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Rank, Market, Symbol, Price, Pct_Change_24h, Contract_Type, Basis, Spread, Funding_Rate, Volume_24h, """ client = CoinGeckoAPI() df = pd.DataFrame(client.get_derivatives(include_tickers="unexpired")) df.drop( ["index", "last_traded_at", "expired_at", "index_id", "open_interest"], axis=1, inplace=True, ) df.rename(columns={"price_percentage_change_24h": "pct_change_24h"}, inplace=True) create_df_index(df, "rank") df["price"] = df["price"].apply( lambda x: "" if not x else float(x.strip("$").replace(",", "")) ) df.columns = [ "Rank", "Market", "Symbol", "Price", "Pct_Change_24h", "Contract_Type", "Basis", "Spread", "Funding_Rate", "Volume_24h", ] return df @log_start_end(log=logger) def get_exchange_rates() -> pd.DataFrame: """Get list of crypto, fiats, commodity exchange rates from CoinGecko API [Source: CoinGecko] Returns ------- pandas.DataFrame Index, Name, Unit, Value, Type """ client = CoinGeckoAPI() df = pd.DataFrame(client.get_exchange_rates()["rates"]).T.reset_index() df.drop("index", axis=1, inplace=True) create_df_index(df, "index") 
df.columns = ["Index", "Name", "Unit", "Value", "Type"] return df @log_start_end(log=logger) def get_global_info() -> pd.DataFrame: """Get global statistics about crypto from CoinGecko API like: - market cap change - number of markets - icos - number of active crypto [Source: CoinGecko] Returns ------- pandas.DataFrame Metric, Value """ client = CoinGeckoAPI() results = client.get_global() total_mcap = results.pop("market_cap_percentage") btc, eth = total_mcap.get("btc"), total_mcap.get("eth") for key in ["total_market_cap", "total_volume", "updated_at"]: del results[key] results["btc_market_cap_in_pct"] = btc results["eth_market_cap_in_pct"] = eth results["altcoin_market_cap_in_pct"] = 100 - (float(eth) + float(btc)) df = pd.Series(results).reset_index() df.columns = ["Metric", "Value"] df["Metric"] = df["Metric"].apply( lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x ) return df @log_start_end(log=logger) def get_global_markets_info() -> pd.DataFrame: """Get global statistics about crypto markets from CoinGecko API like: Market_Cap, Volume, Market_Cap_Percentage [Source: CoinGecko] Returns ------- pandas.DataFrame Market_Cap, Volume, Market_Cap_Percentage """ columns = [ "Market_Cap", "Volume", "Market_Cap_Percentage", ] data = [] client = CoinGeckoAPI() results = client.get_global() for key in columns: data.append(results.get(key)) df = pd.DataFrame(data).T df.columns = columns df.replace({float("nan"): None}, inplace=True) return df.reset_index() @log_start_end(log=logger) def get_global_defi_info() -> pd.DataFrame: """Get global statistics about Decentralized Finances [Source: CoinGecko] Returns ------- pandas.DataFrame Metric, Value """ client = CoinGeckoAPI() results = client.get_global_decentralized_finance_defi() for key, value in results.items(): try: results[key] = round(float(value), 4) except (ValueError, TypeError): pass df = pd.Series(results).reset_index() df.columns = ["Metric", "Value"] df["Metric"] = df["Metric"].apply( lambda x: replace_underscores_in_column_names(x) if isinstance(x, str) else x ) return df
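The companies table above is produced by pandas' record_path flattening. This standalone sketch shows the same json_normalize call on a hand-made payload; the field names mirror the CoinGecko response shape but the values are invented:

# Flatten a nested 'companies' payload into a DataFrame, as get_holdings_overview() does.
import pandas as pd

data = {
    "total_holdings": 100,
    "companies": [
        {"name": "ExampleCorp", "total_holdings": 60},
        {"name": "SampleInc", "total_holdings": 40},
    ],
}
# record_path selects the nested list whose dicts become the rows
df = pd.json_normalize(data, record_path=["companies"])
print(df)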
25.034404
316
0.624462
1,293
10,915
5.027842
0.188708
0.033226
0.023689
0.027996
0.522074
0.457776
0.393632
0.307953
0.228273
0.200892
0
0.00954
0.241319
10,915
435
317
25.091954
0.77551
0.202657
0
0.362963
0
0.003704
0.216583
0.062515
0
0
0
0.004598
0
1
0.048148
false
0.003704
0.033333
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44d14e6df3a58dd087e5855ff51ca5785dc0dff
20,399
py
Python
docker/messein/board-import-app/app.py
sourceperl/tk-dashboard
015ececc670902b02284749ac59f354db4304e48
[ "MIT" ]
null
null
null
docker/messein/board-import-app/app.py
sourceperl/tk-dashboard
015ececc670902b02284749ac59f354db4304e48
[ "MIT" ]
null
null
null
docker/messein/board-import-app/app.py
sourceperl/tk-dashboard
015ececc670902b02284749ac59f354db4304e48
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

from configparser import ConfigParser
from datetime import datetime
import urllib.parse
import hashlib
import io
import json
import logging
import os
import re
import time
from xml.dom import minidom
import feedparser
import requests
import schedule
import PIL.Image
import PIL.ImageDraw
import PIL.ImageFont
from metar.Metar import Metar
import pytz
import pdf2image
from board_lib import CustomRedis, catch_log_except, dt_utc_to_local
from webdav import WebDAV

# some const
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'

# some var
owc_doc_dir_last_sync = 0
owc_car_dir_last_sync = 0

# read config
cnf = ConfigParser()
cnf.read('/data/conf/board.conf')
# redis
main_redis_user = cnf.get('redis', 'user')
main_redis_pass = cnf.get('redis', 'pass')
# redis-loos for share
loos_redis_user = cnf.get('redis-loos', 'user')
loos_redis_pass = cnf.get('redis-loos', 'pass')
# gmap img traffic
gmap_img_url = cnf.get('gmap_img', 'img_url')
# gsheet
gsheet_url = cnf.get('gsheet', 'url')
# openweathermap
ow_app_id = cnf.get('openweathermap', 'app_id')
# webdav
webdav_url = cnf.get('owncloud_dashboard', 'webdav_url')
webdav_user = cnf.get('owncloud_dashboard', 'webdav_user')
webdav_pass = cnf.get('owncloud_dashboard', 'webdav_pass')
webdav_reglement_doc_dir = cnf.get('owncloud_dashboard', 'webdav_reglement_doc_dir')
webdav_carousel_img_dir = cnf.get('owncloud_dashboard', 'webdav_carousel_img_dir')


# some class
class DB:
    # create connector
    main = CustomRedis(host='board-redis-srv', username=main_redis_user, password=main_redis_pass,
                       socket_timeout=4, socket_keepalive=True)
    loos = CustomRedis(host='board-redis-loos-tls-cli', username=loos_redis_user, password=loos_redis_pass,
                       socket_timeout=4, socket_keepalive=True)


# some function
@catch_log_except()
def air_quality_atmo_ge_job():
    url = 'https://services3.arcgis.com/' + \
          'Is0UwT37raQYl9Jj/arcgis/rest/services/ind_grandest_5j/FeatureServer/0/query' + \
          '?where=%s' % urllib.parse.quote('code_zone IN (54395, 57463, 51454, 67482)') + \
          '&outFields=date_ech, code_qual, lib_qual, lib_zone, code_zone' + \
          '&returnGeometry=false&resultRecordCount=48' + \
          '&orderByFields=%s&f=json' % urllib.parse.quote('date_ech ASC')
    today_dt_date = datetime.today().date()
    # https request
    r = requests.get(url, timeout=5.0)
    # check error
    if r.status_code == 200:
        # decode json message
        atmo_raw_d = r.json()
        # populate zones dict with received values
        zones_d = {}
        for record in atmo_raw_d['features']:
            # load record data
            r_code_zone = record['attributes']['code_zone']
            r_ts = int(record['attributes']['date_ech'])
            r_dt = datetime.utcfromtimestamp(r_ts / 1000)
            r_value = record['attributes']['code_qual']
            # retain today value
            if r_dt.date() == today_dt_date:
                zones_d[r_code_zone] = r_value
        # skip key publish if zones_d is empty
        if not zones_d:
            raise ValueError('dataset is empty')
        # create and populate result dict
        d_air_quality = {'nancy': zones_d.get(54395, 0),
                         'metz': zones_d.get(57463, 0),
                         'reims': zones_d.get(51454, 0),
                         'strasbourg': zones_d.get(67482, 0)}
        # update redis
        DB.main.set_as_json('json:atmo', d_air_quality, ex=6 * 3600)


@catch_log_except()
def dir_est_img_job():
    # retrieve DIR-est webcams: Houdemont, Velaine-en-Haye, Saint-Nicolas, Côte de Flavigny
    for id_redis, lbl_cam, get_code in [('houdemont', 'Houdemont', '18'), ('velaine', 'Velaine', '53'),
                                        ('st-nicolas', 'Saint-Nicolas', '49'), ('flavigny', 'Flavigny', '5')]:
        r = requests.get('https://webcam.dir-est.fr/app.php/lastimg/%s' % get_code)
        if r.status_code == 200:
            # load image to PIL and resize it
            img = PIL.Image.open(io.BytesIO(r.content))
            img.thumbnail([224, 235])
            # add text to image
            txt_img = '%s - %s' % (lbl_cam, datetime.now().strftime('%H:%M'))
            font = PIL.ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMono.ttf', 16)
            draw = PIL.ImageDraw.Draw(img)
            draw.text((5, 5), txt_img, (0x10, 0x0e, 0x0e), font=font)
            # save image as PNG for redis
            redis_io = io.BytesIO()
            img.save(redis_io, format='PNG')
            # update redis
            DB.main.set('img:dir-est:%s:png' % id_redis, redis_io.getvalue(), ex=3600)


@catch_log_except()
def gsheet_job():
    # https request
    response = requests.get(gsheet_url, timeout=5.0)
    # process response
    d = dict()
    for line in response.iter_lines(decode_unicode=True):
        tag, value = line.split(',')
        d[tag] = value
    redis_d = dict(update=datetime.now().isoformat('T'), tags=d)
    DB.main.set_as_json('json:gsheet', redis_d, ex=2 * 3600)


@catch_log_except()
def img_gmap_traffic_job():
    # http request
    r = requests.get(gmap_img_url, stream=True, timeout=5.0)
    if r.status_code == 200:
        # convert RAW img format (bytes) to Pillow image
        pil_img = PIL.Image.open(io.BytesIO(r.raw.read()))
        # crop image
        pil_img = pil_img.crop((0, 0, 560, 328))
        # pil_img.thumbnail([632, 328])
        img_io = io.BytesIO()
        pil_img.save(img_io, format='PNG')
        # store RAW PNG to redis key
        DB.main.set('img:traffic-map:png', img_io.getvalue(), ex=2 * 3600)


@catch_log_except()
def local_info_job():
    # do request
    l_titles = []
    for post in feedparser.parse('https://france3-regions.francetvinfo.fr/societe/rss?r=grand-est').entries:
        title = post.title
        title = title.strip()
        title = title.replace('\n', ' ')
        l_titles.append(title)
    DB.main.set_as_json('json:news', l_titles, ex=2 * 3600)


@catch_log_except()
def owc_updated_job():
    # check if the owncloud directories have been updated by users (start sync jobs if needed)
    global owc_doc_dir_last_sync, owc_car_dir_last_sync
    for f in wdv.ls():
        item = f['file_path']
        item_last_modified = int(f['dt_last_modified'].timestamp())
        # document update ?
        if item == webdav_reglement_doc_dir:
            # update needed
            if item_last_modified > owc_doc_dir_last_sync:
                logging.debug(f'"{webdav_reglement_doc_dir}" seems updated: run "owncloud_sync_doc_job"')
                owc_sync_doc_job()
                owc_doc_dir_last_sync = item_last_modified
        # carousel update ?
        elif item == webdav_carousel_img_dir:
            # update needed
            if item_last_modified > owc_car_dir_last_sync:
                logging.debug(f'"{webdav_carousel_img_dir}" seems updated: run "owncloud_sync_carousel_job"')
                owc_sync_carousel_job()
                owc_car_dir_last_sync = item_last_modified


@catch_log_except()
def owc_sync_carousel_job():
    # sync owncloud carousel directory with local
    # local constants
    DIR_CAR_INFOS = 'dir:carousel:infos'
    DIR_CAR_RAW = 'dir:carousel:raw:min-png'

    # local functions
    def update_carousel_raw_data(filename, raw_data):
        # build json infos record
        md5 = hashlib.md5(raw_data).hexdigest()
        js_infos = json.dumps(dict(size=len(raw_data), md5=md5))
        # convert raw data to PNG thumbnails
        # create default error image
        img_to_redis = PIL.Image.new('RGB', (655, 453), (255, 255, 255))
        draw = PIL.ImageDraw.Draw(img_to_redis)
        draw.text((0, 0), f'loading error (src: "{filename}")', (0, 0, 0))
        # replace default image by convert result
        try:
            # convert png and jpg file
            if filename.lower().endswith('.png') or filename.lower().endswith('.jpg'):
                # image to PIL
                img_to_redis = PIL.Image.open(io.BytesIO(raw_data))
            # convert pdf file
            elif filename.lower().endswith('.pdf'):
                # PDF to PIL: convert first page to PIL image
                img_to_redis = pdf2image.convert_from_bytes(raw_data)[0]
        except Exception:
            pass
        # resize and format as raw png
        img_to_redis.thumbnail([655, 453])
        io_to_redis = io.BytesIO()
        img_to_redis.save(io_to_redis, format='PNG')
        # redis add (atomic write)
        pipe = DB.main.pipeline()
        pipe.hset(DIR_CAR_INFOS, filename, js_infos)
        pipe.hset(DIR_CAR_RAW, filename, io_to_redis.getvalue())
        pipe.execute()

    # log sync start
    logging.info('start of sync for owncloud carousel')
    # list local redis files
    local_files_d = {}
    for f_name, js_infos in DB.main.hgetall(DIR_CAR_INFOS).items():
        try:
            filename = f_name.decode()
            size = json.loads(js_infos)['size']
            local_files_d[filename] = size
        except ValueError:
            pass
    # check "dir:carousel:raw:min-png" consistency
    raw_file_l = [f.decode() for f in DB.main.hkeys(DIR_CAR_RAW)]
    # remove orphan infos record
    for f in list(set(local_files_d) - set(raw_file_l)):
        logging.debug(f'remove orphan "{f}" record in hash "{DIR_CAR_INFOS}"')
        DB.main.hdel(DIR_CAR_INFOS, f)
        del local_files_d[f]
    # remove orphan raw-png record
    for f in list(set(raw_file_l) - set(local_files_d)):
        logging.debug(f'remove orphan "{f}" record in hash "{DIR_CAR_RAW}"')
        DB.main.hdel(DIR_CAR_RAW, f)
    # list owncloud files (disallow directory)
    own_files_d = {}
    for f_d in wdv.ls(webdav_carousel_img_dir):
        file_path = f_d['file_path']
        size = f_d['content_length']
        if file_path and not file_path.endswith('/'):
            # search site-only tags (_@loos_, _@messein_...) in filename
            # site id is 16 chars max
            site_tag_l = re.findall(r'_@([a-zA-Z0-9\-]{1,16})', file_path)
            site_tag_l = [s.strip().lower() for s in site_tag_l]
            site_tag_ok = 'messein' in site_tag_l or not site_tag_l
            # download filter: ignore txt type, heavy file (>10 MB) or name tags mismatch
            filter_ok = not file_path.lower().endswith('.txt') \
                        and (size < 10 * 1024 * 1024) \
                        and site_tag_ok
            # add file to owncloud dict
            if filter_ok:
                own_files_d[f_d['file_path']] = size
    # exists only on local redis
    for f in list(set(local_files_d) - set(own_files_d)):
        logging.info(f'"{f}" exists only on local -> remove it')
        # redis remove (atomic)
        pipe = DB.main.pipeline()
        pipe.hdel(DIR_CAR_INFOS, f)
        pipe.hdel(DIR_CAR_RAW, f)
        pipe.execute()
    # exists only on remote owncloud
    for f in list(set(own_files_d) - set(local_files_d)):
        logging.info('"%s" exists only on remote -> download it' % f)
        data = wdv.download(os.path.join(webdav_carousel_img_dir, f))
        if data:
            update_carousel_raw_data(f, data)
    # exists on both sides (update only if file size changed)
    for f in list(set(local_files_d).intersection(own_files_d)):
        local_size = local_files_d[f]
        remote_size = own_files_d[f]
        logging.debug(f'check "{f}" remote size [{remote_size}]/local size [{local_size}]')
        if local_size != remote_size:
            logging.info(f'"{f}" size mismatch -> download it')
            data = wdv.download(os.path.join(webdav_carousel_img_dir, f))
            if data:
                update_carousel_raw_data(f, data)
    # log sync end
    logging.info('end of sync for owncloud carousel')


@catch_log_except()
def owc_sync_doc_job():
    # sync owncloud document directory with local
    # local constants
    DIR_DOC_INFOS = 'dir:doc:infos'
    DIR_DOC_RAW = 'dir:doc:raw'

    # local functions
    def update_doc_raw_data(filename, raw_data):
        # build json infos record
        md5 = hashlib.md5(raw_data).hexdigest()
        js_infos = json.dumps(dict(size=len(raw_data), md5=md5))
        # redis add (atomic write)
        pipe = DB.main.pipeline()
        pipe.hset(DIR_DOC_INFOS, filename, js_infos)
        pipe.hset(DIR_DOC_RAW, filename, raw_data)
        pipe.execute()

    # log sync start
    logging.info('start of sync for owncloud doc')
    # list local redis files
    local_files_d = {}
    for f_name, js_infos in DB.main.hgetall(DIR_DOC_INFOS).items():
        try:
            filename = f_name.decode()
            size = json.loads(js_infos)['size']
            local_files_d[filename] = size
        except ValueError:
            pass
    # check "dir:doc:raw" consistency
    raw_file_l = [f.decode() for f in DB.main.hkeys(DIR_DOC_RAW)]
    # remove orphan infos record
    for f in list(set(local_files_d) - set(raw_file_l)):
        logging.debug(f'remove orphan "{f}" record in hash "{DIR_DOC_INFOS}"')
        DB.main.hdel(DIR_DOC_INFOS, f)
        del local_files_d[f]
    # remove orphan raw record
    for f in list(set(raw_file_l) - set(local_files_d)):
        logging.debug(f'remove orphan "{f}" record in hash "{DIR_DOC_RAW}"')
        DB.main.hdel(DIR_DOC_RAW, f)
    # list owncloud files (disallow directory)
    own_files_d = {}
    for f_d in wdv.ls(webdav_reglement_doc_dir):
        file_path = f_d['file_path']
        size = f_d['content_length']
        if file_path and not file_path.endswith('/'):
            # download filter: ignore txt file or heavy file (>10 MB)
            ok_load = not file_path.lower().endswith('.txt') \
                      and (size < 10 * 1024 * 1024)
            if ok_load:
                own_files_d[f_d['file_path']] = size
    # exists only on local redis
    for f in list(set(local_files_d) - set(own_files_d)):
        logging.info(f'"{f}" exists only on local -> remove it')
        # redis remove (atomic)
        pipe = DB.main.pipeline()
        pipe.hdel(DIR_DOC_INFOS, f)
        pipe.hdel(DIR_DOC_RAW, f)
        pipe.execute()
    # exists only on remote owncloud
    for f in list(set(own_files_d) - set(local_files_d)):
        logging.info(f'"{f}" exists only on remote -> download it')
        data = wdv.download(os.path.join(webdav_reglement_doc_dir, f))
        if data:
            update_doc_raw_data(f, data)
    # exists on both sides (update only if file size changed)
    for f in list(set(local_files_d).intersection(own_files_d)):
        local_size = local_files_d[f]
        remote_size = own_files_d[f]
        logging.debug(f'check "{f}" remote size [{remote_size}]/local size [{local_size}]')
        if local_size != remote_size:
            logging.info(f'"{f}" size mismatch -> download it')
            data = wdv.download(os.path.join(webdav_reglement_doc_dir, f))
            if data:
                update_doc_raw_data(f, data)
    # log sync end
    logging.info('end of sync for owncloud doc')


@catch_log_except()
def loos_redis_import_job():
    share_keys_l = [('to:messein:json:tweets:@grtgaz', 'from:loos:json:tweets:@grtgaz'),
                    ('to:messein:img:grt-twitter-cloud:png', 'from:loos:img:grt-twitter-cloud:png'),
                    ('to:messein:json:flyspray-est', 'from:loos:json:flyspray-est')]
    for from_remote_key, to_local_key in share_keys_l:
        # copy redis data from loos key to local key
        data = DB.loos.get(from_remote_key)
        if data:
            DB.main.set(to_local_key, data, ex=4 * 3600)


@catch_log_except()
def vigilance_job():
    # request XML data from server
    r = requests.get('http://vigilance.meteofrance.com/data/NXFR34_LFPW_.xml', timeout=10.0)
    # check error
    if r.status_code == 200:
        # dom parsing (convert UTF-8 r.text to XML char)
        dom = minidom.parseString(r.text.encode('ascii', 'xmlcharrefreplace'))
        # set dict for dep data
        vig_data = {'update': '', 'department': {}}
        # map build date
        tz = pytz.timezone('Europe/Paris')
        map_date = str(dom.getElementsByTagName('entetevigilance')[0].getAttribute('dateinsert'))
        map_dt = tz.localize(datetime(int(map_date[0:4]), int(map_date[4:6]), int(map_date[6:8]),
                                      int(map_date[8:10]), int(map_date[10:12])))
        vig_data['update'] = map_dt.isoformat()
        # parse every department
        for items in dom.getElementsByTagName('datavigilance'):
            # current department
            dep_code = str(items.attributes['dep'].value)
            # get risk IDs if present
            risk_id = []
            for risk in items.getElementsByTagName('risque'):
                risk_id.append(int(risk.attributes['valeur'].value))
            # get flood ID if present
            flood_id = None
            for flood in items.getElementsByTagName('crue'):
                flood_id = int(flood.attributes['valeur'].value)
            # get color ID
            color_id = int(items.attributes['couleur'].value)
            # build vig_data
            vig_data['department'][dep_code] = {'vig_level': color_id,
                                                'flood_level': flood_id,
                                                'risk_id': risk_id}
        DB.main.set_as_json('json:vigilance', vig_data, ex=2 * 3600)


@catch_log_except()
def weather_today_job():
    # request data from NOAA server (METAR of Nancy-Essey Airport)
    r = requests.get('http://tgftp.nws.noaa.gov/data/observations/metar/stations/LFSN.TXT',
                     timeout=10.0, headers={'User-Agent': USER_AGENT})
    # check error
    if r.status_code == 200:
        # extract METAR message
        metar_msg = r.content.decode().split('\n')[1]
        # METAR parse
        obs = Metar(metar_msg)
        # init and populate d_today dict
        d_today = {}
        # message date and time
        if obs.time:
            d_today['update_iso'] = obs.time.strftime('%Y-%m-%dT%H:%M:%SZ')
            d_today['update_fr'] = dt_utc_to_local(obs.time).strftime('%H:%M %d/%m')
        # current temperature
        if obs.temp:
            d_today['temp'] = round(obs.temp.value('C'))
        # current dew point
        if obs.dewpt:
            d_today['dewpt'] = round(obs.dewpt.value('C'))
        # current pressure
        if obs.press:
            d_today['press'] = round(obs.press.value('hpa'))
        # current wind speed
        if obs.wind_speed:
            d_today['w_speed'] = round(obs.wind_speed.value('KMH'))
        # current wind gust
        if obs.wind_gust:
            d_today['w_gust'] = round(obs.wind_gust.value('KMH'))
        # current wind direction
        if obs.wind_dir:
            # replace 'W'est by 'O'uest
            d_today['w_dir'] = obs.wind_dir.compass().replace('W', 'O')
        # weather status str
        d_today['descr'] = 'n/a'
        # store to redis
        DB.main.set_as_json('json:weather:today:nancy', d_today, ex=2 * 3600)


# main
if __name__ == '__main__':
    # logging setup
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    logging.getLogger('PIL').setLevel(logging.INFO)
    logging.info('board-import-app started')
    # init webdav client
    wdv = WebDAV(webdav_url, username=webdav_user, password=webdav_pass)
    # init scheduler
    schedule.every(5).minutes.do(owc_updated_job)
    schedule.every(1).hours.do(owc_sync_carousel_job)
    schedule.every(1).hours.do(owc_sync_doc_job)
    schedule.every(2).minutes.do(loos_redis_import_job)
    schedule.every(60).minutes.do(air_quality_atmo_ge_job)
    schedule.every(5).minutes.do(dir_est_img_job)
    schedule.every(5).minutes.do(gsheet_job)
    schedule.every(2).minutes.do(img_gmap_traffic_job)
    schedule.every(5).minutes.do(local_info_job)
    schedule.every(5).minutes.do(vigilance_job)
    schedule.every(5).minutes.do(weather_today_job)
    # first call
    air_quality_atmo_ge_job()
    dir_est_img_job()
    gsheet_job()
    img_gmap_traffic_job()
    local_info_job()
    loos_redis_import_job()
    vigilance_job()
    weather_today_job()
    owc_updated_job()
    # main loop
    while True:
        schedule.run_pending()
        time.sleep(1)
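Every job above is wrapped in @catch_log_except() and registered with schedule, so one failing network call is logged rather than killing the main loop. A minimal self-contained sketch of that pattern; catch_log_except here is a guessed re-implementation of the board_lib helper, not its actual code:

import functools
import logging
import time
import schedule


def catch_log_except():
    # hypothetical stand-in for board_lib.catch_log_except:
    # trap any exception raised by the job and log it instead of propagating
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                logging.error('%s failed: %r', func.__name__, e)
        return wrapper
    return decorator


@catch_log_except()
def flaky_job():
    raise RuntimeError('boom')  # logged by the wrapper, not fatal


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    schedule.every(1).seconds.do(flaky_job)
    for _ in range(3):  # bounded loop for the demo
        schedule.run_pending()
        time.sleep(1)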
40.076621
110
0.622776
2,883
20,399
4.182102
0.172043
0.014929
0.016422
0.01551
0.424733
0.359045
0.306793
0.278427
0.2562
0.255122
0
0.018981
0.256189
20,399
508
111
40.155512
0.775654
0.143242
0
0.276353
0
0.005698
0.165217
0.039403
0
0
0.000691
0
0
1
0.037037
false
0.02849
0.079772
0
0.125356
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44d3d2bcc982ad4f8edfb7eb180227db0f5fa05
19,687
py
Python
fsleyes_widgets/widgetlist.py
pauldmccarthy/fsleyes-widgets
cb27899a0f665efe3f1c6ca1f89349507e004378
[ "Apache-2.0" ]
1
2018-11-04T11:18:46.000Z
2018-11-04T11:18:46.000Z
fsleyes_widgets/widgetlist.py
pauldmccarthy/fsleyes-widgets
cb27899a0f665efe3f1c6ca1f89349507e004378
[ "Apache-2.0" ]
2
2018-09-24T15:01:56.000Z
2020-01-20T10:39:37.000Z
fsleyes_widgets/widgetlist.py
pauldmccarthy/fsleyes-widgets
cb27899a0f665efe3f1c6ca1f89349507e004378
[ "Apache-2.0" ]
1
2017-12-09T09:02:07.000Z
2017-12-09T09:02:07.000Z
#!/usr/bin/env python
#
# widgetlist.py - A widget which displays a list of groupable widgets.
#
# Author: Paul McCarthy <pauldmccarthy@gmail.com>
#
"""This module provides the :class:`WidgetList` class, which displays a list
of widgets.
"""

import wx
import wx.lib.newevent as wxevent
import wx.lib.scrolledpanel as scrolledpanel

import fsleyes_widgets.togglepanel as togglepanel


class WidgetList(scrolledpanel.ScrolledPanel):
    """A scrollable list of widgets.

    The ``WidgetList`` provides a number of features:

      - Widgets can be grouped.
      - A label can be shown next to each widget.
      - Widget groups can be collapsed/expanded.
      - Widgets and groups can be dynamically added/removed.

    The most important methods are:

    .. autosummary::
       :nosignatures:

       AddWidget
       AddGroup

    A ``WidgetList`` looks something like this:

    .. image:: images/widgetlist.png
       :scale: 50%
       :align: center

    A ``WidgetList`` emits a :data:`WidgetListChangeEvent` whenever its
    contents change.
    """

    _defaultOddColour = None
    """Background colour for widgets on odd rows. Initialised in
    :meth:`__init__`.
    """

    _defaultEvenColour = None
    """Background colour for widgets on even rows. Initialised in
    :meth:`__init__`.
    """

    _defaultGroupColour = None
    """Border and title background colour for widget groups. Initialised in
    :meth:`__init__`.
    """

    def __init__(self, parent, style=0, minHeight=-1):
        """Create a ``WidgetList``.

        :arg parent:    The :mod:`wx` parent object.
        :arg style:     Passed through to ``wx.ScrolledPanel.__init__``
        :arg minHeight: Minimum height of each row
        """
        odd   = wx.SystemSettings.GetColour(wx.SYS_COLOUR_LISTBOX)
        even  = odd.ChangeLightness(90)
        group = odd

        if WidgetList._defaultOddColour is None:
            WidgetList._defaultOddColour = odd
        if WidgetList._defaultEvenColour is None:
            WidgetList._defaultEvenColour = even
        if WidgetList._defaultGroupColour is None:
            WidgetList._defaultGroupColour = group

        self.__minHeight   = minHeight
        self.__widgSizer   = wx.BoxSizer(wx.VERTICAL)
        self.__sizer       = wx.BoxSizer(wx.VERTICAL)
        self.__groupSizer  = wx.BoxSizer(wx.VERTICAL)
        self.__widgets     = {}
        self.__groups      = {}

        self.__oddColour   = WidgetList._defaultOddColour
        self.__evenColour  = WidgetList._defaultEvenColour
        self.__groupColour = WidgetList._defaultGroupColour

        self.__sizer.Add(self.__widgSizer, flag=wx.EXPAND)
        self.__sizer.Add(self.__groupSizer, flag=wx.EXPAND)

        self.__oneExpanded = style & WL_ONE_EXPANDED

        # The SP.__init__ method seemingly
        # induces a call to DoGetBestSize,
        # which assumes that all of the
        # things above exist. So we call
        # init after we've created those
        # things.
        scrolledpanel.ScrolledPanel.__init__(self, parent)
        self.SetSizer(self.__sizer)
        self.SetupScrolling()
        self.SetAutoLayout(1)

    def DoGetBestSize(self):
        """Returns the best size for the widget list, with all group
        widgets expanded.
        """
        width, height = self.__widgSizer.GetSize().Get()
        for name, group in self.__groups.items():
            w, h = group.parentPanel.GetBestSize().Get()
            w += 20
            h += 10
            if w > width:
                width = w
            height += h
        return wx.Size(width, height)

    def __makeWidgetKey(self, widget):
        """Widgets are stored in a dictionary - this method generates a
        string to use as a key, based on the widget ``id``.
        """
        return str(id(widget))

    def __setLabelWidths(self, widgets):
        """Calculates the maximum width of all widget labels, and sets all
        labels to that width.

        This ensures that all labels/widgets are horizontally aligned.
        """
        if len(widgets) == 0:
            return

        dc        = wx.ClientDC(widgets[0].label)
        lblWidths = [dc.GetTextExtent(w.displayName)[0] for w in widgets]
        maxWidth  = max(lblWidths)

        for w in widgets:
            w.label.SetMinSize((maxWidth + 10, -1))
            w.label.SetMaxSize((maxWidth + 10, -1))

    def __setColours(self):
        """Called whenever the widget list needs to be refreshed.

        Makes sure that odd/even widgets and their labels have the correct
        background colour.
        """
        def setWidgetColours(widgDict):
            for i, widg in enumerate(widgDict.values()):
                if i % 2:
                    colour = self.__oddColour
                else:
                    colour = self.__evenColour
                widg.SetBackgroundColour(colour)

        setWidgetColours(self.__widgets)
        for group in self.__groups.values():
            setWidgetColours(group.widgets)
            group.parentPanel.SetBackgroundColour(self.__groupColour)
            group.colPanel   .SetBackgroundColour(self.__groupColour)

    def __refresh(self, *args, **kwargs):
        """Updates widget colours (see :meth:`__setColours`), and lays out
        the widget list.

        :arg postEvent: If ``True`` (the default), a
                        :data:`WidgetListChangeEvent` is posted.
        """
        self.__setColours()
        self.FitInside()
        self.Layout()
        if kwargs.get('postEvent', True):
            wx.PostEvent(self, WidgetListChangeEvent())

    def SetColours(self, odd=None, even=None, group=None):
        """Sets the colours used on this ``WidgetList``.

        Each argument is assumed to be a tuple of ``(r, g, b)`` values,
        each in the range ``[0 - 255]``.

        :arg odd:   Background colour for widgets on odd rows.
        :arg even:  Background colour for widgets on even rows.
        :arg group: Border/title colour for widget groups.
        """
        if odd is not None:
            self.__oddColour = odd
        if even is not None:
            self.__evenColour = even
        if group is not None:
            self.__groupColour = group
        self.__setColours()

    def GetGroups(self):
        """Returns a list containing the name of every group in this
        ``WidgetList``.
        """
        return list(self.__groups.keys())

    def HasGroup(self, groupName):
        """Returns ``True`` if this ``WidgetList`` contains a group with the
        specified name.
        """
        return groupName in self.__groups

    def RenameGroup(self, groupName, newDisplayName):
        """Changes the display name of the specified group.

        .. note:: This method only changes the *display name* of a group,
                  not the group identifier name. See the :meth:`AddGroup`
                  method.

        :arg groupName:      Name of the group.
        :arg newDisplayName: New display name for the group.
        """
        group = self.__groups[groupName]
        group.displayName = newDisplayName
        group.colPanel.SetLabel(newDisplayName)

    def AddGroup(self, groupName, displayName=None):
        """Add a new group to this ``WidgetList``.

        A :exc:`ValueError` is raised if a group with the specified name
        already exists.

        :arg groupName:   The name of the group - this is used as an
                          identifier for the group.

        :arg displayName: A string to be shown in the title bar for the
                          group. This can be changed later via the
                          :meth:`RenameGroup` method.
        """
        if displayName is None:
            displayName = groupName

        if groupName in self.__groups:
            raise ValueError('A group with name {} '
                             'already exists'.format(groupName))

        parentPanel = wx.Panel(self, style=wx.SUNKEN_BORDER)
        colPanel    = togglepanel.TogglePanel(parentPanel, label=displayName)
        widgPanel   = colPanel.GetPane()
        widgSizer   = wx.BoxSizer(wx.VERTICAL)

        widgPanel.SetSizer(widgSizer)

        gapSizer = wx.BoxSizer(wx.VERTICAL)

        # A spacer exists at the top,
        # and between, every group.
        gapSizer.Add((-1, 5))
        gapSizer.Add(parentPanel,
                     border=10,
                     flag=(wx.EXPAND | wx.LEFT | wx.RIGHT))
        self.__groupSizer.Add(gapSizer, flag=wx.EXPAND)

        parentSizer = wx.BoxSizer(wx.VERTICAL)
        parentSizer.Add(colPanel,
                        border=5,
                        flag=wx.EXPAND | wx.BOTTOM,
                        proportion=0)
        parentPanel.SetSizer(parentSizer)

        group = _Group(groupName,
                       displayName,
                       gapSizer,
                       parentPanel,
                       colPanel,
                       widgPanel,
                       widgSizer)

        self.__groups[groupName] = group
        self.__refresh()

        # Mouse wheel listener needed
        # on all children under linux/GTK
        if wx.Platform == '__WXGTK__':
            parentPanel.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)
            colPanel   .Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)

        colPanel.Bind(togglepanel.EVT_TOGGLEPANEL_EVENT, self.__onGroupExpand)

    def GetWidgets(self, groupName=None):
        """Returns a list containing all of the widgets that have been added
        to this ``WidgetList``.

        :arg groupName: If provided, only widgets in the specified group will
                        be returned. Otherwise, ungrouped widgets are
                        returned.
        """
        if groupName is None:
            widgDict = self.__widgets
        else:
            widgDict = self.__groups[groupName].widgets
        widgets = [w.widget for w in widgDict.values()]
        return widgets

    def AddWidget(self, widget, displayName, tooltip=None, groupName=None):
        """Add an arbitrary widget to the property list.

        If the ``groupName`` is not provided, the widget is added to a list
        of *top level* widgets, which appear at the top of the list, above
        any groups. Otherwise, the widget is added to the collapsible panel
        corresponding to the specified group.

        A :exc:`ValueError` is raised if the widget is already contained
        in the list.

        :arg widget:      The widget to add to the list.
        :arg displayName: The widget label/display name.
        :arg tooltip:     A tooltip for the widget.
        :arg groupName:   Name of the group to which the widget should be
                          added.

        .. note:: The provided ``widget`` may also be a :class:`wx.Sizer`
                  instance, although support for this is basic. Specifically,
                  only one level of nesting is possible, i.e. the provided
                  ``wx.Sizer`` may not have any other ``wx.Sizer`` instances
                  as its children.
        """
        if groupName is None:
            widgDict    = self.__widgets
            parent      = self
            parentSizer = self.__widgSizer
        else:
            group       = self.__groups[groupName]
            widgDict    = group.widgets
            parent      = group.widgPanel
            parentSizer = group.sizer

        key = self.__makeWidgetKey(widget)
        if key in widgDict:
            raise ValueError('Widget {} already exists'.format(key))

        widgPanel = wx.Panel(parent)
        widgSizer = wx.BoxSizer(wx.HORIZONTAL)
        widgPanel.SetSizer(widgSizer)

        if isinstance(widget, wx.Sizer):
            for child in widget.GetChildren():
                child.GetWindow().Reparent(widgPanel)
        else:
            w, h = widget.GetBestSize().Get()
            if self.__minHeight > h:
                h = self.__minHeight
            widget.SetMinSize((w, h))
            widget.Reparent(widgPanel)

        label = wx.StaticText(widgPanel,
                              label=displayName,
                              style=wx.ALIGN_RIGHT)
        widgSizer.Add(label, flag=wx.EXPAND)
        widgSizer.Add(widget, flag=wx.EXPAND, proportion=1)

        parentSizer.Add(widgPanel,
                        flag=wx.EXPAND | wx.LEFT | wx.RIGHT,
                        border=5)

        widg = _Widget(displayName,
                       tooltip,
                       label,
                       widget,
                       widgPanel,
                       widgSizer)

        if tooltip is not None:
            widg.SetTooltip(tooltip)

        # Under linux/GTK, mouse events are
        # captured by child windows, so if
        # we want scrolling to work, we need
        # to capture scroll events on every
        # child. Under OSX/cocoa, this is
        # not necessary.
        if wx.Platform == '__WXGTK__':
            widg.Bind(wx.EVT_MOUSEWHEEL, self.__onMouseWheel)

        widgDict[key] = widg
        self.__setLabelWidths(list(widgDict.values()))
        self.__refresh()

    def __onMouseWheel(self, ev):
        """Only called if running on GTK. Scrolls the widget list according
        to the mouse wheel rotation.
        """
        posx, posy = self.GetViewStart()
        rotation   = ev.GetWheelRotation()

        if rotation > 0:
            delta = 5
        elif rotation < 0:
            delta = -5
        else:
            return

        if ev.GetWheelAxis() == wx.MOUSE_WHEEL_VERTICAL:
            posy -= delta
        else:
            posx += delta

        self.Scroll(posx, posy)

    def __onGroupExpand(self, ev):
        """Called when the user expands or collapses a group. Enforces
        the :data:`WL_ONE_EXPANDED` style if it is enabled, and refreshes
        the panel.
        """
        panel = ev.GetEventObject()
        if panel.IsExpanded() and self.__oneExpanded:
            for group in self.__groups.values():
                if group.colPanel is not panel:
                    group.colPanel.Collapse()
        self.__refresh()

    def AddSpace(self, groupName=None):
        """Adds some empty vertical space to the widget list.

        :arg groupName: Name of the group to which the space should be
                        added. If not specified, the space is added to the
                        *top level* widget list - see the :meth:`AddWidget`
                        method.
        """
        if groupName is None:
            parentSizer = self.__widgSizer
        else:
            parentSizer = self.__groups[groupName].sizer
        parentSizer.Add((-1, 10))

    def RemoveWidget(self, widget, groupName=None):
        """Removes and destroys the specified widget from this
        ``WidgetList``.

        :arg widget:    The widget to remove.
        :arg groupName: Name of the group in which the widget is contained.
        """
        key = self.__makeWidgetKey(widget)

        if groupName is None:
            parentSizer = self.__widgSizer
            widgDict    = self.__widgets
        else:
            group       = self.__groups[groupName]
            parentSizer = group.sizer
            widgDict    = group.widgets

        widg = widgDict.pop(key)
        parentSizer.Detach(widg.panel)
        widg.Destroy()
        self.__refresh()

    def RemoveGroup(self, groupName):
        """Removes the specified group, and destroys all of the widgets
        contained within it.
        """
        group = self.__groups.pop(groupName)
        self.__groupSizer.Detach(group.gapSizer)
        group.parentPanel.Destroy()
        self.__refresh()

    def Clear(self):
        """Removes and destroys all widgets and groups."""
        for key in list(self.__widgets.keys()):
            widg = self.__widgets.pop(key)
            self.__widgSizer.Detach(widg.sizer)
            widg.Destroy()
        for group in self.GetGroups():
            self.RemoveGroup(group)
        self.__refresh()

    def ClearGroup(self, groupName):
        """Removes and destroys all widgets in the specified group, but does
        not remove the group.
        """
        group = self.__groups[groupName]
        group.sizer.Clear(True)
        group.widgets.clear()
        self.__refresh()

    def GroupSize(self, groupName):
        """Returns the number of widgets that have been added to the
        specified group.
        """
        return len(self.__groups[groupName].widgets)

    def IsExpanded(self, groupName):
        """Returns ``True`` if the panel for the specified group is currently
        expanded, ``False`` if it is collapsed.
        """
        return self.__groups[groupName].colPanel.IsExpanded()

    def Expand(self, groupName, expand=True):
        """Expands or collapses the panel for the specified group."""
        panel = self.__groups[groupName].colPanel
        if expand:
            panel.Expand()
        else:
            panel.Collapse()
        self.__refresh()


class _Widget:
    """The ``_Widget`` class is used internally by the :class:`WidgetList`
    to organise references to each widget in the list.
    """

    def __init__(self, displayName, tooltip, label, widget, panel, sizer):
        self.displayName = displayName
        self.tooltip     = tooltip
        self.label       = label
        self.widget      = widget
        self.panel       = panel
        self.sizer       = sizer

    def SetBackgroundColour(self, colour):
        self.panel.SetBackgroundColour(colour)
        self.label.SetBackgroundColour(colour)

    def SetTooltip(self, tooltip):
        self.label.SetToolTip(wx.ToolTip(tooltip))
        if isinstance(self.widget, wx.Sizer):
            for child in self.widget.GetChildren():
                child.GetWindow().SetToolTip(wx.ToolTip(tooltip))
        else:
            self.widget.SetToolTip(wx.ToolTip(tooltip))

    def Bind(self, evType, callback):
        self.panel.Bind(evType, callback)
        self.label.Bind(evType, callback)
        if isinstance(self.widget, wx.Sizer):
            for c in self.widget.GetChildren():
                c.GetWindow().Bind(evType, callback)
        else:
            self.widget.Bind(evType, callback)

    def Destroy(self):
        self.label.Destroy()
        if isinstance(self.widget, wx.Sizer):
            self.widget.Clear(True)
        else:
            self.widget.Destroy()


class _Group:
    """The ``_Group`` class is used internally by :class:`WidgetList`
    instances to represent groups of widgets that are in the list.
    """

    def __init__(self, groupName, displayName, gapSizer, parentPanel,
                 colPanel, widgPanel, sizer):
        self.groupName   = groupName
        self.displayName = displayName
        self.gapSizer    = gapSizer
        self.parentPanel = parentPanel
        self.colPanel    = colPanel
        self.widgPanel   = widgPanel
        self.sizer       = sizer
        self.widgets     = {}


_WidgetListChangeEvent, _EVT_WL_CHANGE_EVENT = wxevent.NewEvent()

WidgetListChangeEvent = _WidgetListChangeEvent
"""Event emitted by a :class:`WidgetList` when its contents change."""

EVT_WL_CHANGE_EVENT = _EVT_WL_CHANGE_EVENT
"""Identifier for the :data:`WidgetListChangeEvent`."""

WL_ONE_EXPANDED = 1
""":class:`WidgetList` style flag. When applied, at most one group will be
expanded at any one time.
"""
31.05205
79
0.584701
2,105
19,687
5.349169
0.193824
0.015986
0.016874
0.010657
0.16119
0.105861
0.050977
0.007993
0
0
0
0.003105
0.329354
19,687
633
80
31.101106
0.849731
0.29268
0
0.243986
0
0
0.00689
0
0
0
0
0
0
1
0.103093
false
0
0.013746
0
0.164948
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44da748e4ab13e359126b052ffbda6e65cd72ff
1,441
py
Python
setup.py
TransactPRO/gw3-python-client
77a9395c13f75467385227461b57ce85f4730ce5
[ "MIT" ]
1
2018-03-13T00:10:05.000Z
2018-03-13T00:10:05.000Z
setup.py
TransactPRO/gw3-python-client
77a9395c13f75467385227461b57ce85f4730ce5
[ "MIT" ]
1
2020-08-05T08:25:14.000Z
2020-08-05T08:25:14.000Z
setup.py
TransactPRO/gw3-python-client
77a9395c13f75467385227461b57ce85f4730ce5
[ "MIT" ]
null
null
null
#!/usr/bin/env python
import setuptools

MAINTAINER_NAME = 'Transact Pro'
MAINTAINER_EMAIL = 'support@transactpro.lv'
URL_GIT = 'https://github.com/TransactPRO/gw3-python-client'

try:
    import pypandoc
    LONG_DESCRIPTION = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
    LONG_DESCRIPTION = ''

CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Topic :: Software Development :: Libraries :: Python Modules'
]

required = [
    'requests',
]

setuptools.setup(
    name='transactpro-gw3-client',
    version='1.7.6',
    description='Transact PRO Gateway3 implementation in Python.',
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    author='Transact Pro',
    author_email='support@transactpro.net',
    install_requires=required,
    url=URL_GIT,
    packages=setuptools.find_packages(),
    license='MIT',
    classifiers=CLASSIFIERS,
    keywords='GW3 gateway3 integration gateway TransactPRO python python3',
    python_requires='>=3.6',
)
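The try/except around pypandoc is a common guard so the package still builds when pandoc is not available. A standalone illustration of the same pattern; note that newer pypandoc releases replaced convert() with convert_file()/convert_text(), which is what this sketch uses:

# fall back to an empty long description when pandoc/pypandoc is unavailable
try:
    import pypandoc
    long_description = pypandoc.convert_file('README.md', 'rst')
except (IOError, ImportError, OSError, RuntimeError):
    long_description = ''

print(repr(long_description[:80]))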
28.82
75
0.693963
153
1,441
6.437909
0.555556
0.076142
0.126904
0.105584
0
0
0
0
0
0
0
0.01598
0.174879
1,441
49
76
29.408163
0.812447
0.013879
0
0
0
0
0.492958
0.047183
0
0
0
0
0
1
0
false
0
0.073171
0
0.073171
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f44db94e38c8e52a26896847a590eaee7cd80693
2,359
py
Python
social_auth_mitxpro/backends_test.py
mitodl/social-auth-mitxpro
8cae8bbe900b25f724b24f783d06de7b853a1366
[ "BSD-3-Clause" ]
null
null
null
social_auth_mitxpro/backends_test.py
mitodl/social-auth-mitxpro
8cae8bbe900b25f724b24f783d06de7b853a1366
[ "BSD-3-Clause" ]
37
2019-03-06T17:43:26.000Z
2022-03-21T05:18:10.000Z
social_auth_mitxpro/backends_test.py
mitodl/social-auth-mitxpro
8cae8bbe900b25f724b24f783d06de7b853a1366
[ "BSD-3-Clause" ]
null
null
null
"""Tests for our backend""" from urllib.parse import urljoin import pytest from social_auth_mitxpro.backends import MITxProOAuth2 # pylint: disable=redefined-outer-name @pytest.fixture def strategy(mocker): """Mock strategy""" return mocker.Mock() @pytest.fixture def backend(strategy): """MITxProOAuth2 backend fixture""" return MITxProOAuth2(strategy) @pytest.mark.parametrize( "response, expected", [ ( {"username": "abc123", "email": "user@example.com", "name": "Jane Doe"}, {"username": "abc123", "email": "user@example.com", "name": "Jane Doe"}, ), ({"username": "abc123"}, {"username": "abc123", "email": "", "name": ""}), ], ) def test_get_user_details(backend, response, expected): """Test that get_user_details produces expected results""" assert backend.get_user_details(response) == expected def test_user_data(backend, strategy, mocked_responses): """Tests that the backend makes a correct appropriate request""" access_token = "user_token" api_root = "http://xpro.example.com/" response = {"username": "abc123", "email": "user@example.com", "name": "Jane Doe"} mocked_responses.add( mocked_responses.GET, urljoin(api_root, "/api/users/me"), json=response ) settings = {"API_ROOT": api_root} def _setting(name, *, backend, default=None): # pylint: disable=unused-argument """Dummy setting func""" return settings.get(name, default) strategy.setting.side_effect = _setting assert backend.user_data(access_token) == response request, _ = mocked_responses.calls[0] assert request.headers["Authorization"] == "Bearer user_token" strategy.setting.assert_any_call("API_ROOT", default=None, backend=backend) def test_authorization_url(backend, strategy): """Test authorization_url()""" strategy.setting.return_value = "abc" assert backend.authorization_url() == "abc" strategy.setting.assert_called_once_with( "AUTHORIZATION_URL", default=None, backend=backend ) def test_access_token_url(backend, strategy): """Test access_token_url()""" strategy.setting.return_value = "abc" assert backend.access_token_url() == "abc" strategy.setting.assert_called_once_with( "ACCESS_TOKEN_URL", default=None, backend=backend )
29.4875
86
0.680797
271
2,359
5.730627
0.321033
0.042498
0.048938
0.04443
0.265937
0.245976
0.204765
0.204765
0.094012
0.065679
0
0.009799
0.178042
2,359
79
87
29.860759
0.791129
0.133531
0
0.166667
0
0
0.166833
0
0
0
0
0
0.166667
1
0.145833
false
0
0.0625
0
0.270833
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f450583ef2fc87d70603f2a691c77577371d8626
11,640
py
Python
classifier/interpretation_exp.py
methylgrammarlab/proj_scwgbs
287196898796eb617fef273bfaf9e978a57047dc
[ "MIT" ]
null
null
null
classifier/interpretation_exp.py
methylgrammarlab/proj_scwgbs
287196898796eb617fef273bfaf9e978a57047dc
[ "MIT" ]
null
null
null
classifier/interpretation_exp.py
methylgrammarlab/proj_scwgbs
287196898796eb617fef273bfaf9e978a57047dc
[ "MIT" ]
null
null
null
""" Code adapted from https://github.com/ohlerlab/DeepRiPe with changes Extract information and graphs from the Integrated gradients output """ import argparse import os import sys import matplotlib.pyplot as plt import numpy as np import seaborn as sns from classifier.plotseqlogo import seqlogo_fig from commons import files_tools sns.set() sns.set_style('whitegrid') def parse_input(): parser = argparse.ArgumentParser() parser.add_argument('--interpretation_file', help='path for the input file', required=True) parser.add_argument('--output_folder', help='Path of the output folder', required=False, default=os.path.dirname(sys.argv[0])) args = parser.parse_args() return args def plot_one_seq(seq, output, title, yl=None): fig = seqlogo_fig(seq[:, :], vocab="DNA", yl=yl, figsize=(20, 4), ncol=1, plot_name=title) fig.savefig(output) plt.close() def plot_multi_seq(sequences_dict, number_of_seq, output_folder=None): """ Plot the multiple sequences in one figure :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param number_of_seq: number of sequences in one figure :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k][:number_of_seq] fig = seqlogo_fig(np.transpose(ex_seq[:, :, :], axes=(1, 2, 0)), vocab="DNA", figsize=(8, 4), ncol=1, yl=0.1, plot_name="seq for top %s of type %s" % (number_of_seq, k)) if output_folder: fig.savefig(os.path.join(output_folder, "seq_for_top_%s_of_type_%s" % (number_of_seq, k))) else: plt.show() plt.close() def plot_avg_sequence(sequences_dict, output_folder=None): """ Plot the average sequence across 30 letters and all the sequence :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0)) name = k fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1, plot_name="Average attribution score for prediction %s" % name) ax = fig.axes[0] ax.set_title("Average sequence for prediction %s" % name, fontsize=16) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s30.png" % k)) else: plt.show() plt.close() for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) fig = seqlogo_fig(mean_seq, vocab="DNA", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s.png" % k)) else: plt.show() plt.close() def plot_avg_sequence_sw(sequences_dict, output_folder=None): """ plot the avg sequence using SW, flatten the AT to W and CG to S :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3] new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2] fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30.png" % k)) else: plt.show() for k in sequences_dict: ex_seq = 
sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): new_seq[i][0] = mean_seq[i][0] + mean_seq[i][3] new_seq[i][1] = mean_seq[i][1] + mean_seq[i][2] fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw.png" % k)) else: plt.show() plt.close() def plot_avg_sequence_sw_flatten_values(sequences_dict, output_folder=None): """ plot the avg sequence using SW, flatten the AT to W and CG to S and combining both options to get one number per sequence place :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, 60:90, :], axis=0).reshape(1, 30, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): w = mean_seq[i][0] + mean_seq[i][3] s = mean_seq[i][1] + mean_seq[i][2] delta = s - w sw_index = 1 if delta > 0 else 0 new_seq[i][sw_index] = abs(delta) fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(8, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw30_flatten.png" % k)) else: fig.show() for k in sequences_dict: ex_seq = sequences_dict[k] mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) new_seq = np.zeros_like(mean_seq) for i in range(mean_seq.shape[0]): w = mean_seq[i][0] + mean_seq[i][3] s = mean_seq[i][1] + mean_seq[i][2] delta = s - w sw_index = 1 if delta > 0 else 0 new_seq[i][sw_index] = abs(delta) fig = seqlogo_fig(new_seq, vocab="DNAWS", figsize=(20, 4), ncol=1, plot_name="Avg seq for %s" % k) if output_folder: fig.savefig(os.path.join(output_folder, "Avg_seq_for_%s_sw_flatten.png" % k)) else: plt.show() plt.close() def plot_distance_weight_two_sides(sequences_dict, output_folder=None): """ Plot the integrated gradient value of each feature based on distance from center, two ways graph(-74->74) We wanted to see if there are indexes and some periodicity :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: class_type = k ex_seq = np.abs(sequences_dict[k]) mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) seq_weight = np.sum(mean_seq, axis=1) middle = int(seq_weight.shape[0] / 2) - 1 seq_weight[middle] = None seq_weight[middle + 1] = None x = np.arange(-74, 1).astype(np.int) x = np.append(x, x[::-1] * -1) x_ticks = [i for i in range(-70, 80, 10)] plt.xticks(x_ticks) plt.plot(x, seq_weight, '.-') plt.legend() plt.grid(axis="y") plt.xlabel("Distance from CpG Site", fontsize=12) plt.ylabel("Attribute score", fontsize=12) plt.title("Attribute score base on distance from CpG site for %s" % class_type, fontsize=14) if output_folder: plt.savefig( os.path.join(output_folder, "distance_importance_of_flanking_letters_type_%s_two_way.png" % k)) else: plt.show() plt.close() def plot_distance_weight_one_side(sequences_dict, output_folder=None): """ Plot the integrated gradient value of each feature based on distance from center, one way graph (0->74) We wanted to see if there are indexes and some periodicity :param sequences_dict: A dictionary 
with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ for k in sequences_dict: class_type = k ex_seq = np.abs(sequences_dict[k]) mean_seq = np.transpose(np.mean(ex_seq[:, :, :], axis=0).reshape(1, 150, 4), axes=(1, 2, 0)) seq_weight = np.sum(mean_seq, axis=1) std_seq = np.std(mean_seq, axis=1) middle = int(seq_weight.shape[0] / 2) - 1 seq_to_values = np.flip(seq_weight[:middle]) seq_from_values = seq_weight[middle + 2:] seq_to_std = np.flip(std_seq[:middle]) seq_from_std = std_seq[middle + 2:] x = np.arange(1, seq_from_values.shape[0] + 1) plt.errorbar(x, seq_to_values, seq_to_std, marker='^', label="to", alpha=0.5) plt.errorbar(x, seq_from_values, seq_from_std, marker='^', label="from", alpha=0.5) plt.legend() x_ticks = [i for i in range(1, 5)] + [i for i in range(5, 75, 5)] plt.xticks(x_ticks) plt.xlabel("Distance from CG") plt.ylabel("Importance shannon values") plt.title("Importance of flanking letters - %s" % (class_type)) if output_folder: plt.savefig(os.path.join(output_folder, "distance_importance_of_flanking_letters_type_%s_one_way.png" % k)) else: plt.show() plt.close() def print_each_seq(sequences_dict, output_folder): """ Plot all the sequences on after the other :param sequences_dict: A dictionary with pl or cl as key and the integrated values results for each sequence in this label :param output_folder: Output folder """ cl_list = [] pl_list = [] # Remove duplicates seq = None for i in range(sequences_dict["cl"].shape[0]): new_seq = sequences_dict["cl"][i] if np.all(new_seq == seq): continue else: cl_list.append(new_seq) seq = new_seq seq = None for i in range(sequences_dict["pl"].shape[0]): new_seq = sequences_dict["pl"][i] if np.all(new_seq == seq): continue else: pl_list.append(new_seq) seq = new_seq for i in range(1000): plot_one_seq(seq=cl_list[i], output=os.path.join(output_folder, "cl_seq_%s.png" % i), title="CL seq num %s" % i, yl=0.1) for i in range(1000): plot_one_seq(seq=pl_list[i], output=os.path.join(output_folder, "pl_seq_%s.png" % i), title="PL seq num %s" % i, yl=0.1) def main(): args = parse_input() ex_seq_d = files_tools.load_pickle(args.interpretation_file) new_d = {"cl": ex_seq_d["cl"], "pl": ex_seq_d["pl"]} plot_distance_weight_one_side(new_d, args.output_folder) plot_distance_weight_two_sides(new_d, args.output_folder) plot_multi_seq(new_d, 1000, args.output_folder) plot_avg_sequence(new_d, args.output_folder) plot_avg_sequence_sw(new_d, args.output_folder) plot_avg_sequence_sw_flatten_values(new_d, args.output_folder) print_each_seq(new_d, args.output_folder) if __name__ == '__main__': main()
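The distance-weight plots reduce a (num_seqs, 150, 4) attribution tensor to one scalar per position. A standalone sketch of that reduction on random data, with the shape conventions taken from the functions above:

import numpy as np

# fake integrated-gradients output: 100 sequences x 150 positions x 4 bases
rng = np.random.default_rng(0)
attributions = rng.normal(size=(100, 150, 4))

# mean absolute attribution per (position, base), then summed over the base axis,
# mirroring the reduction in plot_distance_weight_two_sides
mean_seq = np.abs(attributions).mean(axis=0)   # shape (150, 4)
seq_weight = mean_seq.sum(axis=1)              # shape (150,)

# mask the two central positions (the CpG site itself), as the plot does
middle = seq_weight.shape[0] // 2 - 1
seq_weight[middle:middle + 2] = np.nan

print(seq_weight[:5])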
36.489028
109
0.617526
1,796
11,640
3.797327
0.126392
0.087977
0.018768
0.025806
0.720381
0.681525
0.658798
0.630645
0.611877
0.579619
0
0.026148
0.260739
11,640
318
110
36.603774
0.766415
0.171134
0
0.526829
0
0
0.088491
0.028402
0
0
0
0
0
1
0.04878
false
0
0.058537
0
0.112195
0.009756
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f452f54dd600820476b6e9842531fd00913972e2
3,921
py
Python
scripts/pythonutils/autorepr.py
shulinye/dotfiles
a342512c33ca102d03921cc653ee4605d0cf9617
[ "MIT" ]
2
2015-01-16T22:07:10.000Z
2015-11-09T06:45:44.000Z
scripts/pythonutils/autorepr.py
shulinye/dotfiles
a342512c33ca102d03921cc653ee4605d0cf9617
[ "MIT" ]
4
2015-07-08T19:13:47.000Z
2015-08-31T16:04:36.000Z
scripts/pythonutils/autorepr.py
shulinye/dotfiles
a342512c33ca102d03921cc653ee4605d0cf9617
[ "MIT" ]
null
null
null
#!/usr/bin/python3 from collections import OrderedDict from functools import partial from ordered_set import OrderedSet import inspect import itertools import types from .utils import walk_getattr __all__ = ['autoinit', 'autorepr', 'TotalCompareByKey'] def autoinit(obj=None, *args, params=None, **kwargs): """Takes __slots__ and _slots and writes an __init__ Can be used as a class decorator, or by setting __init__ = autoinit""" if obj is None: return partial(autoinit, params=params) if params: pass elif hasattr(obj, '__slots__'): params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__'))) elif hasattr(obj, '_slots'): params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots'))) else: raise RuntimeError("Can't autocreate __init__, please supply '__slots__' or '_slots'") if inspect.isclass(obj): #I'm being used as a decorator s = ["def __init__(self,{}):".format(", ".join(i for i in params))] s.extend("self.{0} = {0}".format(i) for i in params) scope = {} exec('\n '.join(s), scope) setattr(obj, '__init__', scope['__init__']) return obj else: signature = inspect.Signature(inspect.Parameter(i, inspect.Parameter.POSITIONAL_OR_KEYWORD) for i in params) signature.bind(*args, **kwargs) for p, val in itertools.chain(zip(params, args), kwargs.items()): setattr(obj, p, val) def autorepr(obj=None, *, params=None): """Function that automagically gives you a __repr__. If no params are given, uses __slots__, _slots, and at last resort, inspects __init__ Can be used as a class decorator or by setting __repr__ = autorepr""" if obj is None: return partial(autorepr, params = params) discard_first = False if params: pass elif hasattr(obj, '__slots__'): params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '__slots__'))) elif hasattr(obj, '_slots'): params = OrderedSet(itertools.chain.from_iterable(walk_getattr(obj, '_slots'))) else: sig = inspect.signature(obj.__init__) params = sig.parameters discard_first = True if inspect.isclass(obj): #I'm being used as a decorator if discard_first: params = list(params)[1:] #drop the first argument, that's self s = ["def __repr__(self):\n return '%s(" + ", ".join(["%s=%r"]*(len(params)))] s.append(")' % (self.__class__.__name__, ") s.append(', '.join("'{0}', self.{0}".format(i) for i in params) + ')') scope = {} exec("".join(s), scope) setattr(obj, '__repr__', scope['__repr__']) return obj else: #Being a normal function here :P return "%s(%s)" % (obj.__class__.__name__, ", ".join("%s=%r" % (i, getattr(obj,i)) for i in params)) class TotalCompareByKey(object): """Writes all comparison methods using one key""" __slots__ = ['key', 'check_type'] def __init__(self, key, *, check_type=True): self.key = key self.check_type = check_type def __call__(self, cls): orderings = {'__lt__': '<', '__le__': '<=', '__gt__': '>', '__ge__': '>=', '__eq__': '==', '__ne__': '!='} for dunder, symbol in orderings.items(): if dunder in cls.__dict__: continue s = ["def {dunder}(self, other):".format(dunder=dunder)] if self.check_type: s.append("if not isinstance(other, self.__class__):") s.append(" return NotImplemented") s.append("return self.{k} {symbol} other.{k}".format(k=self.key, symbol=symbol)) scope = {} exec("\n ".join(s), scope) setattr(cls, dunder, scope[dunder]) return cls
40.42268
116
0.602907
479
3,921
4.578288
0.294363
0.029184
0.01368
0.02736
0.327405
0.305062
0.283174
0.262654
0.262654
0.236206
0
0.002056
0.255802
3,921
96
117
40.84375
0.749486
0.129814
0
0.298701
0
0
0.157253
0.007432
0
0
0
0
0
1
0.051948
false
0.025974
0.090909
0
0.220779
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4537a07a1d5765ef6c894d899d3fcdd3ed64dab
10,051
py
Python
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py
TTOFFLINE-LEAK/ttoffline
bb0e91704a755d34983e94288d50288e46b68380
[ "MIT" ]
4
2019-07-01T15:46:43.000Z
2021-07-23T16:26:48.000Z
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py
TTOFFLINE-LEAK/ttoffline
bb0e91704a755d34983e94288d50288e46b68380
[ "MIT" ]
1
2019-06-29T03:40:05.000Z
2021-06-13T01:15:16.000Z
v1.0.0.test/toontown/estate/DistributedGardenPlotAI.py
TTOFFLINE-LEAK/ttoffline
bb0e91704a755d34983e94288d50288e46b68380
[ "MIT" ]
4
2019-07-28T21:18:46.000Z
2021-02-25T06:37:25.000Z
from direct.directnotify import DirectNotifyGlobal
from toontown.estate import GardenGlobals
from toontown.estate.DistributedLawnDecorAI import DistributedLawnDecorAI

FLOWER_X_OFFSETS = (None, (0,), (-1.5, 1.5), (-3.4, 0, 3.5))


class DistributedGardenPlotAI(DistributedLawnDecorAI):
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGardenPlotAI')

    def __init__(self, mgr):
        DistributedLawnDecorAI.__init__(self, mgr)
        self.plotType = 0
        self.__plantingAvId = 0
        self.treeIndex = 0
        self.flowerIndex = 0

    def announceGenerate(self):
        DistributedLawnDecorAI.announceGenerate(self)
        self.plotType = GardenGlobals.whatCanBePlanted(self.ownerIndex, self.plot)
        self.__plantingAvId = 0

    def setTreeIndex(self, treeIndex):
        self.treeIndex = treeIndex

    def getTreeIndex(self):
        return self.treeIndex

    def setFlowerIndex(self, flowerIndex):
        self.flowerIndex = flowerIndex

    def getFlowerIndex(self):
        return self.flowerIndex

    def __initialSanityCheck(self, wantedType=None, forceOwner=False):
        if self.__plantingAvId:
            return
        else:
            avId = self.air.getAvatarIdFromSender()
            av = self.air.doId2do.get(avId)
            if not av:
                self.air.writeServerEvent('suspicious', avId, 'called DistributedGardenPlotAI method outside shard!')
                return
            if wantedType is not None and self.plotType != wantedType:
                self.air.writeServerEvent('suspicious', avId, 'called incorrect DistributedGardenPlotAI method!',
                                          plotType=self.plotType, wantedType=wantedType)
                return self.d_interactionDenied()
            if avId != self.ownerDoId and not forceOwner:
                self.air.writeServerEvent('suspicious', avId, "called someone else's DistributedGardenPlotAI plant method!",
                                          ownerDoId=self.ownerDoId)
                return self.d_interactionDenied()
            return av

    def plantFlower(self, species, variety, usingFlowerAll=False):
        av = self.__initialSanityCheck(GardenGlobals.FLOWER_TYPE if not usingFlowerAll else None, usingFlowerAll)
        if not av:
            return
        else:

            def invalid(problem):
                msg = 'tried to plant flower but something went wrong: %s' % problem
                self.notify.warning('%d %s' % (av.doId, msg))
                self.air.writeServerEvent('suspicious', av.doId, msg)
                if not usingFlowerAll:
                    return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)

            plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
            if plantAttributes.get('plantType') != GardenGlobals.FLOWER_TYPE:
                return invalid('invalid species: %d' % species)
            if variety >= len(plantAttributes['varieties']):
                return invalid('invalid variety: %d' % variety)
            if not usingFlowerAll:
                cost = len(GardenGlobals.Recipes[plantAttributes['varieties'][variety][0]]['beans'])
                av.takeMoney(cost)
                self.d_setMovie(GardenGlobals.MOVIE_PLANT)

            def handlePlantFlower(task):
                flower = self.mgr.plantFlower(self.getFlowerIndex(), species, variety, plot=self,
                                              ownerIndex=self.ownerIndex, plotId=self.plot,
                                              waterLevel=0, generate=False)
                index = (0, 1, 2, 2, 2, 3, 3, 3, 4, 4)[self.getFlowerIndex()]
                idx = (0, 0, 0, 1, 2, 0, 1, 2, 0, 1)[self.getFlowerIndex()]
                zOffset = 1.5
                gardenBox = self.mgr._estateBoxes[index]
                xOffset = FLOWER_X_OFFSETS[gardenBox.getTypeIndex()][idx]
                flower.setPos(gardenBox, 0, 0, 0)
                flower.setZ(gardenBox, zOffset)
                flower.setX(gardenBox, xOffset)
                flower.setH(gardenBox, 0)
                flower.generateWithRequired(self.mgr.estate.zoneId)
                if not usingFlowerAll:
                    flower.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
                    flower.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
                self.air.writeServerEvent('plant-flower', self.__plantingAvId, species=species, variety=variety,
                                          plot=self.plot, name=plantAttributes.get('name', 'unknown flower'))
                if task:
                    return task.done

            if usingFlowerAll:
                handlePlantFlower(None)
            else:
                taskMgr.doMethodLater(7, handlePlantFlower, self.uniqueName('handle-plant-flower'))
            self.__plantingAvId = av.doId
            return 1

    def plantGagTree(self, track, index):
        av = self.__initialSanityCheck(GardenGlobals.GAG_TREE_TYPE)
        if not av:
            return
        for i in xrange(index):
            if not self.mgr.hasTree(track, i):
                msg = 'tried to plant tree but an index is missing: %d' % index
                self.notify.warning('%d %s' % (av.doId, msg))
                self.air.writeServerEvent('suspicious', av.doId, msg)
                return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)

        if self.mgr.hasTree(track, index):
            msg = 'tried to plant tree but gag already planted'
            self.notify.warning('%d %s' % (av.doId, msg))
            self.air.writeServerEvent('suspicious', av.doId, msg)
            return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)

        if av.inventory.useItem(track, index) == -1:
            msg = 'tried to plant tree but not carrying selected gag'
            self.notify.warning('%d %s' % (av.doId, msg))
            self.air.writeServerEvent('suspicious', av.doId, msg)
            return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)

        av.d_setInventory(av.getInventory())
        self.d_setMovie(GardenGlobals.MOVIE_PLANT)

        def handlePlantTree(task):
            if not self.air:
                return
            tree = self.mgr.plantTree(self.getTreeIndex(), GardenGlobals.getTreeTypeIndex(track, index), plot=self,
                                      ownerIndex=self.ownerIndex, plotId=self.plot,
                                      pos=(self.getPos(), self.getH()))
            tree.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
            tree.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
            self.air.writeServerEvent('plant-tree', self.__plantingAvId, track=track, index=index, plot=self.plot)
            return task.done

        taskMgr.doMethodLater(7, handlePlantTree, self.uniqueName('handle-plant-tree'))
        self.__plantingAvId = av.doId

    def plantStatuary(self, species):
        av = self.__initialSanityCheck(GardenGlobals.STATUARY_TYPE)
        if not av:
            return

        def invalid(problem):
            msg = 'tried to plant statuary but something went wrong: %s' % problem
            self.notify.warning('%d %s' % (av.doId, msg))
            self.air.writeServerEvent('suspicious', av.doId, msg)
            return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)

        plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
        if plantAttributes.get('plantType') != GardenGlobals.STATUARY_TYPE:
            return invalid('invalid species: %d' % species)
        gardenItem = species - 100
        if gardenItem == 134:
            gardenItem = 135
        if not av.removeGardenItem(gardenItem, 1):
            return invalid("av doesn't own item: %d" % species)
        self.d_setMovie(GardenGlobals.MOVIE_PLANT)

        def handlePlaceStatuary(task):
            if not self.air:
                return
            statuary = self.mgr.placeStatuary(self.mgr.S_pack(0, 0, species, 0), plot=self,
                                              ownerIndex=self.ownerIndex, plotId=self.plot,
                                              pos=(self.getPos(), self.getH()), generate=False)
            statuary.generateWithRequired(self.zoneId)
            statuary.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
            statuary.d_setMovie(GardenGlobals.MOVIE_CLEAR, self.__plantingAvId)
            self.air.writeServerEvent('plant-statuary', self.__plantingAvId, species=species, plot=self.plot)
            return task.done

        taskMgr.doMethodLater(7, handlePlaceStatuary, self.uniqueName('handle-place-statuary'))
        self.__plantingAvId = av.doId

    def plantToonStatuary(self, species, dnaCode):
        av = self.__initialSanityCheck(GardenGlobals.STATUARY_TYPE)
        if not av:
            return

        def invalid(problem):
            msg = 'tried to plant statuary but something went wrong: %s' % problem
            self.notify.warning('%d %s' % (av.doId, msg))
            self.air.writeServerEvent('suspicious', av.doId, msg)
            return self.d_setMovie(GardenGlobals.MOVIE_PLANT_REJECTED)

        plantAttributes = GardenGlobals.PlantAttributes.get(species, {})
        if plantAttributes.get('plantType') != GardenGlobals.STATUARY_TYPE:
            return invalid('invalid species: %d' % species)
        if not av.removeGardenItem(species - 100, 1):
            return invalid("av doesn't own item: %d" % species)
        self.d_setMovie(GardenGlobals.MOVIE_PLANT)

        def handlePlaceStatuary(task):
            if not self.air:
                return
            statuary = self.mgr.placeStatuary(self.mgr.S_pack(dnaCode, 0, species, 0), plot=self,
                                              ownerIndex=self.ownerIndex, plotId=self.plot,
                                              pos=(self.getPos(), self.getH()), generate=False)
            statuary.generateWithRequired(self.zoneId)
            statuary.d_setMovie(GardenGlobals.MOVIE_FINISHPLANTING, self.__plantingAvId)
            self.air.writeServerEvent('plant-statuary', self.__plantingAvId, species=species, plot=self.plot)
            return task.done

        taskMgr.doMethodLater(7, handlePlaceStatuary, self.uniqueName('handle-place-statuary'))
        self.__plantingAvId = av.doId

    def plantNothing(self, burntBeans):
        av = self.__initialSanityCheck()
        if av:
            av.takeMoney(burntBeans)
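The flower-placement math in handlePlantFlower reduces to two parallel lookup tables: one maps each of the ten flower indices to an estate box, the other to a slot within that box, and FLOWER_X_OFFSETS supplies the slot's x offset per box type. A minimal standalone sketch of that lookup (pure Python; the table names below are hypothetical, only the values come from the code above):

FLOWER_X_OFFSETS = (None, (0,), (-1.5, 1.5), (-3.4, 0, 3.5))
BOX_OF_FLOWER = (0, 1, 2, 2, 2, 3, 3, 3, 4, 4)   # flower index -> estate box
SLOT_IN_BOX = (0, 0, 0, 1, 2, 0, 1, 2, 0, 1)     # flower index -> slot in that box

def flower_x_offset(flower_index, box_type_index):
    # Same indexing as handlePlantFlower, without the Panda3D objects.
    return FLOWER_X_OFFSETS[box_type_index][SLOT_IN_BOX[flower_index]]

print(flower_x_offset(3, 2))  # 1.5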
49.029268
195
0.639936
1,047
10,051
6.034384
0.159503
0.045584
0.059196
0.07265
0.537195
0.52485
0.490503
0.455049
0.440962
0.433523
0
0.00927
0.259477
10,051
205
196
49.029268
0.839581
0
0
0.485876
0
0
0.090828
0.013331
0
0
0
0
0
1
0.107345
false
0
0.016949
0.011299
0.310734
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4547f32ba2dd53a8a0e71fc993cc07d7d1a58ed
2,384
py
Python
python/handwritten_baseline/pipeline/model/feature_extr/debug.py
UKPLab/cdcr-beyond-corpus-tailored
52bf98692c7464f25628baea24addd1a988f9a1f
[ "Apache-2.0" ]
10
2020-11-28T05:01:04.000Z
2021-12-21T19:34:00.000Z
python/handwritten_baseline/pipeline/model/feature_extr/debug.py
UKPLab/cdcr-beyond-corpus-tailored
52bf98692c7464f25628baea24addd1a988f9a1f
[ "Apache-2.0" ]
1
2022-03-12T07:20:39.000Z
2022-03-16T05:11:38.000Z
python/handwritten_baseline/pipeline/model/feature_extr/debug.py
UKPLab/cdcr-beyond-corpus-tailored
52bf98692c7464f25628baea24addd1a988f9a1f
[ "Apache-2.0" ]
1
2021-12-21T19:34:08.000Z
2021-12-21T19:34:08.000Z
import pprint
from typing import Optional, List, Tuple, Set, Dict

import numpy as np
from overrides import overrides

from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import DEBUG_EXTR
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin


class DebugFeatureExtractor(FeatureExtractorMixin):
    """
    Returns constant or random feature value for testing purposes.
    """

    def __init__(self, strategy: str, num_features: int, use_cache: bool, features_to_select: Optional[List[str]]):
        super(DebugFeatureExtractor, self).__init__(DEBUG_EXTR, use_cache, features_to_select)
        self.strategy = strategy
        self.num_features = num_features

    @overrides
    def _transform(self, dataset: Dataset, pairs: List[Tuple[Tuple, Tuple]], unique_mentions: Set[Tuple]):
        if self.strategy == "random":
            return np.random.normal(0, 1, (len(pairs), self.num_features))
        elif self.strategy == "zero":
            return np.zeros((len(pairs), self.num_features))
        elif self.strategy == "mix":
            num_zero_features = self.num_features // 2
            print(f"Generating {num_zero_features} zero features and {self.num_features - num_zero_features} random features.")
            zero_features = np.zeros((len(pairs), num_zero_features))
            random_features = np.random.normal(0, 1, (len(pairs), self.num_features - num_zero_features))
            feature_matrix = np.hstack([zero_features, random_features])
            np.random.shuffle(np.transpose(feature_matrix))
            return feature_matrix

    @overrides
    def _get_plain_names_of_all_features(self) -> List[str]:
        return [str(i) for i in range(self.num_features)]

    @classmethod
    @overrides
    def from_params(cls, config: Dict):
        strategy = config.pop("strategy")
        num_features = config.pop("num_features")
        use_cache = config.pop("use_cache", False)
        features_to_select = config.pop("features_to_select", None)
        obj = DebugFeatureExtractor(strategy, num_features, use_cache, features_to_select)
        if config:
            raise ValueError("Leftover configuration: " + pprint.pformat(config))
        return obj
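The "mix" strategy above can be reproduced without the surrounding pipeline; a minimal NumPy-only sketch (dimensions chosen arbitrarily):

import numpy as np

num_pairs, num_features = 4, 6
num_zero = num_features // 2
zero_features = np.zeros((num_pairs, num_zero))
random_features = np.random.normal(0, 1, (num_pairs, num_features - num_zero))
feature_matrix = np.hstack([zero_features, random_features])
np.random.shuffle(feature_matrix.T)  # shuffling the transposed view permutes columns in place
print(feature_matrix.shape)          # (4, 6)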
43.345455
127
0.684983
286
2,384
5.468531
0.328671
0.084399
0.067136
0.055627
0.285166
0.219949
0.152813
0.152813
0.049872
0.049872
0
0.00269
0.220218
2,384
55
128
43.345455
0.838623
0.026007
0
0.068182
0
0
0.081925
0
0
0
0
0
0
1
0.090909
false
0
0.159091
0.022727
0.386364
0.068182
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f456221256fc52688ca188318ed96a52141502e3
4,311
py
Python
venv/lib/python3.5/site-packages/igraph/test/atlas.py
dtklinh/Protein-Rigid-Domains-Estimation
a27152ef5437eb87ee31c317091356c4787f82a4
[ "MIT" ]
2
2021-03-04T16:57:06.000Z
2021-08-11T01:42:29.000Z
venv/lib/python3.5/site-packages/igraph/test/atlas.py
dtklinh/Protein-Rigid-Domains-Estimation
a27152ef5437eb87ee31c317091356c4787f82a4
[ "MIT" ]
null
null
null
venv/lib/python3.5/site-packages/igraph/test/atlas.py
dtklinh/Protein-Rigid-Domains-Estimation
a27152ef5437eb87ee31c317091356c4787f82a4
[ "MIT" ]
null
null
null
import warnings
import unittest

from igraph import *


class TestBase(unittest.TestCase):
    def testPageRank(self):
        for idx, g in enumerate(self.__class__.graphs):
            try:
                pr = g.pagerank()
            except Exception as ex:
                self.assertTrue(False, msg="PageRank calculation threw exception for graph #%d: %s" % (idx, ex))
                raise
            if g.vcount() == 0:
                self.assertEqual([], pr)
                continue
            self.assertAlmostEqual(1.0, sum(pr), places=5,
                    msg="PageRank sum is not 1.0 for graph #%d (%r)" % (idx, pr))
            self.assertTrue(min(pr) >= 0,
                    msg="Minimum PageRank is less than 0 for graph #%d (%r)" % (idx, pr))

    def testEigenvectorCentrality(self):
        # Temporarily turn off the warning handler because g.evcent() will print
        # a warning for DAGs
        warnings.simplefilter("ignore")
        try:
            for idx, g in enumerate(self.__class__.graphs):
                try:
                    ec, eval = g.evcent(return_eigenvalue=True)
                except Exception as ex:
                    self.assertTrue(False, msg="Eigenvector centrality threw exception for graph #%d: %s" % (idx, ex))
                    raise
                if g.vcount() == 0:
                    self.assertEqual([], ec)
                    continue
                if not g.is_connected():
                    # Skip disconnected graphs; this will be fixed in igraph 0.7
                    continue
                n = g.vcount()
                if abs(eval) < 1e-4:
                    self.assertTrue(min(ec) >= -1e-10,
                            msg="Minimum eigenvector centrality is smaller than 0 for graph #%d" % idx)
                    self.assertTrue(max(ec) <= 1,
                            msg="Maximum eigenvector centrality is greater than 1 for graph #%d" % idx)
                    continue
                self.assertAlmostEqual(max(ec), 1, places=7,
                        msg="Maximum eigenvector centrality is %r (not 1) for graph #%d (%r)" %
                        (max(ec), idx, ec))
                self.assertTrue(min(ec) >= 0,
                        msg="Minimum eigenvector centrality is less than 0 for graph #%d" % idx)
                ec2 = [sum(ec[u.index] for u in v.predecessors()) for v in g.vs]
                for i in range(n):
                    self.assertAlmostEqual(ec[i] * eval, ec2[i], places=7,
                            msg="Eigenvector centrality in graph #%d seems to be invalid "
                                "for vertex %d" % (idx, i))
        finally:
            # Reset the warning handler
            warnings.resetwarnings()

    def testHubScore(self):
        for idx, g in enumerate(self.__class__.graphs):
            sc = g.hub_score()
            if g.vcount() == 0:
                self.assertEqual([], sc)
                continue
            self.assertAlmostEqual(max(sc), 1, places=7,
                    msg="Maximum hub score is not 1 for graph #%d" % idx)
            self.assertTrue(min(sc) >= 0,
                    msg="Minimum hub score is less than 0 for graph #%d" % idx)

    def testAuthorityScore(self):
        for idx, g in enumerate(self.__class__.graphs):
            sc = g.authority_score()
            if g.vcount() == 0:
                self.assertEqual([], sc)
                continue
            self.assertAlmostEqual(max(sc), 1, places=7,
                    msg="Maximum authority score is not 1 for graph #%d" % idx)
            self.assertTrue(min(sc) >= 0,
                    msg="Minimum authority score is less than 0 for graph #%d" % idx)


class GraphAtlasTests(TestBase):
    graphs = [Graph.Atlas(i) for i in range(1253)]


class IsoclassTests(TestBase):
    graphs = [Graph.Isoclass(3, i, directed=True) for i in range(16)] + \
             [Graph.Isoclass(4, i, directed=True) for i in range(218)]


def suite():
    atlas_suite = unittest.makeSuite(GraphAtlasTests)
    isoclass_suite = unittest.makeSuite(IsoclassTests)
    return unittest.TestSuite([atlas_suite, isoclass_suite])


def test():
    runner = unittest.TextTestRunner()
    runner.run(suite())


if __name__ == "__main__":
    test()
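The invariants these tests assert can be checked interactively; a quick sketch, assuming python-igraph is installed:

from igraph import Graph

g = Graph.Atlas(100)   # graph no. 100 from "An Atlas of Graphs"
pr = g.pagerank()
print(abs(sum(pr) - 1.0) < 1e-5, min(pr) >= 0)  # True True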
38.491071
118
0.52424
494
4,311
4.510121
0.257085
0.035009
0.048474
0.037702
0.454219
0.38061
0.366697
0.326302
0.279174
0.219928
0
0.018328
0.3672
4,311
111
119
38.837838
0.798387
0.040362
0
0.341176
0
0
0.174619
0
0
0
0
0
0.2
1
0.070588
false
0
0.035294
0
0.176471
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4584cfc0d782e8ed0b2d30fb8fdd386a63762a3
1,017
py
Python
artascope/src/web/app.py
magus0219/icloud-photo-downloader
6334530d971cf61089d031de99a38f204c201837
[ "MIT" ]
3
2020-09-24T16:19:28.000Z
2022-02-09T21:10:11.000Z
artascope/src/web/app.py
magus0219/icloud-photo-downloader
6334530d971cf61089d031de99a38f204c201837
[ "MIT" ]
null
null
null
artascope/src/web/app.py
magus0219/icloud-photo-downloader
6334530d971cf61089d031de99a38f204c201837
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created by magus0219[magus0219@gmail.com] on 2020/3/23
from types import FunctionType

from flask import (
    Flask,
    redirect,
    url_for,
)

import artascope.src.web.lib.filter as module_filter
from artascope.src.web.lib.content_processor import inject_version


def index():
    return redirect(url_for("task.get_task_list"))


def create_app():
    # create and configure the app
    app = Flask(__name__)
    app.jinja_env.filters.update(
        {
            key: val
            for key, val in module_filter.__dict__.items()
            if isinstance(val, FunctionType)
        }
    )

    from . import user
    from . import task
    from . import scheduler

    # register blueprint
    app.register_blueprint(user.bp)
    app.register_blueprint(task.bp)
    app.register_blueprint(scheduler.bp)

    # register index
    app.add_url_rule("/", "index", index)

    # register context processor
    app.context_processor(inject_version)
    return app
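The filter-registration idiom in create_app (every plain function of a module becomes a Jinja filter) works in isolation; a minimal sketch assuming only Flask, with a made-up filter:

from types import FunctionType
from flask import Flask

def shout(value):
    """Example filter: upper-case its argument."""
    return str(value).upper()

app = Flask(__name__)
app.jinja_env.filters.update(
    {k: v for k, v in list(globals().items()) if isinstance(v, FunctionType)}
)
print(app.jinja_env.filters["shout"]("hi"))  # HI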
23.113636
66
0.67355
131
1,017
5.038168
0.496183
0.10303
0.090909
0.054545
0
0
0
0
0
0
0
0.021684
0.229105
1,017
43
67
23.651163
0.820153
0.184857
0
0
0
0
0.029233
0
0
0
0
0
0
1
0.071429
false
0
0.25
0.035714
0.392857
0.107143
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4584d9b2545719be7d26d0474bfda0fc16fc902
2,251
py
Python
tests/common/test_op/scatter_nd.py
KnowingNothing/akg-test
114d8626b824b9a31af50a482afc07ab7121862b
[ "Apache-2.0" ]
1
2020-08-31T02:43:43.000Z
2020-08-31T02:43:43.000Z
tests/common/test_op/scatter_nd.py
KnowingNothing/akg-test
114d8626b824b9a31af50a482afc07ab7121862b
[ "Apache-2.0" ]
null
null
null
tests/common/test_op/scatter_nd.py
KnowingNothing/akg-test
114d8626b824b9a31af50a482afc07ab7121862b
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""operator dsl function: scatter_nd"""
import akg.tvm
from akg.utils import validation_check as vc_util


def scatter_nd(indices, updates, shape):
    """
    Scatters input tensor updates to a new tensor according to indices.

    Args:
        indices(akg.tvm.Tensor): Tensor of type int32.
        updates(akg.tvm.Tensor): Tensor of type float16, float32, int32.
        shape(list, tuple): Specifies the shape of output tensor.

    Returns:
        Scattered tensor with same type as input tensor updates and shape specified by parameter shape.
    """

    # check shapes dtype
    indices_shape = [x.value for x in indices.shape]
    data_shape = [x.value for x in updates.shape]
    vc_util.check_shape(indices_shape)
    vc_util.check_shape(data_shape)

    indices_dtype = indices.dtype
    if not indices_dtype in "int32":
        raise TypeError("indices_dtype only support int32 while dtype is %s" % indices_dtype)
    dtype = updates.dtype
    support_list = {"float16", "float32", "int32"}
    if not (dtype in support_list):
        raise TypeError("scatter_nd only support %s while dtype is %s" % (",".join(support_list), dtype))

    n = indices.shape[0].value

    def pick(i, j, *indexes):
        return akg.tvm.expr.Select(j == indices[i][0],
                                   akg.tvm.const(1, updates.dtype),
                                   akg.tvm.const(0, updates.dtype)) * updates[(i,) + indexes]

    reducible = akg.tvm.compute([n] + list(shape), lambda *i: pick(i[0], i[1], *i[2:]), name="reduc")
    k = akg.tvm.reduce_axis((0, n))
    res = akg.tvm.compute(shape, lambda *i: akg.tvm.sum(reducible[(k,) + i], axis=k))
    return res
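Semantically, the select-then-reduce above is a sum-scatter along the first output axis: out[j, ...] accumulates every updates[i, ...] whose indices[i][0] equals j. A NumPy equivalent makes this concrete (NumPy only; example values arbitrary):

import numpy as np

indices = np.array([[1], [0], [1]], dtype=np.int32)
updates = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32)
out = np.zeros((2, 2), dtype=updates.dtype)
np.add.at(out, indices[:, 0], updates)  # out[j] += updates[i] wherever indices[i][0] == j
print(out)  # [[3. 4.] [6. 8.]]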
37.516667
105
0.676588
331
2,251
4.537764
0.407855
0.039947
0.01731
0.021305
0.082557
0.054594
0
0
0
0
0
0.019263
0.215904
2,251
59
106
38.152542
0.831728
0.436251
0
0
0
0
0.102395
0
0
0
0
0
0
1
0.086957
false
0
0.086957
0.043478
0.26087
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f45c36a2a7c87d236af65ffb124e4f77205e7048
744
py
Python
recommender_engine/similarity_measure/__init__.py
tranlyvu/recommender
4985c355d54ee22ba48f4891077fd7e12bd21b47
[ "Apache-2.0" ]
8
2019-03-14T07:53:51.000Z
2021-06-22T06:19:32.000Z
recommender_engine/similarity_measure/__init__.py
tranlyvu/recommender-engine
4985c355d54ee22ba48f4891077fd7e12bd21b47
[ "Apache-2.0" ]
3
2018-01-16T06:48:55.000Z
2020-05-04T01:43:14.000Z
recommender_engine/similarity_measure/__init__.py
tranlyvu/recommender-engine
4985c355d54ee22ba48f4891077fd7e12bd21b47
[ "Apache-2.0" ]
1
2019-03-14T07:53:59.000Z
2019-03-14T07:53:59.000Z
""" recommender_engine ----- recommender_engine is a recommendation application using either item-based or user-based approaches :copyright: (c) 2016 - 2019 by Tran Ly Vu. All Rights Reserved. :license: Apache License 2.0 """ from .cosine import cosine from .euclidean_distance import euclidean_distance from .pearson_correlation import pearson_correlation name="similarity_measure" __all__ = ["cosine", "euclidean_distance", "pearson_correlation"] __author__ = "Tran Ly Vu (vutransingapore@gmail.com)" __copyright__ = "Copyright (c) 2016 - 2019 Tran Ly Vu. All Rights Reserved." __license__ = "Apache License 2.0" __credits__ = ["Tran Ly Vu"] __maintainer__ = "Tran Ly Vu" __email__ = "vutransingapore@gmail.com" __status__ = "Beta"
33.818182
100
0.766129
94
744
5.62766
0.5
0.056711
0.075614
0.068053
0.177694
0.177694
0.177694
0.177694
0.177694
0.177694
0
0.031008
0.133065
744
21
101
35.428571
0.789147
0.30914
0
0
0
0
0.439216
0.101961
0
0
0
0
0
1
0
false
0
0.25
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f45dfb481b367182927b34141a1df143252d871f
7,306
py
Python
test/examples/test_simple_gp_regression.py
ediphy-dwild/gpytorch
559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a
[ "MIT" ]
null
null
null
test/examples/test_simple_gp_regression.py
ediphy-dwild/gpytorch
559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a
[ "MIT" ]
null
null
null
test/examples/test_simple_gp_regression.py
ediphy-dwild/gpytorch
559c78a6446237ed7cc8e1cc7cf4ed8bf31a3c8a
[ "MIT" ]
null
null
null
import math
import torch
import unittest
import gpytorch
from torch import optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import GaussianLikelihood
from gpytorch.random_variables import GaussianRandomVariable

# Simple training data: let's try to learn a sine function
train_x = Variable(torch.linspace(0, 1, 11))
train_y = Variable(torch.sin(train_x.data * (2 * math.pi)))
test_x = Variable(torch.linspace(0, 1, 51))
test_y = Variable(torch.sin(test_x.data * (2 * math.pi)))


class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_inputs, train_targets, likelihood):
        super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
        self.mean_module = ConstantMean(constant_bounds=(-1, 1))
        self.covar_module = RBFKernel(log_lengthscale_bounds=(-3, 3))

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return GaussianRandomVariable(mean_x, covar_x)


class TestSimpleGPRegression(unittest.TestCase):
    def test_posterior_latent_gp_and_likelihood_without_optimization(self):
        # We're manually going to set the hyperparameters to be ridiculous
        likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
        gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
        # Update bounds to accommodate extreme parameters
        gp_model.covar_module.set_bounds(log_lengthscale=(-10, 10))
        likelihood.set_bounds(log_noise=(-10, 10))
        # Update parameters
        gp_model.covar_module.initialize(log_lengthscale=-10)
        gp_model.mean_module.initialize(constant=0)
        likelihood.initialize(log_noise=-10)

        # Compute posterior distribution
        gp_model.eval()
        likelihood.eval()

        # Let's see how our model does, conditioned with weird hyperparams
        # The posterior should fit all the data
        function_predictions = likelihood(gp_model(train_x))
        self.assertLess(
            torch.norm(function_predictions.mean().data - train_y.data),
            1e-3,
        )
        self.assertLess(torch.norm(function_predictions.var().data), 1e-3)

        # It shouldn't fit much else though
        test_function_predictions = gp_model(Variable(torch.Tensor([1.1])))
        self.assertLess(
            torch.norm(test_function_predictions.mean().data - 0),
            1e-4,
        )
        self.assertLess(torch.norm(test_function_predictions.var().data - 1), 1e-4)

    def test_posterior_latent_gp_and_likelihood_with_optimization(self):
        # We're manually going to set the hyperparameters to something they shouldn't be
        likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
        gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
        mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
        gp_model.covar_module.initialize(log_lengthscale=1)
        gp_model.mean_module.initialize(constant=0)
        likelihood.initialize(log_noise=1)

        # Find optimal model hyperparameters
        gp_model.train()
        likelihood.train()
        optimizer = optim.Adam(
            list(gp_model.parameters()) + list(likelihood.parameters()),
            lr=0.1,
        )
        optimizer.n_iter = 0
        for _ in range(50):
            optimizer.zero_grad()
            output = gp_model(train_x)
            loss = -mll(output, train_y)
            loss.backward()
            optimizer.n_iter += 1
            optimizer.step()

        # Test the model
        gp_model.eval()
        likelihood.eval()
        test_function_predictions = likelihood(gp_model(test_x))
        mean_abs_error = torch.mean(
            torch.abs(test_y - test_function_predictions.mean())
        )
        self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)

    def test_posterior_latent_gp_and_likelihood_fast_pred_var(self):
        with gpytorch.fast_pred_var():
            # We're manually going to set the hyperparameters to
            # something they shouldn't be
            likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3))
            gp_model = ExactGPModel(train_x.data, train_y.data, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
            gp_model.covar_module.initialize(log_lengthscale=1)
            gp_model.mean_module.initialize(constant=0)
            likelihood.initialize(log_noise=1)

            # Find optimal model hyperparameters
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(
                list(gp_model.parameters()) + list(likelihood.parameters()),
                lr=0.1,
            )
            optimizer.n_iter = 0
            for _ in range(50):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            # Test the model
            gp_model.eval()
            likelihood.eval()

            # Set the cache
            test_function_predictions = likelihood(gp_model(train_x))

            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.log_noise.data.fill_(3)
            test_function_predictions = likelihood(gp_model(train_x))

            noise = likelihood.log_noise.exp()
            var_diff = (test_function_predictions.var() - noise).abs()
            self.assertLess(torch.max(var_diff.data / noise.data), 0.05)

    def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
        if torch.cuda.is_available():
            # We're manually going to set the hyperparameters to
            # something they shouldn't be
            likelihood = GaussianLikelihood(log_noise_bounds=(-3, 3)).cuda()
            gp_model = ExactGPModel(
                train_x.data.cuda(), train_y.data.cuda(), likelihood
            ).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)
            gp_model.covar_module.initialize(log_lengthscale=1)
            gp_model.mean_module.initialize(constant=0)
            likelihood.initialize(log_noise=1)

            # Find optimal model hyperparameters
            gp_model.train()
            likelihood.train()
            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(50):
                optimizer.zero_grad()
                output = gp_model(train_x.cuda())
                loss = -mll(output, train_y.cuda())
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            # Test the model
            gp_model.eval()
            likelihood.eval()
            test_function_predictions = likelihood(gp_model(test_x.cuda()))
            mean_abs_error = torch.mean(
                torch.abs(test_y.cuda() - test_function_predictions.mean())
            )
            self.assertLess(mean_abs_error.data.squeeze()[0], 0.05)


if __name__ == '__main__':
    unittest.main()
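The optimisation loop these tests repeat is ordinary PyTorch: Adam driving a loss downward for 50 steps. Stripped of the GP model, a self-contained sketch (PyTorch only, toy objective standing in for -mll):

import torch
from torch import optim

x = torch.tensor(0.0, requires_grad=True)
optimizer = optim.Adam([x], lr=0.1)
for _ in range(50):
    optimizer.zero_grad()
    loss = (x - 3.0) ** 2   # stand-in for -mll(output, train_y)
    loss.backward()
    optimizer.step()
print(x.item())  # approaches 3.0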
38.861702
88
0.634684
861
7,306
5.147503
0.190476
0.05528
0.051895
0.017599
0.680054
0.641245
0.604919
0.556859
0.536101
0.490523
0
0.016205
0.273611
7,306
187
89
39.069519
0.818918
0.116343
0
0.5
0
0
0.001244
0
0
0
0
0
0.051471
1
0.044118
false
0
0.073529
0
0.139706
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f460edaf40609072f5da235373227615b76ded70
804
py
Python
Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py
Sourav692/FAANG-Interview-Preparation
f523e5c94d582328b3edc449ea16ac6ab28cdc81
[ "Unlicense" ]
3,269
2018-10-12T01:29:40.000Z
2022-03-31T17:58:41.000Z
Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py
Sourav692/FAANG-Interview-Preparation
f523e5c94d582328b3edc449ea16ac6ab28cdc81
[ "Unlicense" ]
53
2018-12-16T22:54:20.000Z
2022-02-25T08:31:20.000Z
Algo and DSA/LeetCode-Solutions-master/Python/smallest-greater-multiple-made-of-two-digits.py
Sourav692/FAANG-Interview-Preparation
f523e5c94d582328b3edc449ea16ac6ab28cdc81
[ "Unlicense" ]
1,236
2018-10-12T02:51:40.000Z
2022-03-30T13:30:37.000Z
# Time:  sum(O(l * 2^l) for l in range(1, 11)) = O(20 * 2^10) = O(1)
# Space: O(1)

class Solution(object):
    def findInteger(self, k, digit1, digit2):
        """
        :type k: int
        :type digit1: int
        :type digit2: int
        :rtype: int
        """
        MAX_NUM_OF_DIGITS = 10
        INT_MAX = 2**31 - 1

        if digit1 < digit2:
            digit1, digit2 = digit2, digit1
        total = 2
        for l in xrange(1, MAX_NUM_OF_DIGITS + 1):
            for mask in xrange(total):
                curr, bit = 0, total >> 1
                while bit:
                    curr = curr * 10 + (digit1 if mask & bit else digit2)
                    bit >>= 1
                if k < curr <= INT_MAX and curr % k == 0:
                    return curr
            total <<= 1
        return -1
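A quick check of the enumeration (Python 2, matching the xrange usage above): the smallest multiple of k = 2 that is greater than k and uses only the digits 0 and 2 is 20.

print Solution().findInteger(2, 0, 2)  # 20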
28.714286
69
0.452736
108
804
3.296296
0.37963
0.101124
0.033708
0.078652
0
0
0
0
0
0
0
0.087912
0.43408
804
27
70
29.777778
0.694505
0.174129
0
0
0
0
0
0
0
0
0
0
0
1
0.058824
false
0
0
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4647df8f083e67396d2554f67110e5d8f963972
7,875
py
Python
aldryn_people/tests/test_plugins.py
compoundpartners/js-people
a3744c3880f6626e677034a693f337c927baf886
[ "BSD-3-Clause" ]
null
null
null
aldryn_people/tests/test_plugins.py
compoundpartners/js-people
a3744c3880f6626e677034a693f337c927baf886
[ "BSD-3-Clause" ]
1
2019-01-15T16:06:44.000Z
2019-01-15T16:06:44.000Z
aldryn_people/tests/test_plugins.py
compoundpartners/js-people
a3744c3880f6626e677034a693f337c927baf886
[ "BSD-3-Clause" ]
1
2019-01-09T11:53:59.000Z
2019-01-09T11:53:59.000Z
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

try:
    from django.core.urlresolvers import reverse
except ImportError:
    # Django 2.0
    from django.urls import reverse

from django.utils.translation import force_text

from cms import api
from cms.utils.i18n import force_language

from aldryn_people import DEFAULT_APP_NAMESPACE

from ..models import Person, Group
from ..cms_plugins import PeoplePlugin

from . import DefaultApphookMixin, BasePeopleTest


class TestPersonPlugins(DefaultApphookMixin, BasePeopleTest):

    def test_add_people_list_plugin_api(self):
        """
        We add a person to the People Plugin and look her up
        """
        name = 'Donald'
        Person.objects.create(name=name)
        plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
        plugin.people = Person.objects.all()
        self.assertEqual(force_text(plugin), force_text(plugin.pk))
        self.page.publish(self.language)

        url = self.page.get_absolute_url()
        response = self.client.get(url, follow=True)
        self.assertContains(response, name)

    # This fails because of Sane Add Plugin (I suspect). This will be refactored
    # and re-enabled in a future commit.
    # def test_add_people_list_plugin_client(self):
    #     """
    #     We log into the PeoplePlugin
    #     """
    #     self.client.login(
    #         username=self.su_username, password=self.su_password)
    #
    #     plugin_data = {
    #         'plugin_type': 'PeoplePlugin',
    #         'plugin_language': self.language,
    #         'placeholder_id': self.placeholder.pk,
    #     }
    #     response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
    #     self.assertEqual(response.status_code, 200)
    #     self.assertTrue(CMSPlugin.objects.exists())

    def test_hide_ungrouped(self):
        """ """
        the_bradys = Group.objects.create(name="The Bradys")
        alice = Person.objects.create(name="Alice")
        bobby = Person.objects.create(name="Bobby")
        cindy = Person.objects.create(name="Cindy")
        # Alice is the housekeeper, not a real Brady.
        bobby.groups.add(the_bradys)
        cindy.groups.add(the_bradys)

        # Add a plugin where ungrouped people are not shown
        plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
        plugin.people = Person.objects.all()
        plugin.group_by_group = True
        plugin.show_ungrouped = False
        plugin.save()
        self.page.publish(self.language)

        url = self.page.get_absolute_url()
        response = self.client.get(url, follow=True)
        self.assertContains(response, bobby.name)
        self.assertContains(response, cindy.name)
        self.assertNotContains(response, alice.name)

    def test_show_ungrouped(self):
        """ """
        the_bradys = Group.objects.create(name="The Bradys")
        alice = Person.objects.create(name="Alice")
        bobby = Person.objects.create(name="Bobby")
        cindy = Person.objects.create(name="Cindy")
        # Alice is the housekeeper, not a real Brady.
        bobby.groups.add(the_bradys)
        cindy.groups.add(the_bradys)

        # Now, add a new plugin where ungrouped people are shown
        plugin = api.add_plugin(self.placeholder, PeoplePlugin, self.language)
        plugin.people = Person.objects.all()
        plugin.group_by_group = True
        plugin.show_ungrouped = True
        plugin.save()
        self.page.publish(self.language)

        url = self.page.get_absolute_url()
        response = self.client.get(url, follow=True)
        self.assertContains(response, bobby.name)
        self.assertContains(response, cindy.name)
        self.assertContains(response, alice.name)


class TestPeopleListPluginNoApphook(BasePeopleTest):

    def setUp(self):
        super(TestPeopleListPluginNoApphook, self).setUp()
        # we are testing only en
        self.person1.set_current_language('en')
        self.namespace = DEFAULT_APP_NAMESPACE

    def create_plugin(self, plugin_params=None):
        if plugin_params is None:
            plugin_params = {}
        with force_language('en'):
            plugin = api.add_plugin(
                self.placeholder, PeoplePlugin, 'en', **plugin_params)
            self.page.publish('en')
        return plugin

    def test_plugin_with_no_apphook_doesnot_breaks_page(self):
        self.create_plugin()
        url = self.page.get_absolute_url()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.person1.name)
        from ..cms_plugins import NAMESPACE_ERROR
        self.assertNotContains(response, NAMESPACE_ERROR[:20])

    def test_plugin_with_no_apphook_shows_error_message(self):
        self.create_plugin()
        url = self.page.get_absolute_url()
        self.client.login(username=self.su_username, password=self.su_password)
        response = self.client.get(url, user=self.superuser)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.person1.name)
        from ..cms_plugins import NAMESPACE_ERROR
        self.assertContains(response, NAMESPACE_ERROR[:20])

    def test_plugin_with_vcard_enabled_no_apphook(self):
        self.create_plugin(plugin_params={'show_vcard': True})
        url = self.page.get_absolute_url()
        response = self.client.get(url)
        self.assertContains(response, self.person1.name)

    def test_plugin_with_vcard_disabled_no_apphook(self):
        self.create_plugin(plugin_params={'show_vcard': False})
        url = self.page.get_absolute_url()
        response = self.client.get(url)
        self.assertContains(response, self.person1.name)

    def test_plugin_show_links_are_shown_if_enabled_and_apphook_page(self):
        with force_language('en'):
            app_page = self.create_apphook_page()
            list_plugin = api.add_plugin(
                placeholder=self.placeholder,
                plugin_type=PeoplePlugin,
                language='en',
            )
            list_plugin.show_links = True
            list_plugin.save()
            self.page.publish('en')
            url = self.page.get_absolute_url()
            person_url = self.person1.get_absolute_url()
        # ensure that url is not the link to the home page and not app page
        app_page_len = len(app_page.get_absolute_url())
        self.assertGreater(len(person_url), app_page_len)
        response = self.client.get(url, follow=True)
        self.assertContains(response, person_url)
        # ensure that url is not shown if not enabled for plugin.
        list_plugin.show_links = False
        list_plugin.save()
        self.page.publish('en')
        response = self.client.get(url, follow=True)
        self.assertNotContains(response, person_url)

    def test_plugin_with_vcard_enabled_with_apphook(self):
        vcard_kwargs = {
            'slug': self.person1.slug
        }
        with force_language('en'):
            self.create_apphook_page()
            person_vcard_url = reverse(
                '{0}:download_vcard'.format(self.namespace),
                kwargs=vcard_kwargs)
        plugin = self.create_plugin(plugin_params={'show_vcard': True})
        url = self.page.get_absolute_url()
        response = self.client.get(url, follow=True)
        self.assertContains(response, self.person1.name)
        self.assertContains(response, person_vcard_url)
        # test that vcard download link is not shown if disabled
        plugin.show_vcard = False
        plugin.save()
        self.page.publish('en')
        response = self.client.get(url, follow=True)
        self.assertContains(response, self.person1.name)
        self.assertNotContains(response, person_vcard_url)
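A hypothetical distillation of the show_ungrouped behaviour the two group tests above pin down (plain Python, no Django): ungrouped people are hidden unless the flag is set.

people = [("Alice", []), ("Bobby", ["The Bradys"]), ("Cindy", ["The Bradys"])]

def visible(people, show_ungrouped):
    return [name for name, groups in people if groups or show_ungrouped]

print(visible(people, False))  # ['Bobby', 'Cindy']
print(visible(people, True))   # ['Alice', 'Bobby', 'Cindy']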
38.985149
80
0.657143
939
7,875
5.315229
0.172524
0.043278
0.078141
0.046283
0.587257
0.564616
0.523542
0.499499
0.48307
0.48307
0
0.004715
0.245841
7,875
201
81
39.179104
0.835663
0.139937
0
0.492958
0
0
0.019073
0
0
0
0
0
0.161972
1
0.077465
false
0.007042
0.091549
0
0.190141
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4680fe37289f7c11ee4bd2ba12292268d591a53
1,960
py
Python
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py
tchamabe1979/exareme
462983e4feec7808e1fd447d02901502588a8879
[ "MIT" ]
null
null
null
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py
tchamabe1979/exareme
462983e4feec7808e1fd447d02901502588a8879
[ "MIT" ]
null
null
null
Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/clipboard/__init__.py
tchamabe1979/exareme
462983e4feec7808e1fd447d02901502588a8879
[ "MIT" ]
null
null
null
import sys

success = False
in_ironpython = "IronPython" in sys.version
if in_ironpython:
    try:
        from ironpython_clipboard import GetClipboardText, SetClipboardText
        success = True
    except ImportError:
        pass
else:
    try:
        from win32_clipboard import GetClipboardText, SetClipboardText
        success = True
    except ImportError:
        raise


def send_data(lists):
    SetClipboardText(make_tab(lists))


def set_clipboard_text(toclipboard):
    SetClipboardText(str(toclipboard))


def make_tab(lists):
    if hasattr(lists, "tolist"):
        lists = lists.tolist()
    ut = []
    for rad in lists:
        if type(rad) in [list, tuple]:
            ut.append("\t".join(["%s" % x for x in rad]))
        else:
            ut.append("%s" % rad)
    return "\n".join(ut)


def make_list_of_list(txt):
    def make_num(x):
        try:
            return int(x)
        except ValueError:
            try:
                return float(x)
            except ValueError:
                try:
                    return complex(x)
                except ValueError:
                    return x
        return x

    ut = []
    flag = False
    for rad in [x for x in txt.split("\r\n") if x != ""]:
        raden = [make_num(x) for x in rad.split("\t")]
        if str in map(type, raden):
            flag = True
        ut.append(raden)
    return ut, flag


def get_clipboard_text_and_convert(paste_list=False):
    """Get txt from clipboard. if paste_list==True the convert tab separated
    data to list of lists. Enclose list of list in array() if all elements are
    numeric"""
    txt = GetClipboardText()
    if txt:
        if paste_list and "\t" in txt:
            array, flag = make_list_of_list(txt)
            if flag:
                txt = repr(array)
            else:
                txt = "array(%s)" % repr(array)
            txt = "".join([c for c in txt if c not in " \t\r\n"])
    return txt
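Taking make_tab and make_list_of_list on their own (the module-level clipboard import is Windows-only), the two pure helpers round-trip tab-separated data without touching the clipboard; for instance:

text = make_tab([[1, 2], [3, 4]])                 # "1\t2\n3\t4"
rows, has_str = make_list_of_list("1\t2\r\n3\tx\r\n")
print(rows, has_str)                              # [[1, 2], [3, 'x']] True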
24.810127
79
0.558673
249
1,960
4.301205
0.293173
0.022409
0.014006
0.019608
0.239029
0.140056
0.140056
0.140056
0
0
0
0.001554
0.343367
1,960
78
80
25.128205
0.830614
0.078571
0
0.311475
0
0
0.026786
0
0
0
0
0
0
1
0.098361
false
0.016393
0.081967
0
0.311475
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4689432e90e3326c569ffdf5beb1c42f606d0c9
17,634
py
Python
mjrl/utils/train_agent.py
YujieLu10/tslam
1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd
[ "Apache-2.0" ]
null
null
null
mjrl/utils/train_agent.py
YujieLu10/tslam
1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd
[ "Apache-2.0" ]
null
null
null
mjrl/utils/train_agent.py
YujieLu10/tslam
1341dbecdf02ee6b1b6cdd1a538272fffdea6ffd
[ "Apache-2.0" ]
null
null
null
import logging
logging.disable(logging.CRITICAL)
import math
from tabulate import tabulate
from mjrl.utils.make_train_plots import make_train_plots
from mjrl.utils.gym_env import GymEnv
from mjrl.samplers.core import sample_paths
import numpy as np
import torch
import pickle
import imageio
import time as timer
import os
import copy
import matplotlib.pyplot as plt

try:
    import exptools
    from colorsys import hsv_to_rgb
    import pyvista as pv
except ImportError:
    exptools = None


def _load_latest_policy_and_logs(agent, *, policy_dir, logs_dir):
    """Loads the latest policy.
    Returns the next step number to begin with.
    """
    assert os.path.isdir(policy_dir), str(policy_dir)
    assert os.path.isdir(logs_dir), str(logs_dir)

    log_csv_path = os.path.join(logs_dir, 'log.csv')
    if not os.path.exists(log_csv_path):
        return 0   # fresh start

    print("Reading: {}".format(log_csv_path))
    agent.logger.read_log(log_csv_path)
    last_step = agent.logger.max_len - 1
    if last_step <= 0:
        return 0   # fresh start

    # find latest policy/baseline
    i = last_step
    while i >= 0:
        policy_path = os.path.join(policy_dir, 'policy_{}.pickle'.format(i))
        baseline_path = os.path.join(policy_dir, 'baseline_{}.pickle'.format(i))

        if not os.path.isfile(policy_path):
            i = i - 1
            continue
        else:
            print("Loaded last saved iteration: {}".format(i))

        with open(policy_path, 'rb') as fp:
            agent.policy = pickle.load(fp)
        with open(baseline_path, 'rb') as fp:
            agent.baseline = pickle.load(fp)

        # additional
        # global_status_path = os.path.join(policy_dir, 'global_status.pickle')
        # with open(global_status_path, 'rb') as fp:
        #     agent.load_global_status( pickle.load(fp) )

        agent.logger.shrink_to(i + 1)
        assert agent.logger.max_len == i + 1
        return agent.logger.max_len

    # cannot find any saved policy
    raise RuntimeError("Log file exists, but cannot find any saved policy.")


def save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, iternum, is_best_policy):
    uniform_gt_data = np.load("/home/jianrenw/prox/tslam/assets/uniform_gt/uniform_{}_o3d.npz".format(obj_name))['pcd']
    data_scale = uniform_gt_data * obj_scale
    data_rotate = data_scale.copy()

    x = data_rotate[:, 0].copy()
    y = data_rotate[:, 1].copy()
    z = data_rotate[:, 2].copy()
    x_theta = obj_orientation[0]
    data_rotate[:, 0] = x
    data_rotate[:, 1] = y * math.cos(x_theta) - z * math.sin(x_theta)
    data_rotate[:, 2] = y * math.sin(x_theta) + z * math.cos(x_theta)

    x = data_rotate[:, 0].copy()
    y = data_rotate[:, 1].copy()
    z = data_rotate[:, 2].copy()
    y_theta = obj_orientation[1]
    data_rotate[:, 0] = x * math.cos(y_theta) + z * math.sin(y_theta)
    data_rotate[:, 1] = y
    data_rotate[:, 2] = z * math.cos(y_theta) - x * math.sin(y_theta)

    x = data_rotate[:, 0].copy()
    y = data_rotate[:, 1].copy()
    z = data_rotate[:, 2].copy()
    z_theta = obj_orientation[2]
    data_rotate[:, 0] = x * math.cos(z_theta) - y * math.sin(z_theta)
    data_rotate[:, 1] = x * math.sin(z_theta) + y * math.cos(z_theta)
    data_rotate[:, 2] = z

    data_trans = data_rotate.copy()
    data_trans[:, 0] += obj_relative_position[0]
    data_trans[:, 1] += obj_relative_position[1]
    data_trans[:, 2] += obj_relative_position[2]
    uniform_gt_data = data_trans.copy()

    data = pc_frame
    resolution = 0.01
    sep_x = math.ceil(0.3 / resolution)
    sep_y = math.ceil(0.3 / resolution)
    sep_z = math.ceil(0.3 / resolution)
    x, y, z = np.indices((sep_x, sep_y, sep_z))

    cube1 = (x < 0) & (y < 1) & (z < 1)
    gtcube = (x < 0) & (y < 1) & (z < 1)
    voxels = cube1
    gt_voxels = gtcube

    # draw gt
    gt_map_list = []
    for idx, val in enumerate(uniform_gt_data):
        idx_x = math.floor((val[0] + 0.15) / resolution)
        idx_y = math.floor((val[1] + 0.15) / resolution)
        idx_z = math.floor((val[2]) / resolution)
        # if idx_z > 6:
        #     continue
        name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
        if name not in gt_map_list:
            gt_map_list.append(name)
        cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
        # combine the objects into a single boolean array
        gt_voxels += cube

    # draw cuboids in the top left and bottom right corners, and a link between them
    map_list = []
    for idx, val in enumerate(data):
        idx_x = math.floor((val[0] + 0.15) / resolution)
        idx_y = math.floor((val[1] + 0.15) / resolution)
        idx_z = math.floor((val[2]) / resolution)
        # if idx_z > 6:
        #     continue
        name = str(idx_x) + '_' + str(idx_y) + '_' + str(idx_z)
        if name not in map_list and name in gt_map_list:
            map_list.append(name)
        cube = (x < idx_x + 1) & (y < idx_y + 1) & (z < idx_z + 1) & (x >= idx_x) & (y >= idx_y) & (z >= idx_z)
        # combine the objects into a single boolean array
        voxels += cube

    # gt_obj4:668
    occupancy = len(map_list) / len(gt_map_list)
    # print(len(map_list) / sep_x / sep_y / sep_z )
    is_best_reconstruct = True
    files = os.listdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/'.format(obj_name, reset_mode_conf, reward_conf))
    for file in files:
        if "overlap" in file and "png" in file:
            file_str = str(file)
            previous_occup = file_str[(file_str.index("-") + 1):file_str.index(".png")]
            if occupancy < float(previous_occup):
                is_best_reconstruct = False

    # obj_name = "obj{}".format(obj_name)
    # set the colors of each object
    vis_voxel = gt_voxels | voxels
    colors = np.empty(vis_voxel.shape, dtype=object)
    colors[gt_voxels] = 'white'
    colors[voxels] = 'cyan'

    # and plot everything
    ax = plt.figure().add_subplot(projection='3d')
    ax.set_zlim(1, 30)
    ax.voxels(vis_voxel, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
    # plt.savefig('uniform_gtbox_{}.png'.format(step))
    if is_best_policy or is_best_reconstruct:
        plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_overlap-{}.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
    plt.savefig('voxel/iter-{}-{}-overlap-{}.png'.format(iternum, obj_name, occupancy))
    plt.close()

    ax = plt.figure().add_subplot(projection='3d')
    ax.set_zlim(1, 30)
    ax.voxels(gt_voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
    if is_best_policy or is_best_reconstruct:
        plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/gt.png'.format(obj_name, reset_mode_conf, reward_conf))
    plt.savefig('voxel/iter-{}-{}-gt.png'.format(iternum, obj_name))
    plt.close()

    ax = plt.figure().add_subplot(projection='3d')
    ax.set_zlim(1, 30)
    ax.voxels(voxels, facecolors=colors, edgecolor='g', alpha=.4, linewidth=.05)
    if is_best_policy or is_best_reconstruct:
        plt.savefig('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_exp.png'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct))
    plt.savefig('voxel/iter-{}-{}-exp.png'.format(iternum, obj_name))
    plt.close()

    return is_best_reconstruct, occupancy


def train_agent(job_name, agent,
                seed=0,
                niter=101,
                gamma=0.995,
                gae_lambda=None,
                num_cpu=16,
                sample_mode='trajectories',
                horizon=int(150),
                num_traj=50,
                num_samples=50000,  # has precedence, used with sample_mode = 'samples'
                save_freq=10,
                evaluation_rollouts=None,
                plot_keys=['stoc_pol_mean'],
                env_kwargs=dict(),
                visualize_kwargs=dict(),
                sample_paths_kwargs=dict(),
                ):
    print("num_cpu{}".format(num_cpu))
    np.random.seed(seed)
    if os.path.isdir(job_name) == False:
        os.mkdir(job_name)
    previous_dir = os.getcwd()
    obj_name = env_kwargs["obj_name"]
    reset_mode_conf = env_kwargs["reset_mode"]
    reward_conf = "cf{}knn{}voxel{}".format(env_kwargs["chamfer_r_factor"], env_kwargs["knn_r_factor"], env_kwargs["new_voxel_r_factor"])

    os.chdir(job_name)  # important! we are now in the directory to save data
    if os.path.isdir('iterations') == False:
        os.mkdir('iterations')
    if os.path.isdir('2dpointcloud') == False:
        os.mkdir('2dpointcloud')
    if os.path.isdir('pointcloudnpz') == False:
        os.mkdir('pointcloudnpz')
    if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False:
        os.makedirs('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
    if os.path.isdir('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf)) == False:
        os.makedirs('/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}'.format(obj_name, reset_mode_conf, reward_conf))
    if os.path.isdir('voxel') == False:
        os.mkdir('voxel')
    if os.path.isdir('logs') == False and agent.save_logs == True:
        os.mkdir('logs')

    best_policy = copy.deepcopy(agent.policy)
    best_perf = -1e8
    train_curve = best_perf * np.ones(niter)
    mean_pol_perf = 0.0
    e = GymEnv(agent.env.env_id, env_kwargs)

    # Load from any existing checkpoint, policy, statistics, etc.
    # Why no checkpointing.. :(
    i_start = _load_latest_policy_and_logs(agent, policy_dir='iterations', logs_dir='logs')
    if i_start:
        print("Resuming from an existing job folder ...")

    for i in range(i_start, niter):
        print("......................................................................................")
        print("ITERATION : %i " % i)
        is_best_policy = False
        if train_curve[i - 1] > best_perf:
            if exptools:
                exptools.logging.logger.log_text("update best_policy")
            best_policy = copy.deepcopy(agent.policy)
            best_perf = train_curve[i - 1]
            is_best_policy = True

        N = num_traj if sample_mode == 'trajectories' else num_samples
        stats = agent.train_step(
            N=N,
            sample_mode=sample_mode,
            horizon=horizon,
            gamma=gamma,
            gae_lambda=gae_lambda,
            num_cpu=num_cpu,
            env_kwargs=env_kwargs,
            sample_paths_kwargs=sample_paths_kwargs,
        )
        train_curve[i] = stats[0]

        if evaluation_rollouts is not None and evaluation_rollouts > 0:
            print("Performing evaluation rollouts ........")
            eval_paths = sample_paths(
                num_traj=evaluation_rollouts,
                env=e.env_id,
                policy=agent.policy,
                eval_mode=True,
                base_seed=seed,
                num_cpu=num_cpu,
                env_kwargs=env_kwargs,
                **sample_paths_kwargs)
            mean_pol_perf = np.mean([np.sum(path['rewards']) for path in eval_paths])
            if agent.save_logs:
                agent.logger.log_kv('eval_score', mean_pol_perf)
            if exptools:
                exptools.logging.logger.log_scalar('eval_score', mean_pol_perf, i)
            if exptools:
                env_infos = [path["env_infos"] for path in eval_paths]  # a list of dict
                rewards = dict()
                total_points = list()
                if env_infos:
                    # get decomposed reward statistics
                    keys = [k for k in env_infos[0].keys() if "_p" in k[-2:] or "_r" in k[-2:] or "occupancy" in k]
                    for k in keys:
                        rewards[k] = list()
                        for env_info in env_infos:
                            rewards[k].append(env_info[k])
                    for env_info in env_infos:
                        total_points.append(len(env_info["pointcloud"]))
                for k, v in rewards.items():
                    exptools.logging.logger.log_scalar_batch(k, v, i)
                exptools.logging.logger.log_scalar_batch("total_num_points", total_points, i)
            print(">>> finish evaluation rollouts")

        if (i % save_freq == 0 and i > 0):
            if agent.save_logs:
                agent.logger.save_log('logs/')
                make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
            obj_orientation = env_kwargs["obj_orientation"]
            obj_relative_position = env_kwargs["obj_relative_position"]
            obj_scale = env_kwargs["obj_scale"]
            policy_file = 'policy_%i.pickle' % i
            baseline_file = 'baseline_%i.pickle' % i
            pickle.dump(agent.policy, open('iterations/' + policy_file, 'wb'))
            pickle.dump(agent.baseline, open('iterations/' + baseline_file, 'wb'))
            pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
            pickle.dump(agent.global_status, open('iterations/global_status.pickle', 'wb'))

            # save videos and pointcloud and reconstruted mesh
            if exptools:
                video, env_infos = e.visualize_policy_offscreen(
                    policy=agent.policy,
                    **visualize_kwargs,
                )  # (T, C, H, W)
                video_explore, env_infos_explore = e.visualize_policy_explore(
                    policy=agent.policy,
                    **visualize_kwargs,
                )  # (T, C, H, W)
                pc_frame = np.array(env_infos[-1]["pointcloud"] if len(env_infos[-1]["pointcloud"]) > 0 else np.empty((0, 3)))

                # 3d voxel visualization
                is_best_reconstruct, occupancy = save_voxel_visualization(obj_name, reset_mode_conf, reward_conf, obj_orientation, obj_relative_position, obj_scale, pc_frame, i, is_best_policy)
                if is_best_policy or is_best_reconstruct:
                    pickle.dump(best_policy, open('/home/jianrenw/prox/tslam/data/result/best_policy/{}/{}/{}/bp{}_br{}_best_policy.pickle'.format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct), 'wb'))
                if is_best_policy or is_best_reconstruct:
                    np.savez_compressed("pointcloudnpz/alpha_pointcloud_" + str(i) + ".npz", pcd=pc_frame)
                    np.savez_compressed("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_pointcloud_overlap-{}.npz".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy), pcd=pc_frame)
                # else:
                #     np.savez_compressed("pointcloudnpz/pointcloud_"+str(i)+".npz",pcd=pc_frame)
                # pc_frames.append(pc_frame)
                ax = plt.axes()
                ax.scatter(pc_frame[:, 0], pc_frame[:, 1], cmap='viridis', linewidth=0.5)
                if is_best_policy or is_best_reconstruct:
                    plt.savefig("2dpointcloud/alpha_{}.png".format('2dpointcloud' + str(i)))
                    plt.savefig("/home/jianrenw/prox/tslam/data/result/best_eval/{}/{}/{}/bp{}_br{}_alpha_2dpointcloud_overlap-{}.png".format(obj_name, reset_mode_conf, reward_conf, is_best_policy, is_best_reconstruct, occupancy))
                # else:
                #     plt.savefig("2dpointcloud/{}.png".format('2dpointcloud' + str(i)))
                plt.close()

                # =======================================================
                # if obj_name in ["airplane", "apple", "glass", "cup"]:
                exptools.logging.logger.record_image("rendered", video[-1], i)
                exptools.logging.logger.record_gif("rendered", video, i)
                # exptools.logging.logger.record_image("rendered_explore", video_explore[-1], i)
                # exptools.logging.logger.record_gif("rendered_explore", video_explore, i)

        # print results to console
        if i == 0:
            result_file = open('results.txt', 'w')
            print("Iter | Stoc Pol | Mean Pol | Best (Stoc) \n")
            result_file.write("Iter | Sampling Pol | Evaluation Pol | Best (Sampled) \n")
            result_file.close()
        result_file = open('results.txt', 'a')
        result_file.write("%4i %5.2f %5.2f %5.2f \n" % (i, train_curve[i], mean_pol_perf, best_perf))
        result_file.close()
        if agent.save_logs:
            print_data = sorted(filter(lambda v: np.asarray(v[1]).size == 1,
                                       agent.logger.get_current_log().items()))
            print(tabulate(print_data))
        if exptools:
            exptools.logging.logger.log_scalar("Iter", i, i)
            exptools.logging.logger.log_scalar("SamplingPol", train_curve[i], i)
            exptools.logging.logger.log_scalar("EvaluationPol", mean_pol_perf, i)
            exptools.logging.logger.log_scalar("BestSampled", best_perf, i)
            exptools.logging.logger.dump_data()

    # final save
    pickle.dump(best_policy, open('iterations/best_policy.pickle', 'wb'))
    if agent.save_logs:
        agent.logger.save_log('logs/')
        make_train_plots(log=agent.logger.log, keys=plot_keys, save_loc='logs/')
    os.chdir(previous_dir)
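The occupancy bookkeeping in save_voxel_visualization amounts to binning points into 1 cm voxels and measuring what fraction of ground-truth voxels the observed cloud also touches. A NumPy-only distillation (random stand-in clouds; the helper name is hypothetical):

import math
import numpy as np

def voxel_keys(points, resolution=0.01):
    # One set entry per occupied voxel, mirroring the string keys built above.
    return {tuple(int(math.floor(c / resolution)) for c in p) for p in points}

gt = voxel_keys(np.random.rand(1000, 3) * 0.3)    # stand-in for the uniform ground truth
seen = voxel_keys(np.random.rand(200, 3) * 0.3)   # stand-in for the observed point cloud
print(len(seen & gt) / len(gt))                   # fraction of ground-truth voxels covered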
46.898936
260
0.60304
2,365
17,634
4.249894
0.157717
0.017909
0.017909
0.022286
0.441847
0.40195
0.353199
0.318575
0.280171
0.269127
0
0.012919
0.258138
17,634
375
261
47.024
0.755389
0.089826
0
0.217391
0
0.016722
0.134997
0.073782
0
0
0
0
0.010033
1
0.010033
false
0
0.060201
0
0.083612
0.036789
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4691a885e026834c8813dea028eee2eea8dcb79
4,499
py
Python
src/tests/plugins/banktransfer/test_refund_export.py
NicsTr/pretix
e6d2380d9ed1836cc64a688b2be20d00a8500eab
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/tests/plugins/banktransfer/test_refund_export.py
NicsTr/pretix
e6d2380d9ed1836cc64a688b2be20d00a8500eab
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
src/tests/plugins/banktransfer/test_refund_export.py
NicsTr/pretix
e6d2380d9ed1836cc64a688b2be20d00a8500eab
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import json
from datetime import timedelta
from decimal import Decimal

import pytest
from django.utils.timezone import now

from pretix.base.models import Event, Order, OrderRefund, Organizer, Team, User
from pretix.plugins.banktransfer.models import RefundExport
from pretix.plugins.banktransfer.views import (
    _row_key_func, _unite_transaction_rows,
)


@pytest.fixture
def env():
    o = Organizer.objects.create(name='Dummy', slug='dummy')
    event = Event.objects.create(
        organizer=o, name='Dummy', slug='dummy', date_from=now(),
        plugins='pretix.plugins.banktransfer,pretix.plugins.paypal'
    )
    user = User.objects.create_user('dummy@dummy.dummy', 'dummy')
    t = Team.objects.create(organizer=event.organizer, can_view_orders=True, can_change_orders=True)
    t.members.add(user)
    t.limit_events.add(event)
    order = Order.objects.create(
        code='1Z3AS', event=event, email='admin@localhost',
        status=Order.STATUS_PAID,
        datetime=now(), expires=now() + timedelta(days=10),
        total=23
    )
    refund = OrderRefund.objects.create(
        order=order,
        amount=Decimal("23"),
        provider='banktransfer',
        state=OrderRefund.REFUND_STATE_CREATED,
        info=json.dumps({
            'payer': "Abc Def",
            'iban': "DE27520521540534534466",
            'bic': "HELADEF1MEG",
        })
    )
    return event, user, refund


url_prefixes = [
    "/control/event/dummy/dummy/",
    "/control/organizer/dummy/"
]


@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds_as_sepa_xml(client, env, url_prefix):
    client.login(email='dummy@dummy.dummy', password='dummy')
    r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
    assert b"SEPA" in r.content
    r = client.get(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/')
    assert r.status_code == 200
    r = client.post(f'{url_prefix}banktransfer/sepa-export/{RefundExport.objects.last().id}/', {
        "account_holder": "Fission Festival",
        "iban": "DE71720690050653667120",
        "bic": "GENODEF1AIL",
    })
    assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)


@pytest.mark.django_db
@pytest.mark.parametrize("url_prefix", url_prefixes)
def test_export_refunds(client, env, url_prefix):
    client.login(email='dummy@dummy.dummy', password='dummy')
    r = client.get(f'{url_prefix}banktransfer/refunds/')
    assert r.status_code == 200
    r = client.post(f'{url_prefix}banktransfer/refunds/', {"unite_transactions": True}, follow=True)
    assert r.status_code == 200
    refund = RefundExport.objects.last()
    assert refund is not None
    assert b"Download CSV" in r.content
    r = client.get(f'{url_prefix}banktransfer/export/{refund.id}/')
    assert r.status_code == 200
    assert "DE27520521540534534466" in "".join(str(part) for part in r.streaming_content)


def test_unite_transaction_rows():
    rows = sorted([
        {
            'payer': "Abc Def",
            'iban': 'DE12345678901234567890',
            'bic': 'HARKE9000',
            'id': "ROLLA-R-1",
            'amount': Decimal("42.23"),
        },
        {
            'payer': "First Last",
            'iban': 'DE111111111111111111111',
            'bic': 'ikswez2020',
            'id': "PARTY-R-1",
            'amount': Decimal("6.50"),
        }
    ], key=_row_key_func)

    assert _unite_transaction_rows(rows) == rows

    rows = sorted(rows + [
        {
            'payer': "Abc Def",
            'iban': 'DE12345678901234567890',
            'bic': 'HARKE9000',
            'id': "ROLLA-R-1",
            'amount': Decimal("7.77"),
        },
        {
            'payer': "Another Last",
            'iban': 'DE111111111111111111111',
            'bic': 'ikswez2020',
            'id': "PARTY-R-2",
            'amount': Decimal("13.50"),
        }
    ], key=_row_key_func)

    assert _unite_transaction_rows(rows) == sorted([
        {
            'payer': "Abc Def",
            'iban': 'DE12345678901234567890',
            'bic': 'HARKE9000',
            'id': "ROLLA-R-1",
            'amount': Decimal("50.00"),
        },
        {
            'payer': 'Another Last, First Last',
            'iban': 'DE111111111111111111111',
            'bic': 'ikswez2020',
            'id': 'PARTY-R-1, PARTY-R-2',
            'amount': Decimal('20.00'),
        }], key=_row_key_func)
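The merge rule test_unite_transaction_rows pins down can be restated independently of pretix: rows sharing (iban, bic) are united, payers and ids are joined, amounts are summed. A hypothetical re-implementation of that rule (ordering aside; the real one lives in pretix.plugins.banktransfer.views):

from decimal import Decimal
from itertools import groupby

def unite(rows):
    key = lambda r: (r['iban'], r['bic'])
    merged = []
    for (iban, bic), grp in groupby(sorted(rows, key=key), key=key):
        grp = list(grp)
        merged.append({
            'iban': iban,
            'bic': bic,
            'payer': ', '.join(sorted({g['payer'] for g in grp})),
            'id': ', '.join(sorted({g['id'] for g in grp})),
            'amount': sum((g['amount'] for g in grp), Decimal('0')),
        })
    return merged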
33.080882
100
0.608357
506
4,499
5.280632
0.278656
0.033683
0.022455
0.049401
0.528817
0.508234
0.5
0.490644
0.471183
0.471183
0
0.081645
0.243165
4,499
135
101
33.325926
0.703084
0
0
0.308333
0
0
0.269838
0.134919
0
0
0
0
0.091667
1
0.033333
false
0.016667
0.066667
0
0.108333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f469fb9c0617beca4380191f4e87136c8e35c588
4,804
py
Python
NewLifeUtils/LoggerModule.py
NewLife1324/NewLifeUtils-Dev
d955ad801da879d2888506853b0d0141c15dfafc
[ "MIT" ]
2
2020-12-12T17:45:34.000Z
2020-12-16T15:00:05.000Z
NewLifeUtils/LoggerModule.py
NewLife1324/NewLifeUtils
d955ad801da879d2888506853b0d0141c15dfafc
[ "MIT" ]
null
null
null
NewLifeUtils/LoggerModule.py
NewLife1324/NewLifeUtils
d955ad801da879d2888506853b0d0141c15dfafc
[ "MIT" ]
null
null
null
from NewLifeUtils.ColorModule import ACC, MCC
from NewLifeUtils.UtilsModule import hex_to_rgb
from NewLifeUtils.FileModule import DataStorage, LogFile
from NewLifeUtils.StringUtilModule import remove_csi
from datetime import datetime
import sys


class Formatter(dict):
    def __init__(self, *args, date_format="%d-%m-%Y", time_format="%H:%M:%S", **kwargs):
        # Fixed: the original assigned the literal defaults here, silently
        # ignoring the date_format/time_format arguments.
        self.date_format = date_format
        self.time_format = time_format
        dict.__init__(self, *args, **kwargs)

    def __missing__(self, key):
        if key == "time":
            return datetime.now().strftime(self.time_format)
        elif key == "date":
            return datetime.now().strftime(self.date_format)
        elif key.startswith("#"):
            if key == "#reset":
                return ACC.RESET
            elif key == "#under":
                return ACC.UNDERLINE
            elif key == "#nounder":
                return ACC.NO_UNDERLINE
            elif key == "#reverse":
                return ACC.REVERSE
            elif key == "#noreverse":
                return ACC.NO_REVERSE
            else:
                return ACC.customrgb(*hex_to_rgb(key))
        else:
            return "{" + key + "}"


def create_logger(
    pattern="[{time}] {tag}: {message}",
    tag_length=7,
    default_tag="Log",
    reader=False,
    reader_bg="#24416b",
    reader_fg="#a0dbf2",
    file_log=False,
    logfile=None,
    time_format="%d-%m-%Y",
    data_format="%H:%M:%S",
):
    def log(message, tag=""):
        if reader:
            if not any([message.endswith(i) for i in tuple(":> ")]):
                title = message + ": "
            else:
                title = message
                message = message.rstrip(" ")
                message = message.rstrip(":")
                message = message.rstrip(">")
            sys.stdout.write(
                f"{ACC.bcustomrgb(*hex_to_rgb(reader_bg))}{ACC.customrgb(*hex_to_rgb(reader_fg))}{title}{MCC.ERASE_NXT_LINE}"
            )
            readed = input()
            sys.stdout.write(ACC.RESET + MCC.up() + MCC.ERASE_ALL_LINE)
        else:
            readed = None
        tag = ("{:<" + str(tag_length) + "}").format(tag if tag else default_tag)
        log_record = pattern.format_map(
            Formatter(tag=tag, message=message, input=readed,
                      time_format=time_format, data_format=data_format)
        )
        sys.stdout.write(ACC.RESET + log_record + ACC.RESET + "\n")
        if file_log:
            logfile.write(remove_csi(log_record) + "\n")
        return readed

    return log


def cstm(pattern, **kwargs):
    sys.stdout.write(
        ACC.RESET + pattern.format_map(Formatter(**kwargs)) + ACC.RESET + "\n"
    )


def smart_format(pattern, **kwargs):
    return pattern.format_map(Formatter(**kwargs))


def init_from_cfg():
    default_config = {
        "log_pattern": "{#81f059}[{time}] {#6bd130}{tag}{#fff}: {#1ed476}{message}",
        "wrn_pattern": "{#cfa529}[{time}] {#d7e356}{tag}{#fff}: {#b9c726}{message}",
        "err_pattern": "{#cf4729}[{time}] {#d93b18}{tag}{#fff}: {#cf2727}{message}",
        "tip_pattern": "{#9c1fd1}[{time}] {#471dc4}{tag}{#fff}: {#219ddb}{message}",
        "rea_pattern": "{#2141a3}[{time}] {#5a51db}{tag}{#fff}: {#2459d6}{message} {#fff}: {#24d0d6}{input}",
        "log_tag": "Log",
        "wrn_tag": "Warn",
        "err_tag": "Error",
        "tip_tag": "Tip",
        "rea_tag": "Reader",
        "date_format": "%d-%m-%Y",
        "time_format": "%H:%M:%S",
        "tag_length": 7,
        "file_log": True,
        "logtime": "%d-%m-%Y-%H",
        "logname": "log-{time}",
    }
    config = DataStorage("config.yml", "logger", default_config)
    if config["file_log"]:
        now = datetime.now()
        logname = config["logname"]
        logtime = config["logtime"]
        logfile = LogFile(f"{logname.format(time=now.strftime(logtime))}.log", "logs")
    else:
        logfile = None
    log = create_logger(pattern=config["log_pattern"], default_tag=config["log_tag"],
                        file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
    wrn = create_logger(pattern=config["wrn_pattern"], default_tag=config["wrn_tag"],
                        file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
    err = create_logger(pattern=config["err_pattern"], default_tag=config["err_tag"],
                        file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
    tip = create_logger(pattern=config["tip_pattern"], default_tag=config["tip_tag"],
                        file_log=config['file_log'], logfile=logfile, tag_length=config['tag_length'])
    rea = create_logger(
        pattern=config["rea_pattern"], default_tag=config["rea_tag"], reader=True
    )
    return log, wrn, err, tip, rea


log, wrn, err, tip, rea = init_from_cfg()
39.056911
164
0.580766
575
4,804
4.652174
0.217391
0.031402
0.042617
0.046729
0.238505
0.133084
0.110654
0.110654
0.110654
0.110654
0
0.020011
0.251041
4,804
123
165
39.056911
0.723457
0
0
0.0625
0
0.017857
0.214776
0.053902
0
0
0
0
0
1
0.0625
false
0
0.053571
0.008929
0.241071
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f46b0b539cef945ee6aa318ff4cb5a94326430db
6,290
py
Python
mealpy/evolutionary_based/MA.py
Alhassan20/mealpy
7ed365c5c495ad1c1e066662c90159b3d5e9b8e3
[ "MIT" ]
1
2021-08-07T16:30:48.000Z
2021-08-07T16:30:48.000Z
mealpy/evolutionary_based/MA.py
Alhassan20/mealpy
7ed365c5c495ad1c1e066662c90159b3d5e9b8e3
[ "MIT" ]
null
null
null
mealpy/evolutionary_based/MA.py
Alhassan20/mealpy
7ed365c5c495ad1c1e066662c90159b3d5e9b8e3
[ "MIT" ]
null
null
null
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 14:22, 11/04/2020                                                        %
#                                                                                                       %
#       Email:      nguyenthieu2102@gmail.com                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieu1995                                                        %
# ------------------------------------------------------------------------------------------------------%

import time
import numpy as np
from mealpy.optimizer import Optimizer


class BaseMA(Optimizer):
    """
    The original version of: Memetic Algorithm (MA)
        (On evolution, search, optimization, genetic algorithms and martial arts: Towards memetic algorithms)
    Link:
        Clever Algorithms: Nature-Inspired Programming Recipes - Memetic Algorithm (MA)
        http://www.cleveralgorithms.com/nature-inspired/physical/memetic_algorithm.html
    """
    ID_POS = 0
    ID_FIT = 1
    ID_BIT = 2

    def __init__(self, problem: dict, epoch=1000, pop_size=100, pc=0.98, pm=0.025,
                 p_local=0.5, max_local_gens=10, bits_per_param=16):
        """
        Args:
            problem (dict): a dictionary of your problem
            epoch (int): maximum number of iterations, default = 1000
            pop_size (int): number of population size, default = 100
            pc (float): cross-over probability, default = 0.98
            pm (float): mutation probability, default = 0.025
            p_local (float): probability of running local search on a solution, default = 0.5
            max_local_gens (int): number of local-search generations, default = 10
            bits_per_param (int): number of bits used to encode each variable, default = 16
        """
        super().__init__(problem)
        self.epoch = epoch
        self.pop_size = pop_size
        self.pc = pc
        self.pm = pm
        self.p_local = p_local
        self.max_local_gens = max_local_gens
        self.bits_per_param = bits_per_param
        self.bits_total = self.problem_size * self.bits_per_param

    def create_solution(self):
        position = np.random.uniform(self.lb, self.ub)
        fitness = self.get_fitness_position(position=position)
        bitstring = ''.join(["1" if np.random.uniform() < 0.5 else "0" for _ in range(0, self.bits_total)])
        return [position, fitness, bitstring]

    def _decode__(self, bitstring=None):
        """
        Decode the random bitstring into real numbers.

        Args:
            bitstring (str): "11000000100101000101010" - with bits_per_param = 16, 32 bits encode 2 variables, e.g. x1 and x2

        Returns:
            list of real numbers (vector)
        """
        vector = np.ones(self.problem_size)
        for idx in range(0, self.problem_size):
            param = bitstring[idx * self.bits_per_param: (idx + 1) * self.bits_per_param]  # Select bits_per_param bits each time
            vector[idx] = self.lb[idx] + ((self.ub[idx] - self.lb[idx]) / ((2.0 ** self.bits_per_param) - 1)) * int(param, 2)
        return vector

    def _crossover__(self, dad=None, mom=None):
        if np.random.uniform() >= self.pc:
            temp = [dad].copy()
            return temp[0]
        else:
            child = ""
            for idx in range(0, self.bits_total):
                if np.random.uniform() < 0.5:
                    child += dad[idx]
                else:
                    child += mom[idx]
            return child

    def _point_mutation__(self, bitstring=None):
        child = ""
        for bit in bitstring:
            # Bug fix: the original compared against self.pc (the crossover rate);
            # point mutation should use the mutation probability self.pm.
            if np.random.uniform() < self.pm:
                child += "0" if bit == "1" else "1"
            else:
                child += bit
        return child

    def create_next_generation(self, pop: list):
        ## Binary tournament
        children = [self.get_solution_kway_tournament_selection(pop, k_way=2, output=1)[0] for _ in range(self.pop_size)]

        ## Reproduction
        for idx in range(0, self.pop_size):
            ancient = pop[idx + 1] if idx % 2 == 0 else pop[idx - 1]
            if idx == self.pop_size - 1:
                ancient = pop[0]
            bitstring_new = self._crossover__(pop[idx][self.ID_BIT], ancient[self.ID_BIT])
            bitstring_new = self._point_mutation__(bitstring_new)
            pos_new = self._decode__(bitstring_new)
            fit_new = self.get_fitness_position(pos_new)
            children[idx] = [pos_new, fit_new, bitstring_new]
        return children

    def _bits_climber__(self, child=None):
        current = child.copy()
        for idx in range(0, self.max_local_gens):
            child = current.copy()
            bitstring_new = self._point_mutation__(child[self.ID_BIT])
            pos_new = self._decode__(bitstring_new)
            fit_new = self.get_fitness_position(pos_new)
            current = self.get_better_solution(child, [pos_new, fit_new, bitstring_new])
        return current

    def train(self):
        pop = [self.create_solution() for _ in range(self.pop_size)]
        pop, g_best = self.get_global_best_solution(pop)
        self.history_list_g_best = [g_best]
        self.history_list_c_best = self.history_list_g_best.copy()

        for epoch in range(0, self.epoch):
            time_start = time.time()

            # Create next generation
            pop = self.create_next_generation(pop)

            # Searching in local
            for i in range(0, self.pop_size):
                if np.random.uniform() < self.p_local:
                    pop[i] = self._bits_climber__(pop[i])

            # Sort the population and update the global best solution
            pop = self.update_global_best_solution(pop)

            ## Additional information for the framework
            time_start = time.time() - time_start
            self.history_list_epoch_time.append(time_start)
            self.print_epoch(epoch + 1, time_start)
            self.history_list_pop.append(pop.copy())

        ## Additional information for the framework
        self.solution = self.history_list_g_best[-1]
        self.save_data()
        return self.solution[self.ID_POS], self.solution[self.ID_FIT][self.ID_TAR]
42.214765
134
0.550397
744
6,290
4.419355
0.25672
0.019161
0.032847
0.025547
0.233273
0.133212
0.055961
0.037713
0.037713
0.037713
0
0.028833
0.321781
6,290
148
135
42.5
0.741913
0.296184
0
0.125
0
0
0.001187
0
0
0
0
0
0
1
0.090909
false
0
0.034091
0
0.261364
0.011364
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f46c203558ba08eaf57d58a68abbbd1315976d22
16,106
py
Python
src/estimagic/estimation/estimate_ml.py
OpenSourceEconomics/estimagic
85163b4cdc601d60d654c6ca1f42b9db17a130a3
[ "MIT" ]
83
2019-09-26T04:44:03.000Z
2022-03-17T20:24:02.000Z
src/estimagic/estimation/estimate_ml.py
OpenSourceEconomics/estimagic
85163b4cdc601d60d654c6ca1f42b9db17a130a3
[ "MIT" ]
243
2019-06-25T18:15:53.000Z
2022-03-26T09:17:44.000Z
src/estimagic/estimation/estimate_ml.py
OpenSourceEconomics/estimagic
85163b4cdc601d60d654c6ca1f42b9db17a130a3
[ "MIT" ]
23
2019-07-03T11:16:55.000Z
2022-03-07T00:57:38.000Z
from estimagic.inference.ml_covs import cov_cluster_robust
from estimagic.inference.ml_covs import cov_hessian
from estimagic.inference.ml_covs import cov_jacobian
from estimagic.inference.ml_covs import cov_robust
from estimagic.inference.ml_covs import cov_strata_robust
from estimagic.inference.shared import calculate_inference_quantities
from estimagic.inference.shared import check_is_optimized_and_derivative_case
from estimagic.inference.shared import get_derivative_case
from estimagic.inference.shared import get_internal_first_derivative
from estimagic.inference.shared import transform_covariance
from estimagic.optimization.optimize import maximize
from estimagic.parameters.parameter_conversion import get_derivative_conversion_function
from estimagic.parameters.process_constraints import process_constraints
from estimagic.shared.check_option_dicts import check_numdiff_options
from estimagic.shared.check_option_dicts import check_optimization_options


def estimate_ml(
    loglike,
    params,
    optimize_options,
    *,
    constraints=None,
    logging=False,
    log_options=None,
    loglike_kwargs=None,
    derivative=None,
    derivative_kwargs=None,
    loglike_and_derivative=None,
    loglike_and_derivative_kwargs=None,
    numdiff_options=None,
    jacobian=None,
    jacobian_kwargs=None,
    hessian=False,
    hessian_kwargs=None,
    ci_level=0.95,
    n_samples=10_000,
    bounds_handling="raise",
    design_info=None,
):
    """Do a maximum likelihood (ml) estimation.

    This is a high level interface of our lower level functions for maximization,
    numerical differentiation and inference. It does the full workflow for maximum
    likelihood estimation with just one function call.

    While we have good defaults, you can still configure each aspect of each step
    via the optional arguments of this function. If you find it easier to do the
    "difficult" steps (mainly maximization and calculating numerical derivatives of a
    potentially noisy function) separately, you can do so and just provide those
    results as ``params``, ``jacobian`` and ``hessian``.

    The docstring is aspirational and not all options are supported yet.

    Args:
        loglike (callable): Likelihood function that takes a params DataFrame (and
            potentially other keyword arguments) and returns a dictionary that has at
            least the entries "value" (a scalar float) and "contributions" (a 1d numpy
            array or pandas Series) with the log likelihood contribution per
            individual.
        params (pd.DataFrame): DataFrame where the "value" column contains the
            estimated or start parameters of a likelihood model. See :ref:`params` for
            details. If the supplied parameters are estimated parameters, set
            optimize_options to False.
        optimize_options (dict or False): Keyword arguments that govern the numerical
            optimization. Valid entries are all arguments of
            :func:`~estimagic.optimization.optimize.minimize` except for criterion,
            derivative, criterion_and_derivative and params. If you pass False as
            optimize_options you signal that ``params`` are already the optimal
            parameters and no numerical optimization is needed.
        constraints (list): List with constraint dictionaries.
            See .. _link: ../../docs/source/how_to_guides/how_to_use_constraints.ipynb
        logging (pathlib.Path, str or False): Path to sqlite3 file (which typically
            has the file extension ``.db``). If the file does not exist, it will be
            created. The dashboard can only be used when logging is used.
        log_options (dict): Additional keyword arguments to configure the logging.
            - "fast_logging": A boolean that determines if "unsafe" settings are used
              to speed up write processes to the database. This should only be used for
              very short running criterion functions where the main purpose of the log
              is a real-time dashboard and it would not be catastrophic to get a
              corrupted database in case of a sudden system shutdown. If one evaluation
              of the criterion function (and gradient if applicable) takes more than
              100 ms, the logging overhead is negligible.
            - "if_table_exists": (str) One of "extend", "replace", "raise". What to do
              if the tables we want to write to already exist. Default "extend".
            - "if_database_exists": (str) One of "extend", "replace", "raise". What to
              do if the database we want to write to already exists. Default "extend".
        loglike_kwargs (dict): Additional keyword arguments for loglike.
        derivative (callable): Function takes params and potentially other keyword
            arguments and calculates the first derivative of loglike. It can either
            return a numpy array or pandas Series/DataFrame with the derivative or
            a dictionary with derivatives of each output of loglike. If loglike
            returns a dict but derivative does not, it is your responsibility to
            make sure that the correct derivative for the numerical optimizers you
            are using is returned.
        derivative_kwargs (dict): Additional keyword arguments for derivative.
        loglike_and_derivative (callable): Return a tuple consisting of the result
            of loglike and the result of derivative. Only use this if you can exploit
            synergies in the calculation of loglike and derivative.
        loglike_and_derivative_kwargs (dict): Additional keyword arguments for
            loglike_and_derivative.
        numdiff_options (dict): Keyword arguments for the calculation of numerical
            derivatives for the calculation of standard errors. See
            :ref:`first_derivative` for details.
        jacobian (callable or pandas.DataFrame or False): A function that takes
            ``params`` and potentially other keyword arguments and returns the jacobian
            of loglike["contributions"] with respect to the params. Alternatively, you
            can pass a pandas.DataFrame with the Jacobian at the optimal parameters.
            This is only possible if you pass ``optimize_options=False``. Note that you
            only need to pass a Jacobian function if you have a closed form Jacobian
            but decided not to return it as part of ``derivative`` (e.g. because you
            use a scalar optimizer and can calculate a gradient in a way that is faster
            than calculating and summing the Jacobian). If you pass None, a numerical
            Jacobian will be calculated. If you pass ``False``, you signal that no
            Jacobian should be calculated. Thus, no result that requires the Jacobian
            will be calculated.
        jacobian_kwargs (dict): Additional keyword arguments for the Jacobian function.
        hessian (callable or pd.DataFrame): A function that takes ``params`` and
            potentially other keyword arguments and returns the Hessian of
            loglike["value"] with respect to the params. Alternatively, you can pass
            a pandas.DataFrame with the Hessian at the optimal parameters. This is only
            possible if you pass ``optimize_options=False``. If you pass None, a
            numerical Hessian will be calculated. If you pass ``False``, you signal
            that no Hessian should be calculated. Thus, no result that requires the
            Hessian will be calculated.
        hessian_kwargs (dict): Additional keyword arguments for the Hessian function.
        ci_level (float): Confidence level for the calculation of confidence intervals.
            The default is 0.95.
        n_samples (int): Number of samples used to transform the covariance matrix of
            the internal parameter vector into the covariance matrix of the external
            parameters. For background information about internal and external params
            see :ref:`implementation_of_constraints`. This is only used if you have
            specified constraints.
        bounds_handling (str): One of "clip", "raise", "ignore". Determines how bounds
            are handled. If "clip", confidence intervals are clipped at the bounds.
            Standard errors are only adjusted if a sampling step is necessary due to
            additional constraints. If "raise" and any lower or upper bound is binding,
            we raise an Error. If "ignore", boundary problems are simply ignored.
        design_info (pandas.DataFrame): DataFrame with one row per observation that
            contains some or all of the variables "psu" (primary sampling unit),
            "stratum" and "fpc" (finite population corrector). See
            :ref:`robust_likelihood_inference` for details.

    Returns:
        dict: The estimated parameters, standard errors and covariance matrix of the
            parameters.

    """
    # ==================================================================================
    # Check and process inputs
    # ==================================================================================

    is_optimized = optimize_options is False

    check_optimization_options(
        optimize_options,
        usage="estimate_ml",
        algorithm_mandatory=True,
    )

    jac_case = get_derivative_case(jacobian)
    hess_case = get_derivative_case(hessian)

    check_is_optimized_and_derivative_case(is_optimized, jac_case)
    check_is_optimized_and_derivative_case(is_optimized, hess_case)

    cov_cases = _get_cov_cases(jac_case, hess_case, design_info)

    check_numdiff_options(numdiff_options, "estimate_ml")
    numdiff_options = {} if numdiff_options in (None, False) else numdiff_options

    constraints = [] if constraints is None else constraints
    processed_constraints, _ = process_constraints(constraints, params)

    # ==================================================================================
    # Calculate estimates via maximization (if necessary)
    # ==================================================================================

    if is_optimized:
        estimates = params
    else:
        opt_res = maximize(
            criterion=loglike,
            criterion_kwargs=loglike_kwargs,
            params=params,
            constraints=constraints,
            derivative=derivative,
            derivative_kwargs=derivative_kwargs,
            criterion_and_derivative=loglike_and_derivative,
            criterion_and_derivative_kwargs=loglike_and_derivative_kwargs,
            logging=logging,
            log_options=log_options,
            **optimize_options,
        )
        estimates = opt_res["solution_params"]

    # ==================================================================================
    # Calculate internal jacobian
    # ==================================================================================

    deriv_to_internal = get_derivative_conversion_function(
        params=params, constraints=constraints
    )

    if jac_case == "pre-calculated":
        int_jac = deriv_to_internal(jacobian)
    elif jac_case == "closed-form":
        jacobian_kwargs = {} if jacobian_kwargs is None else jacobian_kwargs
        _jac = jacobian(estimates, **jacobian_kwargs)
        int_jac = deriv_to_internal(_jac)
    # switch to "numerical" even if jac_case == "skip" because jac is required for ml.
    elif jac_case == "numerical":
        options = numdiff_options.copy()
        options["key"] = "contributions"
        deriv_res = get_internal_first_derivative(
            func=loglike,
            params=estimates,
            constraints=constraints,
            func_kwargs=loglike_kwargs,
            numdiff_options=options,
        )
        int_jac = deriv_res["derivative"]
        jac_numdiff_info = {k: v for k, v in deriv_res.items() if k != "derivative"}
    else:
        int_jac = None

    # ==================================================================================
    # Calculate internal Hessian (most of this is not yet implemented)
    # ==================================================================================

    if hess_case == "skip":
        int_hess = None
    elif hess_case == "numerical":
        raise NotImplementedError("Numerical Hessian calculation is not yet supported.")
        hess_numdiff_info = {}  # unreachable while the NotImplementedError above is raised
    elif hess_case in ("closed-form", "pre-calculated") and constraints:
        raise NotImplementedError(
            "Closed-form or pre-calculated Hessians are not yet compatible with "
            "constraints."
        )
    else:
        int_hess = hessian(estimates, **hessian_kwargs)

    # ==================================================================================
    # Calculate all available internal cov types
    # ==================================================================================

    int_covs = {}
    if "jacobian" in cov_cases:
        int_covs["cov_jacobian"] = cov_jacobian(int_jac)
    if "hessian" in cov_cases:
        int_covs["cov_hessian"] = cov_hessian(int_hess)
    if "robust" in cov_cases:
        int_covs["cov_robust"] = cov_robust(jac=int_jac, hess=int_hess)
    if "cluster_robust" in cov_cases:
        int_covs["cov_cluster_robust"] = cov_cluster_robust(
            jac=int_jac, hess=int_hess, design_info=design_info
        )
    if "strata_robust" in cov_cases:
        int_covs["cov_strata_robust"] = cov_strata_robust(
            jac=int_jac, hess=int_hess, design_info=design_info
        )

    # ==================================================================================
    # Calculate all available external covs and summaries
    # ==================================================================================

    covs = {}
    summaries = {}
    for case in cov_cases:
        cov = transform_covariance(
            params=estimates,
            internal_cov=int_covs[f"cov_{case}"],
            constraints=constraints,
            n_samples=n_samples,
            bounds_handling=bounds_handling,
        )
        summary = calculate_inference_quantities(
            params=estimates,
            free_cov=cov,
            ci_level=ci_level,
        )

        covs[f"cov_{case}"] = cov
        summaries[f"summary_{case}"] = summary

    # ==================================================================================
    # Calculate external jac and hess (if no transforming constraints)
    # ==================================================================================

    if not processed_constraints:
        ext_jac = int_jac
        ext_hess = int_hess
    else:
        ext_jac = "No external Jacobian defined due to constraints."
        ext_hess = "No external Hessian defined due to constraints."

    # ==================================================================================
    # Construct output
    # ==================================================================================

    out = {
        **summaries,
        **covs,
        "jacobian": ext_jac,
        "hessian": ext_hess,
    }

    if not is_optimized:
        out["optimize_res"] = opt_res

    if jac_case == "numerical":
        out["jacobian_numdiff_info"] = jac_numdiff_info

    if hess_case == "numerical":
        out["hessian_numdiff_info"] = hess_numdiff_info

    return out


def _get_cov_cases(jac_case, hess_case, design_info):
    if jac_case == "skip" and hess_case == "skip":
        raise ValueError("Jacobian and Hessian cannot both be False.")
    elif jac_case == "skip" and hess_case != "skip":
        cases = ["hessian"]
    elif hess_case == "skip" and jac_case != "skip":
        cases = ["jacobian"]
    else:
        cases = ["jacobian", "hessian", "robust"]

    if design_info is not None:
        if "psu" in design_info:
            cases.append("cluster_robust")
        if {"strata", "psu", "fpc"}.issubset(design_info):
            cases.append("strata_robust")

    return cases
47.934524
88
0.626537
1,841
16,106
5.318848
0.204237
0.019914
0.022467
0.018382
0.231618
0.201389
0.186172
0.141034
0.08701
0.070466
0
0.001297
0.234323
16,106
335
89
48.077612
0.792734
0.551782
0
0.087719
0
0
0.111242
0.003094
0
0
0
0
0
1
0.011696
false
0
0.087719
0
0.111111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f46ca3af523c02675160a6c57c283a2d49c86f50
6,503
py
Python
neural_architecture_search_appendix_a.py
NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a
67e4876d428e5155f5526ee02875b0a89a52305d
[ "MIT" ]
68
2017-01-31T06:35:53.000Z
2021-02-24T09:39:55.000Z
neural_architecture_search_appendix_a.py
NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a
67e4876d428e5155f5526ee02875b0a89a52305d
[ "MIT" ]
3
2017-05-14T13:41:39.000Z
2020-04-21T04:23:50.000Z
neural_architecture_search_appendix_a.py
NunoEdgarGFlowHub/neural_architecture_search_with_reinforcement_learning_appendix_a
67e4876d428e5155f5526ee02875b0a89a52305d
[ "MIT" ]
15
2017-03-16T03:04:46.000Z
2018-07-05T15:07:39.000Z
import six
import chainer
import numpy as np
import chainer.links as L
import chainer.functions as F
import nutszebra_chainer
import functools
from collections import defaultdict


class Conv(nutszebra_chainer.Model):

    def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
        super(Conv, self).__init__(
            conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
        )

    def weight_initialization(self):
        self.conv.W.data = self.weight_relu_initialization(self.conv)
        self.conv.b.data = self.bias_initialization(self.conv, constant=0)

    def __call__(self, x, train=False):
        return self.conv(x)

    def count_parameters(self):
        return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)


class Conv_ReLU_BN(nutszebra_chainer.Model):

    def __init__(self, in_channel, out_channel, filter_size=(3, 3), stride=(1, 1), pad=(1, 1)):
        super(Conv_ReLU_BN, self).__init__(
            conv=L.Convolution2D(in_channel, out_channel, filter_size, stride, pad),
            bn=L.BatchNormalization(out_channel),
        )

    def weight_initialization(self):
        self.conv.W.data = self.weight_relu_initialization(self.conv)
        self.conv.b.data = self.bias_initialization(self.conv, constant=0)

    def __call__(self, x, train=False):
        return self.bn(F.relu(self.conv(x)), test=not train)

    def count_parameters(self):
        return functools.reduce(lambda a, b: a * b, self.conv.W.data.shape)


class AppendixA(nutszebra_chainer.Model):

    def __init__(self, category_num):
        super(AppendixA, self).__init__()
        out_channels = [36, 48, 36, 36, 48, 48, 48, 36, 36, 36, 36, 48, 48, 48, 48]
        #                   0  1  2  3  4  5  6  7  8  9  10 11 12 13 14
        skip_connections = [[0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                            [0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
                            [0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0],
                            [0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
        filters = [(3, 3), (3, 3), (3, 3), (5, 5), (3, 7), (7, 7), (7, 7), (7, 3), (7, 1), (7, 7), (5, 7), (7, 7), (7, 5), (7, 5), (7, 5)]
        modules = []
        in_channel = 3
        for i in six.moves.range(len(out_channels)):
            modules += [('conv{}'.format(i), Conv_ReLU_BN(in_channel, out_channels[i], filters[i], 1, 0))]
            in_channel = int(np.sum([out_channels[ii] for ii, s in enumerate(skip_connections) if s[i] == 1])) + out_channels[i]
        modules += [('linear', Conv(out_channels[-1], category_num, 1, 1, 0))]
        # register layers
        [self.add_link(*link) for link in modules]
        self.modules = modules
        self.category_num = category_num
        self.out_channels = out_channels
        self.skip_connections = skip_connections
        self.filters = filters
        self.name = 'appndix_a_{}'.format(category_num)

    def weight_initialization(self):
        [link.weight_initialization() for _, link in self.modules]

    def count_parameters(self):
        return int(np.sum([link.count_parameters() for _, link in self.modules]))

    @staticmethod
    def _zero_pads(x, pad, axis):
        if type(x.data) is not np.ndarray:
            pad.to_gpu()
        return F.concat((x, pad), axis=axis)

    @staticmethod
    def zero_pads(x, sizes):
        batch, channel, height, width = x.data.shape
        diff_height = sizes[2] - height
        diff_width = sizes[3] - width
        # pad along with height
        if diff_height >= 1:
            pad = chainer.Variable(np.zeros((batch, channel, diff_height, width), dtype=x.dtype), volatile=x.volatile)
            x = AppendixA._zero_pads(x, pad, axis=2)
            _, _, height, _ = x.data.shape
        # pad along with width
        if diff_width >= 1:
            pad = chainer.Variable(np.zeros((batch, channel, height, diff_width), dtype=x.dtype), volatile=x.volatile)
            x = AppendixA._zero_pads(x, pad, axis=3)
        return x

    def _max(a, b):
        return (max(a[0], b[0]), max(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))

    @staticmethod
    def concatenate(X):
        sizes = (0, 0, 0, 0)
        for x in X:
            sizes = AppendixA._max(sizes, x.data.shape)
        X = [AppendixA.zero_pads(x, sizes) for x in X]
        return F.concat(X, axis=1)

    def __call__(self, x, train=False):
        x = [x]
        outputs = []
        for i in six.moves.range(len(self.out_channels)):
            x = self['conv{}'.format(i)](self.concatenate(x), train=train)
            outputs.append(x)
            x = [outputs[ii] for ii, s in enumerate(self.skip_connections) if s[i] == 1] + [outputs[i]]
        x = outputs[-1]
        batch, channels, height, width = x.data.shape
        x = F.reshape(F.average_pooling_2d(x, (height, width)), (batch, channels, 1, 1))
        return F.reshape(self.linear(x, train), (batch, self.category_num))

    def calc_loss(self, y, t):
        loss = F.softmax_cross_entropy(y, t)
        return loss

    def accuracy(self, y, t, xp=np):
        y.to_cpu()
        t.to_cpu()
        indices = np.where((t.data == np.argmax(y.data, axis=1)) == True)[0]
        accuracy = defaultdict(int)
        for i in indices:
            accuracy[t.data[i]] += 1
        indices = np.where((t.data == np.argmax(y.data, axis=1)) == False)[0]
        false_accuracy = defaultdict(int)
        false_y = np.argmax(y.data, axis=1)
        for i in indices:
            false_accuracy[(t.data[i], false_y[i])] += 1
        return accuracy, false_accuracy
42.227273
138
0.531755
1,011
6,503
3.288823
0.138477
0.090226
0.118195
0.137143
0.503158
0.438195
0.393083
0.377744
0.351579
0.350376
0
0.080375
0.311241
6,503
153
139
42.503268
0.661978
0.015685
0
0.192
0
0
0.004704
0
0
0
0
0
0
1
0.144
false
0
0.064
0.048
0.328
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f46df9cfbed7221c6dfc035138710969c22cfd18
1,992
py
Python
MachineLearning/hw1/models/LinearRegression.py
ChoKyuWon/SchoolProjects
71a5decefc85ae941ba2d537c4507ba8e615cc34
[ "MIT" ]
null
null
null
MachineLearning/hw1/models/LinearRegression.py
ChoKyuWon/SchoolProjects
71a5decefc85ae941ba2d537c4507ba8e615cc34
[ "MIT" ]
null
null
null
MachineLearning/hw1/models/LinearRegression.py
ChoKyuWon/SchoolProjects
71a5decefc85ae941ba2d537c4507ba8e615cc34
[ "MIT" ]
null
null
null
import numpy as np


class LinearRegression:
    def __init__(self, num_features):
        self.num_features = num_features
        self.W = np.zeros((self.num_features, 1))

    def train(self, x, y, epochs, batch_size, lr, optim):
        final_loss = None   # loss of final epoch

        # Training should be done for 'epochs' times with minibatch size of 'batch_size'
        # The function 'train' should return the loss of final epoch
        # Loss of an epoch is calculated as an average of minibatch losses
        # ========================= EDIT HERE ========================
        # Each row xline is matched with the n-th target y: f(xline) = yi
        final_loss = 0
        num_data = len(y)
        k = 0

        def dlossF(k, j):
            s = 0
            size = batch_size
            for Xi, Yi in zip(x[k:k + batch_size], y[k:k + batch_size]):
                fx = np.transpose(Xi).dot(self.W)
                s = s + (fx - Yi) * Xi[j]
            if (num_data - k) < batch_size:
                size = num_data - k
            return s / size

        for iterative in range(0, epochs):
            k = k + batch_size
            if k == num_data:
                k = batch_size
            grad = np.zeros((self.num_features, 1))
            for j in range(0, self.num_features):
                grad[j] = dlossF(k, j)
            self.W = optim.update(self.W, grad, lr)
        # ============================================================
        return final_loss

    def eval(self, x):
        pred = None

        # Evaluation Function
        # Given the input 'x', the function should return prediction for 'x'
        # ========================= EDIT HERE ========================
        ylist = []
        for xline in x:
            y = np.transpose(xline).dot(self.W)
            ylist.append(y[0])
        pred = np.array(ylist)
        # ============================================================
        return pred
33.2
89
0.449799
236
1,992
3.690678
0.334746
0.082664
0.086108
0.037887
0.091848
0.052813
0
0
0
0
0
0.006265
0.358936
1,992
59
90
33.762712
0.675803
0.293675
0
0
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0.027778
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f46e88c174121a507ecd5ff0eff0efa5c6c1e776
1,655
py
Python
apps/bc_scraper/actions/schedule.py
aurmeneta/ramos-uc
364ab3c5a55032ab7ffc08665a2da4c5ff04ae58
[ "MIT" ]
7
2021-07-14T18:13:35.000Z
2021-11-21T20:10:54.000Z
apps/bc_scraper/actions/schedule.py
aurmeneta/ramos-uc
364ab3c5a55032ab7ffc08665a2da4c5ff04ae58
[ "MIT" ]
57
2021-07-10T01:31:56.000Z
2022-01-14T02:02:58.000Z
apps/bc_scraper/actions/schedule.py
aurmeneta/ramos-uc
364ab3c5a55032ab7ffc08665a2da4c5ff04ae58
[ "MIT" ]
4
2021-07-23T16:51:55.000Z
2021-08-31T02:41:41.000Z
from copy import copy

DEFAULT_SCHEDULE = {}
for day in "lmwjvs":
    for mod in "12345678":
        DEFAULT_SCHEDULE[day + mod] = "'FREE'"


def process_schedule(text_sc):
    """For a given schedule text in BC format, returns the SQL queries for
    inserting the full schedule and schedule info. Those queries have to format ID.
    """
    ### Full Schedule
    data = text_sc.split("\nROW: ")[1:]
    # data rows -> day-day:module,module <> type <> room <><>
    schedule = copy(DEFAULT_SCHEDULE)
    for row in data:
        row = row.split("<>")[:2]
        horario = row[0].split(":")
        days = horario[0].split("-")
        modules = horario[1].split(",")
        for day in days:
            for mod in modules:
                if len(day) and len(mod):
                    schedule[day.lower() + mod] = "'" + row[1] + "'"

    cols = ",".join(schedule.keys())
    values = ",".join(schedule.values())
    full_sc_query = (
        f"INSERT INTO courses_fullschedule (section_id, {cols}) VALUES (%s, {values});"
    )

    ### Info Schedule
    schedule_info = {"total": 0}
    for type in ["AYU", "CLAS", "LAB", "PRA", "SUP", "TAL", "TER", "TES"]:
        schedule_info[type] = list(schedule.values()).count("'" + type + "'")
        schedule_info["total"] += schedule_info[type]
        schedule_info[type] = str(schedule_info[type])
    schedule_info["total"] = str(schedule_info["total"])

    cols = ",".join(schedule_info.keys())
    values = ",".join(schedule_info.values())
    info_sc_query = (
        f"INSERT INTO courses_scheduleinfo (section_id, {cols}) VALUES (%s, {values});"
    )

    return full_sc_query, info_sc_query
33.77551
87
0.583686
208
1,655
4.509615
0.360577
0.140725
0.072495
0.046908
0.168444
0.108742
0
0
0
0
0
0.012058
0.248338
1,655
48
88
34.479167
0.741961
0.140181
0
0
0
0
0.169044
0
0
0
0
0
0
1
0.029412
false
0
0.029412
0
0.088235
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f46f4f4b92656a15af396d51e27d17942b2af4aa
9,739
py
Python
openstack_dashboard/dashboards/admin/volumes/views.py
NunoEdgarGFlowHub/horizon
73a0bbd43ea78ac5337f7d00977ec5f32452067e
[ "Apache-2.0" ]
1
2018-04-17T02:32:05.000Z
2018-04-17T02:32:05.000Z
openstack_dashboard/dashboards/admin/volumes/views.py
NunoEdgarGFlowHub/horizon
73a0bbd43ea78ac5337f7d00977ec5f32452067e
[ "Apache-2.0" ]
3
2021-01-21T14:27:55.000Z
2021-06-10T23:08:49.000Z
openstack_dashboard/dashboards/admin/volumes/views.py
Surfndez/horizon
a56765b6b3dbc09fd467b83a57bea2433ae3909e
[ "Apache-2.0" ]
null
null
null
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Admin views for managing volumes and snapshots.
"""

from collections import OrderedDict

from django.conf import settings
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized

from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.dashboards.admin.volumes \
    import forms as volumes_forms
from openstack_dashboard.dashboards.admin.volumes \
    import tables as volumes_tables
from openstack_dashboard.dashboards.admin.volumes \
    import tabs as volumes_tabs
from openstack_dashboard.dashboards.project.volumes \
    import views as volumes_views


class VolumesView(tables.PagedTableMixin, volumes_views.VolumeTableMixIn,
                  tables.DataTableView):
    table_class = volumes_tables.VolumesTable
    page_title = _("Volumes")

    FILTERS_MAPPING = {'bootable': {_('yes'): 'true', _('no'): 'false'},
                       'encrypted': {_('yes'): True, _('no'): False}}

    def get_data(self):
        default_filters = {'all_tenants': True}

        filters = self.get_filters(default_filters.copy())
        filter_first = getattr(settings, 'FILTER_DATA_FIRST', {})
        volumes = []

        self.table.needs_filter_first = False

        if filter_first.get('admin.volumes', False) and \
                len(filters) == len(default_filters):
            self.table.needs_filter_first = True
            return volumes

        if 'project' in filters:
            # Keystone returns a tuple ([],false) where the first element is
            # tenant list that's why the 0 is hardcoded below
            tenants = keystone.tenant_list(self.request)[0]
            tenant_ids = [t.id for t in tenants
                          if t.name == filters['project']]
            if not tenant_ids:
                return []
            del filters['project']
            for id in tenant_ids:
                filters['project_id'] = id
                volumes += self._get_volumes(search_opts=filters)
        else:
            volumes = self._get_volumes(search_opts=filters)

        attached_instance_ids = self._get_attached_instance_ids(volumes)
        instances = self._get_instances(search_opts={'all_tenants': True},
                                        instance_ids=attached_instance_ids)
        volume_ids_with_snapshots = self._get_volumes_ids_with_snapshots(
            search_opts={'all_tenants': True})
        self._set_volume_attributes(
            volumes, instances, volume_ids_with_snapshots)

        # Gather our tenants to correlate against IDs
        try:
            tenants, has_more = keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _('Unable to retrieve volume project information.')
            exceptions.handle(self.request, msg)

        tenant_dict = OrderedDict([(t.id, t) for t in tenants])
        for volume in volumes:
            tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
            tenant = tenant_dict.get(tenant_id, None)
            volume.tenant_name = getattr(tenant, "name", None)

        return volumes

    def get_filters(self, filters):
        self.table = self._tables['volumes']
        self.handle_server_filter(self.request, table=self.table)
        self.update_server_filter_action(self.request, table=self.table)
        filters = super(VolumesView, self).get_filters(filters,
                                                       self.FILTERS_MAPPING)
        return filters


class DetailView(volumes_views.DetailView):
    tab_group_class = volumes_tabs.VolumeDetailTabs

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        table = volumes_tables.VolumesTable(self.request)
        context["actions"] = table.render_row_actions(context["volume"])
        return context

    def get_search_opts(self, volume):
        search_opts = super(DetailView, self).get_search_opts(volume)
        search_opts['all_tenants'] = True
        return search_opts

    def get_redirect_url(self):
        return reverse('horizon:admin:volumes:index')


class ManageVolumeView(forms.ModalFormView):
    form_class = volumes_forms.ManageVolume
    template_name = 'admin/volumes/manage_volume.html'
    form_id = "manage_volume_modal"
    submit_label = _("Manage")
    success_url = reverse_lazy('horizon:admin:volumes:index')
    submit_url = reverse_lazy('horizon:admin:volumes:manage')
    cancel_url = reverse_lazy("horizon:admin:volumes:index")
    page_title = _("Manage Volume")

    def get_context_data(self, **kwargs):
        context = super(ManageVolumeView, self).get_context_data(**kwargs)
        return context


class UnmanageVolumeView(forms.ModalFormView):
    form_class = volumes_forms.UnmanageVolume
    template_name = 'admin/volumes/unmanage_volume.html'
    form_id = "unmanage_volume_modal"
    submit_label = _("Unmanage")
    success_url = reverse_lazy('horizon:admin:volumes:index')
    submit_url = 'horizon:admin:volumes:unmanage'
    cancel_url = reverse_lazy("horizon:admin:volumes:index")
    page_title = _("Unmanage Volume")

    def get_context_data(self, **kwargs):
        context = super(UnmanageVolumeView, self).get_context_data(**kwargs)
        args = (self.kwargs['volume_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_data(self):
        try:
            volume_id = self.kwargs['volume_id']
            volume = cinder.volume_get(self.request, volume_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve volume details.'),
                              redirect=self.success_url)
        return volume

    def get_initial(self):
        volume = self.get_data()
        return {'volume_id': self.kwargs["volume_id"],
                'name': volume.name,
                'host': getattr(volume, "os-vol-host-attr:host")}


class MigrateVolumeView(forms.ModalFormView):
    form_class = volumes_forms.MigrateVolume
    template_name = 'admin/volumes/migrate_volume.html'
    form_id = "migrate_volume_modal"
    submit_label = _("Migrate")
    success_url = reverse_lazy('horizon:admin:volumes:index')
    submit_url = 'horizon:admin:volumes:migrate'
    cancel_url = reverse_lazy("horizon:admin:volumes:index")
    page_title = _("Migrate Volume")

    def get_context_data(self, **kwargs):
        context = super(MigrateVolumeView, self).get_context_data(**kwargs)
        args = (self.kwargs['volume_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_data(self):
        try:
            volume_id = self.kwargs['volume_id']
            volume = cinder.volume_get(self.request, volume_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve volume details.'),
                              redirect=self.success_url)
        return volume

    @memoized.memoized_method
    def get_hosts(self):
        try:
            return cinder.pool_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve pools information.'),
                              redirect=self.success_url)

    def get_initial(self):
        volume = self.get_data()
        return {'volume_id': self.kwargs["volume_id"],
                'name': volume.name,
                'current_host': getattr(volume, "os-vol-host-attr:host"),
                'hosts': self.get_hosts()}


class UpdateStatusView(forms.ModalFormView):
    form_class = volumes_forms.UpdateStatus
    modal_id = "update_volume_status_modal"
    template_name = 'admin/volumes/update_status.html'
    submit_label = _("Update Status")
    submit_url = "horizon:admin:volumes:update_status"
    success_url = reverse_lazy('horizon:admin:volumes:index')
    page_title = _("Update Volume Status")

    def get_context_data(self, **kwargs):
        context = super(UpdateStatusView, self).get_context_data(**kwargs)
        context["volume_id"] = self.kwargs['volume_id']
        args = (self.kwargs['volume_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    @memoized.memoized_method
    def get_data(self):
        try:
            volume_id = self.kwargs['volume_id']
            volume = cinder.volume_get(self.request, volume_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve volume details.'),
                              redirect=self.success_url)
        return volume

    def get_initial(self):
        volume = self.get_data()
        return {'volume_id': self.kwargs["volume_id"],
                'status': volume.status}
38.34252
78
0.652736
1,122
9,739
5.442068
0.191622
0.039306
0.03734
0.029479
0.464297
0.381756
0.347036
0.310023
0.284966
0.24566
0
0.00137
0.250539
9,739
253
79
38.494071
0.835183
0.080193
0
0.398964
0
0
0.142026
0.065585
0
0
0
0
0
1
0.082902
false
0
0.07772
0.005181
0.466321
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f471777a68cf3b70989f0f48f2b4ea4d759a30a8
5,382
py
Python
rasa-sample/actions.py
ijufumi/demo-python
b48bdebde172ca581a48346a77b12c30ff202e73
[ "MIT" ]
null
null
null
rasa-sample/actions.py
ijufumi/demo-python
b48bdebde172ca581a48346a77b12c30ff202e73
[ "MIT" ]
null
null
null
rasa-sample/actions.py
ijufumi/demo-python
b48bdebde172ca581a48346a77b12c30ff202e73
[ "MIT" ]
null
null
null
import re
from typing import Any, Text, Dict, List

from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet

import lark_module


class ActionHelloWorld(Action):
    state_map = {}

    def name(self) -> Text:
        return "action_hello_world"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        state = tracker.current_state()
        print("current_state: {}".format(state))
        sender_id = state["sender_id"]
        if sender_id not in self.state_map:
            self.state_map[sender_id] = 0
        self.state_map[sender_id] += 1

        dispatcher.utter_message(
            text="Hello World!",
            json_message={"data": "hogeohge"},
            # template="<div></div>",
            buttons=[{"title": "OK", "payload": "99!"}])

        print("state: {}".format(self.state_map[sender_id]))
        return []


class ActionCustomButton(Action):
    def name(self) -> Text:
        return "action_custom_button"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(
            text="Which ?",
            buttons=[{"title": "OK", "payload": "1"},
                     {"title": "NG", "payload": "2"},
                     {"title": "Unknown", "payload": "9"}])
        return []


class ActionJsonMessage(Action):
    def name(self) -> Text:
        return "action_json_message"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        dispatcher.utter_message(
            text="Which ?",
            json_message={"data": {
                "key1": "value1",
                "key2": "value2",
            }}
        )
        return []


class ActionConversation(Action):
    def name(self) -> Text:
        return "action_conversation"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        state = tracker.current_state()
        print("current_state: {}".format(state))
        input_text = state['latest_message'].get('text')
        latest_bot = None
        for event in reversed(state['events']):
            if event['event'] == 'bot':
                data = event.get('data', {}).get('custom', {}).get('data', [])
                latest_bot = data[0] if len(data) > 0 else None
                break
        print("latest_bot: {}".format(latest_bot))
        if not latest_bot:
            print("use utter_conversation_1")
            dispatcher.utter_message(template="utter_conversation_1",
                                     json_message={"data": {"key1": "value1", "key2": "value2"}})
        else:
            if latest_bot == 'conversation_1':
                print("use utter_conversation_2")
                dispatcher.utter_message(template="utter_conversation_2",
                                         json_message={"data": ["conversation_2"]})
            elif latest_bot == 'conversation_2':
                result = re.match("\\d+", input_text)
                if result:
                    print("use utter_conversation_3")
                    dispatcher.utter_message(template="utter_conversation_3",
                                             json_message={"data": ["conversation_3"]})
                else:
                    print("use utter_conversation_2")
                    dispatcher.utter_message(template="utter_conversation_2",
                                             json_message={"data": ["conversation_2"]})
            elif latest_bot == 'conversation_3':
                result = re.match("\\d+", input_text)
                if not result:
                    print("use utter_conversation_3")
                    dispatcher.utter_message(template="utter_conversation_3",
                                             json_message={"data": ["conversation_3"]})
                else:
                    dispatcher.utter_message(text="Bye",
                                             json_message={"data": ["conversation_3"]})
            else:
                dispatcher.utter_message(text="Bye",
                                         json_message={"data": ["conversation_3"]})
        return []


class ActionConversation2(Action):
    action_state = {}

    def name(self) -> Text:
        return "action_conversation2"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        state = tracker.current_state()
        sender_id = state.get("sender_id")
        current_action = self.action_state.get(sender_id)
        input_text = state['latest_message'].get('text')
        print("state: {}, current_action: {}".format(state, current_action))
        if current_action:
            result = lark_module.execute(input_text)
            if result:
                dispatcher.utter_message(text=result,
                                         json_message={"data": ["step2"]},
                                         elements=[{"data": ["step2"]}])
            else:
                dispatcher.utter_message(text="Bye",
                                         json_message={"data": ["step3"]})
        else:
            dispatcher.utter_message(text="Where are you from ?",
                                     json_message={"data": ["step3"]})
            self.action_state[sender_id] = "get_start"
        return []
35.642384
120
0.556856
544
5,382
5.306985
0.176471
0.067544
0.099065
0.072047
0.603395
0.57222
0.537236
0.437825
0.437825
0.421198
0
0.01129
0.308807
5,382
150
121
35.88
0.764785
0.004274
0
0.491379
0
0
0.162218
0
0
0
0
0
0
1
0.086207
false
0
0.051724
0.043103
0.284483
0.086207
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f471a2c4554505f4474a4ceb98a24f55991c2cdc
1,557
py
Python
parsers/politico.py
plympton/newsdiffs
2a055850bda850b9b6c28c989512d4e4b3e9b64e
[ "MIT" ]
null
null
null
parsers/politico.py
plympton/newsdiffs
2a055850bda850b9b6c28c989512d4e4b3e9b64e
[ "MIT" ]
null
null
null
parsers/politico.py
plympton/newsdiffs
2a055850bda850b9b6c28c989512d4e4b3e9b64e
[ "MIT" ]
null
null
null
from baseparser import BaseParser, grab_url, logger

# Different versions of BeautifulSoup have different properties.
# Some work with one site, some with another.

# This is BeautifulSoup 3.2.
from BeautifulSoup import BeautifulSoup
# This is BeautifulSoup 4
import bs4


class PoliticoParser(BaseParser):
    domains = ['www.politico.com']

    feeder_pat = '^http://www.politico.com/(news/stories|story)/'
    feeder_pages = ['http://www.politico.com/']

    feeder_bs = bs4.BeautifulSoup

    def _parse(self, html):
        soup = bs4.BeautifulSoup(html)
        print_link = soup.findAll('a', text='Print')[0].get('href')
        html2 = grab_url(print_link)
        logger.debug('got html 2')
        # Now we have to switch back to bs3. Hilarious.
        # and the labeled encoding is wrong, so force utf-8.
        soup = BeautifulSoup(html2,
                             convertEntities=BeautifulSoup.HTML_ENTITIES,
                             fromEncoding='utf-8')

        self.meta = soup.findAll('meta')
        p_tags = soup.findAll('p')[1:]
        real_p_tags = [p for p in p_tags
                       if not p.findAll(attrs={'class': "twitter-follow-button"})]

        self.title = soup.find('strong').getText()
        entity = soup.find('span', attrs={'class': 'author'})
        children = list(entity.childGenerator())
        try:
            self.byline = 'By ' + children[1].getText()
        except IndexError:
            self.byline = ''
        self.date = children[-1].strip()

        self.body = '\n' + '\n\n'.join([p.getText() for p in real_p_tags])
35.386364
80
0.620424
196
1,557
4.852041
0.540816
0.02103
0.044164
0.042061
0
0
0
0
0
0
0
0.013664
0.247913
1,557
43
81
36.209302
0.798463
0.163776
0
0
0
0
0.132819
0.016216
0.035714
0
0
0
0
1
0.035714
false
0
0.107143
0
0.321429
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f472e924139d73818eedf6b97de856c2ca049e7a
6,535
py
Python
integration-tests/bats/server_multiclient_test.py
fairhopeweb/dolt
276b85b7b1287f883640ef3fcacb0bdb112749b2
[ "Apache-2.0" ]
2
2021-03-09T07:32:40.000Z
2021-06-11T21:41:30.000Z
integration-tests/bats/server_multiclient_test.py
albertusortiz/dolt
38fc4fcb0357a56eb97abdb25296f45571a5418f
[ "Apache-2.0" ]
null
null
null
integration-tests/bats/server_multiclient_test.py
albertusortiz/dolt
38fc4fcb0357a56eb97abdb25296f45571a5418f
[ "Apache-2.0" ]
1
2021-08-06T13:05:57.000Z
2021-08-06T13:05:57.000Z
import os
import sys

from queue import Queue
from threading import Thread

from helper.pytest import DoltConnection

# Utility functions

def print_err(e):
    print(e, file=sys.stderr)

def query(dc, query_str):
    return dc.query(query_str, False)

def query_with_expected_error(dc, non_error_msg, query_str):
    try:
        dc.query(query_str, False)
        raise Exception(non_error_msg)
    except:
        pass

def row(pk, c1, c2):
    return {"pk": str(pk), "c1": str(c1), "c2": str(c2)}

UPDATE_BRANCH_FAIL_MSG = "Failed to update branch"

def commit_and_update_branch(dc, commit_message, expected_hashes, branch_name):
    expected_hash = "("
    for i, eh in enumerate(expected_hashes):
        if i != 0:
            expected_hash += " or "
        expected_hash += "hash = %s" % eh
    expected_hash += ")"

    query_str = 'UPDATE dolt_branches SET hash = Commit("-m", "%s") WHERE name = "%s" AND %s' % (commit_message, branch_name, expected_hash)
    _, row_count = query(dc, query_str)

    if row_count != 1:
        raise Exception(UPDATE_BRANCH_FAIL_MSG)

    query(dc, 'SET @@repo1_head=HASHOF("%s");' % branch_name)

def query_and_test_results(dc, query_str, expected):
    results, _ = query(dc, query_str)
    if results != expected:
        # Bug fix: the original passed str() (an empty string) where str(expected) was intended.
        raise Exception("Unexpected results for query:\n\t%s\nExpected:\n\t%s\nActual:\n\t%s" % (query_str, str(expected), str(results)))

def resolve_theirs(dc):
    query_str = "REPLACE INTO test (pk, c1, c2) SELECT their_pk, their_c1, their_c2 FROM dolt_conflicts_test WHERE their_pk IS NOT NULL;"
    query(dc, query_str)

    query_str = """DELETE FROM test WHERE pk in (
        SELECT base_pk FROM dolt_conflicts_test WHERE their_pk IS NULL
    );"""
    query(dc, query_str)

    query(dc, "DELETE FROM dolt_conflicts_test")

def create_branch(dc, branch_name):
    query_str = 'INSERT INTO dolt_branches (name, hash) VALUES ("%s", @@repo1_head);' % branch_name
    _, row_count = query(dc, query_str)

    if row_count != 1:
        raise Exception("Failed to create branch")

# work functions

def connect(dc):
    dc.connect()

def create_tables(dc):
    query(dc, 'SET @@repo1_head=HASHOF("master");')
    query(dc, """
CREATE TABLE test (
    pk INT NOT NULL,
    c1 INT,
    c2 INT,
    PRIMARY KEY(pk));""")
    commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
    query_and_test_results(dc, "SHOW TABLES;", [{"Table": "test"}])

def duplicate_table_create(dc):
    query(dc, 'SET @@repo1_head=HASHOF("master");')
    query_with_expected_error(dc, "Should have failed creating duplicate table", """
CREATE TABLE test (
    pk INT NOT NULL,
    c1 INT,
    c2 INT,
    PRIMARY KEY(pk));""")

def seed_master(dc):
    query(dc, 'SET @@repo1_head=HASHOF("master");')
    _, row_count = query(dc, 'INSERT INTO test VALUES (0,0,0),(1,1,1),(2,2,2)')

    if row_count != 3:
        raise Exception("Failed to update rows")

    commit_and_update_branch(dc, "Seeded initial data", ["@@repo1_head"], "master")
    expected = [row(0, 0, 0), row(1, 1, 1), row(2, 2, 2)]
    query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)

def modify_pk0_on_master_and_commit(dc):
    query(dc, 'SET @@repo1_head=HASHOF("master");')
    query(dc, "UPDATE test SET c1=1 WHERE pk=0;")
    commit_and_update_branch(dc, "set c1 to 1", ["@@repo1_head"], "master")

def modify_pk0_on_master_no_commit(dc):
    query(dc, 'SET @@repo1_head=HASHOF("master");')
    query(dc, "UPDATE test SET c1=2 WHERE pk=0")

def fail_to_commit(dc):
    try:
        commit_and_update_branch(dc, "Created tables", ["@@repo1_head"], "master")
        raise Exception("Failed to fail commit")
    except Exception as e:
        if str(e) != UPDATE_BRANCH_FAIL_MSG:
            raise e

def commit_to_feature(dc):
    create_branch(dc, "feature")
    commit_and_update_branch(dc, "set c1 to 2", ["@@repo1_head"], "feature")

def merge_resolve_commit(dc):
    query(dc, 'SET @@repo1_head=Merge("master");')
    query_and_test_results(dc, "SELECT * from dolt_conflicts;", [{"table": "test", "num_conflicts": "1"}])
    resolve_theirs(dc)
    expected = [row(0, 1, 0), row(1, 1, 1), row(2, 2, 2)]
    query_and_test_results(dc, "SELECT pk, c1, c2 FROM test ORDER BY pk", expected)
    commit_and_update_branch(dc, "resolved conflicts", ['HASHOF("HEAD^1")', 'HASHOF("HEAD^2")'], "master")

# test script

MAX_SIMULTANEOUS_CONNECTIONS = 2
PORT_STR = sys.argv[1]

CONNECTIONS = [None] * MAX_SIMULTANEOUS_CONNECTIONS
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
    CONNECTIONS[i] = DoltConnection(port=int(PORT_STR), database="repo1", user='dolt', auto_commit=False)

WORK_QUEUE = Queue()

# work item run by workers
class WorkItem(object):
    def __init__(self, dc, *work_funcs):
        self.dc = dc
        self.work_funcs = work_funcs
        self.exception = None

# worker thread function
def worker():
    while True:
        try:
            item = WORK_QUEUE.get()
            for work_func in item.work_funcs:
                work_func(item.dc)
            WORK_QUEUE.task_done()
        except Exception as e:
            # Bug fix: the original assigned to the module-level name work_item;
            # the exception belongs on the item this worker just processed.
            item.exception = e
            WORK_QUEUE.task_done()

# start the worker threads
for i in range(MAX_SIMULTANEOUS_CONNECTIONS):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

# This defines the actual test script. Each stage in the script has a list of work items. Each work item
# in a stage should have a different connection associated with it. Each connection's work is done in
# parallel, and each of the work functions for a connection is executed in order.
work_item_stages = [
    [WorkItem(CONNECTIONS[0], connect, create_tables)],
    [WorkItem(CONNECTIONS[0], seed_master), WorkItem(CONNECTIONS[1], connect, duplicate_table_create)],
    [WorkItem(CONNECTIONS[0], modify_pk0_on_master_and_commit), WorkItem(CONNECTIONS[1], modify_pk0_on_master_no_commit)],
    [WorkItem(CONNECTIONS[1], fail_to_commit, commit_to_feature, merge_resolve_commit)]
]

# Loop through the work item stages, executing each stage by sending the work items for the stage to the
# worker threads and then waiting for all of them to finish before moving on to the next one. Checks for
# an error after every stage.
for stage, work_items in enumerate(work_item_stages):
    print("Running stage %d / %d" % (stage, len(work_item_stages)))
    for work_item in work_items:
        WORK_QUEUE.put(work_item)

    WORK_QUEUE.join()

    for work_item in work_items:
        if work_item.exception is not None:
            print_err(work_item.exception)
            sys.exit(1)
32.839196
140
0.680643
984
6,535
4.303862
0.197154
0.029752
0.01889
0.034711
0.320897
0.274616
0.212987
0.189138
0.150649
0.141677
0
0.016885
0.19342
6,535
198
141
33.005051
0.786568
0.096557
0
0.258993
0
0.028777
0.25594
0.046164
0
0
0
0
0
1
0.136691
false
0.007194
0.035971
0.014388
0.194245
0.028777
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f475a7baedbb00d2706f41a680754762b1e5e2d7
6,599
py
Python
oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
oscar/lib/python2.7/site-packages/prompt_toolkit/utils.py
sainjusajan/django-oscar
466e8edc807be689b0a28c9e525c8323cc48b8e1
[ "BSD-3-Clause" ]
null
null
null
from __future__ import unicode_literals

import inspect
import os
import signal
import sys
import threading
import weakref

from wcwidth import wcwidth
from six.moves import range

__all__ = (
    'Event',
    'DummyContext',
    'get_cwidth',
    'suspend_to_background_supported',
    'is_conemu_ansi',
    'is_windows',
    'in_main_thread',
    'take_using_weights',
    'test_callable_args',
)


class Event(object):
    """
    Simple event to which event handlers can be attached. For instance::

        class Cls:
            def __init__(self):
                # Define event. The first parameter is the sender.
                self.event = Event(self)

        obj = Cls()

        def handler(sender):
            pass

        # Add event handler by using the += operator.
        obj.event += handler

        # Fire event.
        obj.event()
    """
    def __init__(self, sender, handler=None):
        self.sender = sender
        self._handlers = []

        if handler is not None:
            self += handler

    def __call__(self):
        " Fire event. "
        for handler in self._handlers:
            handler(self.sender)

    def fire(self):
        " Alias for just calling the event. "
        self()

    def __iadd__(self, handler):
        """
        Add another handler to this callback.
        (Handler should be a callable that takes exactly one parameter: the
        sender object.)
        """
        # Test handler.
        assert callable(handler)
        if not test_callable_args(handler, [None]):
            raise TypeError("%r doesn't take exactly one argument." % handler)

        # Add to list of event handlers.
        self._handlers.append(handler)
        return self

    def __isub__(self, handler):
        """
        Remove a handler from this callback.
        """
        self._handlers.remove(handler)
        return self


# Cache of signatures. Improves the performance of `test_callable_args`.
_signatures_cache = weakref.WeakKeyDictionary()


def test_callable_args(func, args):
    """
    Return True when this function can be called with the given arguments.
    """
    assert isinstance(args, (list, tuple))
    signature = getattr(inspect, 'signature', None)

    if signature is not None:
        # For Python 3, use inspect.signature.
        try:
            sig = _signatures_cache[func]
        except KeyError:
            sig = signature(func)
            _signatures_cache[func] = sig

        try:
            sig.bind(*args)
        except TypeError:
            return False
        else:
            return True
    else:
        # For older Python versions, fall back to using getargspec.
        spec = inspect.getargspec(func)

        # Drop the 'self'.
        def drop_self(spec):
            args, varargs, varkw, defaults = spec
            if args[0:1] == ['self']:
                args = args[1:]
            return inspect.ArgSpec(args, varargs, varkw, defaults)

        spec = drop_self(spec)

        # When taking *args, always return True.
        if spec.varargs is not None:
            return True

        # Test whether the given amount of args is between the min and max
        # accepted argument counts.
        return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)


class DummyContext(object):
    """
    (contextlib.nested is not available on Py3)
    """
    def __enter__(self):
        pass

    def __exit__(self, *a):
        pass


class _CharSizesCache(dict):
    """
    Cache for wcwidth sizes.
    """
    def __missing__(self, string):
        # Note: We use the `max(0, ...` because some non-printable control
        #       characters, like e.g. Ctrl-underscore, get a -1 wcwidth value.
        #       It is possible that these characters end up in the input text.
        if len(string) == 1:
            result = max(0, wcwidth(string))
        else:
            result = sum(max(0, wcwidth(c)) for c in string)

        # Cache for short strings.
        # (It's hard to tell what we can consider short...)
        if len(string) < 256:
            self[string] = result

        return result


_CHAR_SIZES_CACHE = _CharSizesCache()


def get_cwidth(string):
    """
    Return width of a string. Wrapper around ``wcwidth``.
    """
    return _CHAR_SIZES_CACHE[string]


def suspend_to_background_supported():
    """
    Returns `True` when the Python implementation supports
    suspend-to-background. This is typically `False` on Windows systems.
    """
    return hasattr(signal, 'SIGTSTP')


def is_windows():
    """
    True when we are using Windows.
    """
    return sys.platform.startswith('win')  # E.g. 'win32', not 'darwin' or 'linux2'


def is_conemu_ansi():
    """
    True when the ConEmu Windows console is used.
    """
    return is_windows() and os.environ.get('ConEmuANSI', 'OFF') == 'ON'


def in_main_thread():
    """
    True when the current thread is the main thread.
    """
    return threading.current_thread().__class__.__name__ == '_MainThread'


def take_using_weights(items, weights):
    """
    Generator that keeps yielding items from the items list, in proportion to
    their weight. For instance::

        # Getting the first 70 items from this generator should have yielded 10
        # times A, 20 times B and 40 times C, all distributed equally.
        take_using_weights(['A', 'B', 'C'], [5, 10, 20])

    :param items: List of items to take from.
    :param weights: Integers representing the weight. (Numbers have to be
        integers, not floats.)
    """
    assert isinstance(items, list)
    assert isinstance(weights, list)
    assert all(isinstance(i, int) for i in weights)
    assert len(items) == len(weights)
    assert len(items) > 0

    already_taken = [0 for i in items]
    item_count = len(items)
    max_weight = max(weights)

    i = 0
    while True:
        # Each iteration of this loop, we fill up by (total_weight / max_weight).
        adding = True
        while adding:
            adding = False

            for item_i, item, weight in zip(range(item_count), items, weights):
                if already_taken[item_i] < i * weight / float(max_weight):
                    yield item
                    already_taken[item_i] += 1
                    adding = True

        i += 1
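A minimal usage sketch (not part of the original module; the names `fired` and `on_event` are illustrative), exercising `Event` dispatch and the `take_using_weights` proportions promised by its docstring, assuming the definitions above are in scope:

    from itertools import islice
    from collections import Counter

    # --- Event: attach a one-argument handler and fire it. ---
    fired = []

    def on_event(sender):
        fired.append(sender)

    event = Event(sender='demo')   # any object can be the sender
    event += on_event              # `+=` validates the handler's arity
    event()                        # fire: calls on_event('demo')
    assert fired == ['demo']

    # --- take_using_weights: the first 70 items split 10/20/40 for
    #     weights 5:10:20, exactly as the docstring above promises. ---
    counts = Counter(islice(take_using_weights(['A', 'B', 'C'], [5, 10, 20]), 70))
    assert counts == Counter(A=10, B=20, C=40)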
27.381743
88
0.575542
772
6,599
4.76943
0.331606
0.010864
0.017382
0.015209
0.015209
0
0
0
0
0
0
0.007738
0.334142
6,599
240
89
27.495833
0.830223
0.340052
0
0.114035
0
0
0.068948
0.008035
0
0
0
0
0.061404
1
0.140351
false
0.017544
0.078947
0
0.359649
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f476ce15c4cf3ddf393197690eec2e823de61189
92,209
py
Python
lmdb/cffi.py
hirnimeshrampuresoftware/py-lmdb
9aa7560f8e1a89b437fb3fed7ea36f5888b7a963
[ "OLDAP-2.8" ]
185
2019-06-18T15:58:49.000Z
2022-03-09T09:42:57.000Z
lmdb/cffi.py
hirnimeshrampuresoftware/py-lmdb
9aa7560f8e1a89b437fb3fed7ea36f5888b7a963
[ "OLDAP-2.8" ]
114
2019-06-15T04:19:04.000Z
2022-03-30T06:34:44.000Z
lmdb/cffi.py
hirnimeshrampuresoftware/py-lmdb
9aa7560f8e1a89b437fb3fed7ea36f5888b7a963
[ "OLDAP-2.8" ]
32
2019-07-03T23:56:58.000Z
2022-02-12T04:46:16.000Z
# # Copyright 2013 The py-lmdb authors, all rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted only as authorized by the OpenLDAP # Public License. # # A copy of this license is available in the file LICENSE in the # top-level directory of the distribution or, alternatively, at # <http://www.OpenLDAP.org/license.html>. # # OpenLDAP is a registered trademark of the OpenLDAP Foundation. # # Individual files and/or contributed packages may be copyright by # other parties and/or subject to additional restrictions. # # This work also contains materials derived from public sources. # # Additional information about OpenLDAP can be obtained at # <http://www.openldap.org/>. # """ CPython/CFFI wrapper for OpenLDAP's "Lightning" MDB database. Please see https://lmdb.readthedocs.io/ """ from __future__ import absolute_import from __future__ import with_statement import errno import inspect import os import sys import threading is_win32 = sys.platform == 'win32' if is_win32: import msvcrt try: import __builtin__ except ImportError: import builtins as __builtin__ # type: ignore import lmdb try: from lmdb import _config except ImportError: _config = None # type: ignore __all__ = [ 'Cursor', 'Environment', 'Transaction', '_Database', 'enable_drop_gil', 'version', ] __all__ += [ 'BadDbiError', 'BadRslotError', 'BadTxnError', 'BadValsizeError', 'CorruptedError', 'CursorFullError', 'DbsFullError', 'DiskError', 'Error', 'IncompatibleError', 'InvalidError', 'InvalidParameterError', 'KeyExistsError', 'LockError', 'MapFullError', 'MapResizedError', 'MemoryError', 'NotFoundError', 'PageFullError', 'PageNotFoundError', 'PanicError', 'ReadersFullError', 'ReadonlyError', 'TlsFullError', 'TxnFullError', 'VersionMismatchError', ] # Handle moronic Python 3 mess. UnicodeType = getattr(__builtin__, 'unicode', str) BytesType = getattr(__builtin__, 'bytes', str) O_0755 = int('0755', 8) O_0111 = int('0111', 8) EMPTY_BYTES = UnicodeType().encode() # Used to track context across CFFI callbacks. _callbacks = threading.local() _CFFI_CDEF = ''' typedef int mode_t; typedef ... MDB_env; typedef struct MDB_txn MDB_txn; typedef struct MDB_cursor MDB_cursor; typedef unsigned int MDB_dbi; enum MDB_cursor_op { MDB_FIRST, MDB_FIRST_DUP, MDB_GET_BOTH, MDB_GET_BOTH_RANGE, MDB_GET_CURRENT, MDB_GET_MULTIPLE, MDB_LAST, MDB_LAST_DUP, MDB_NEXT, MDB_NEXT_DUP, MDB_NEXT_MULTIPLE, MDB_NEXT_NODUP, MDB_PREV, MDB_PREV_DUP, MDB_PREV_NODUP, MDB_SET, MDB_SET_KEY, MDB_SET_RANGE, ... 
}; typedef enum MDB_cursor_op MDB_cursor_op; struct MDB_val { size_t mv_size; void *mv_data; ...; }; typedef struct MDB_val MDB_val; struct MDB_stat { unsigned int ms_psize; unsigned int ms_depth; size_t ms_branch_pages; size_t ms_leaf_pages; size_t ms_overflow_pages; size_t ms_entries; ...; }; typedef struct MDB_stat MDB_stat; struct MDB_envinfo { void *me_mapaddr; size_t me_mapsize; size_t me_last_pgno; size_t me_last_txnid; unsigned int me_maxreaders; unsigned int me_numreaders; ...; }; typedef struct MDB_envinfo MDB_envinfo; typedef int (*MDB_cmp_func)(const MDB_val *a, const MDB_val *b); typedef void (*MDB_rel_func)(MDB_val *item, void *oldptr, void *newptr, void *relctx); char *mdb_strerror(int err); int mdb_env_create(MDB_env **env); int mdb_env_open(MDB_env *env, const char *path, unsigned int flags, mode_t mode); int mdb_env_copy2(MDB_env *env, const char *path, int flags); int mdb_env_copyfd2(MDB_env *env, int fd, int flags); int mdb_env_stat(MDB_env *env, MDB_stat *stat); int mdb_env_info(MDB_env *env, MDB_envinfo *stat); int mdb_env_get_maxkeysize(MDB_env *env); int mdb_env_sync(MDB_env *env, int force); void mdb_env_close(MDB_env *env); int mdb_env_set_flags(MDB_env *env, unsigned int flags, int onoff); int mdb_env_get_flags(MDB_env *env, unsigned int *flags); int mdb_env_get_path(MDB_env *env, const char **path); int mdb_env_set_mapsize(MDB_env *env, size_t size); int mdb_env_set_maxreaders(MDB_env *env, unsigned int readers); int mdb_env_get_maxreaders(MDB_env *env, unsigned int *readers); int mdb_env_set_maxdbs(MDB_env *env, MDB_dbi dbs); int mdb_txn_begin(MDB_env *env, MDB_txn *parent, unsigned int flags, MDB_txn **txn); int mdb_txn_commit(MDB_txn *txn); void mdb_txn_reset(MDB_txn *txn); int mdb_txn_renew(MDB_txn *txn); void mdb_txn_abort(MDB_txn *txn); size_t mdb_txn_id(MDB_txn *txn); int mdb_dbi_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi); int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *stat); int mdb_drop(MDB_txn *txn, MDB_dbi dbi, int del_); int mdb_get(MDB_txn *txn, MDB_dbi dbi, MDB_val *key, MDB_val *data); int mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **cursor); void mdb_cursor_close(MDB_cursor *cursor); int mdb_cursor_del(MDB_cursor *cursor, unsigned int flags); int mdb_cursor_count(MDB_cursor *cursor, size_t *countp); int mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val*data, int op); typedef int (MDB_msg_func)(const char *msg, void *ctx); int mdb_reader_list(MDB_env *env, MDB_msg_func *func, void *ctx); int mdb_reader_check(MDB_env *env, int *dead); int mdb_dbi_flags(MDB_txn *txn, MDB_dbi dbi, unsigned int *flags); #define MDB_VERSION_MAJOR ... #define MDB_VERSION_MINOR ... #define MDB_VERSION_PATCH ... #define EACCES ... #define EAGAIN ... #define EINVAL ... #define ENOMEM ... #define ENOSPC ... #define MDB_BAD_RSLOT ... #define MDB_BAD_DBI ... #define MDB_BAD_TXN ... #define MDB_BAD_VALSIZE ... #define MDB_CORRUPTED ... #define MDB_CURSOR_FULL ... #define MDB_DBS_FULL ... #define MDB_INCOMPATIBLE ... #define MDB_INVALID ... #define MDB_KEYEXIST ... #define MDB_MAP_FULL ... #define MDB_MAP_RESIZED ... #define MDB_NOTFOUND ... #define MDB_PAGE_FULL ... #define MDB_PAGE_NOTFOUND ... #define MDB_PANIC ... #define MDB_READERS_FULL ... #define MDB_TLS_FULL ... #define MDB_TXN_FULL ... #define MDB_VERSION_MISMATCH ... #define MDB_APPEND ... #define MDB_APPENDDUP ... #define MDB_CP_COMPACT ... #define MDB_CREATE ... #define MDB_DUPFIXED ... #define MDB_DUPSORT ... #define MDB_INTEGERDUP ... #define MDB_INTEGERKEY ... 
#define MDB_MAPASYNC ... #define MDB_NODUPDATA ... #define MDB_NOLOCK ... #define MDB_NOMEMINIT ... #define MDB_NOMETASYNC ... #define MDB_NOOVERWRITE ... #define MDB_NORDAHEAD ... #define MDB_NOSUBDIR ... #define MDB_NOSYNC ... #define MDB_NOTLS ... #define MDB_RDONLY ... #define MDB_REVERSEKEY ... #define MDB_WRITEMAP ... // Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where // CFFI will use PyString_AS_STRING when passed as an argument. static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen); static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen, unsigned int flags); static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, MDB_val *val_out); static int pymdb_cursor_get(MDB_cursor *cursor, char *key_s, size_t key_len, char *data_s, size_t data_len, MDB_val *key, MDB_val *data, int op); static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen, char *val_s, size_t vallen, int flags); // Prefaults a range static void preload(int rc, void *x, size_t size); ''' _CFFI_CDEF_PATCHED = ''' int mdb_env_copy3(MDB_env *env, const char *path, unsigned int flags, MDB_txn *txn); int mdb_env_copyfd3(MDB_env *env, int fd, unsigned int flags, MDB_txn *txn); ''' _CFFI_VERIFY = ''' #include <sys/stat.h> #include "lmdb.h" #include "preload.h" // Helpers below inline MDB_vals. Avoids key alloc/dup on CPython, where // CFFI will use PyString_AS_STRING when passed as an argument. static int pymdb_get(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, MDB_val *val_out) { MDB_val key = {keylen, key_s}; int rc = mdb_get(txn, dbi, &key, val_out); return rc; } static int pymdb_put(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen, unsigned int flags) { MDB_val key = {keylen, key_s}; MDB_val val = {vallen, val_s}; return mdb_put(txn, dbi, &key, &val, flags); } static int pymdb_del(MDB_txn *txn, MDB_dbi dbi, char *key_s, size_t keylen, char *val_s, size_t vallen) { MDB_val key = {keylen, key_s}; MDB_val val = {vallen, val_s}; MDB_val *valptr; if(vallen == 0) { valptr = NULL; } else { valptr = &val; } return mdb_del(txn, dbi, &key, valptr); } static int pymdb_cursor_get(MDB_cursor *cursor, char *key_s, size_t key_len, char *data_s, size_t data_len, MDB_val *key, MDB_val *data, int op) { MDB_val tmp_key = {key_len, key_s}; MDB_val tmp_data = {data_len, data_s}; int rc = mdb_cursor_get(cursor, &tmp_key, &tmp_data, op); if(! rc) { *key = tmp_key; *data = tmp_data; } return rc; } static int pymdb_cursor_put(MDB_cursor *cursor, char *key_s, size_t keylen, char *val_s, size_t vallen, int flags) { MDB_val tmpkey = {keylen, key_s}; MDB_val tmpval = {vallen, val_s}; return mdb_cursor_put(cursor, &tmpkey, &tmpval, flags); } ''' if not lmdb._reading_docs(): import cffi # Try to use distutils-bundled CFFI configuration to avoid a recompile and # potential compile errors during first module import. 
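# (The cdef/verify pair below: _ffi.cdef() registers the declarations in
# _CFFI_CDEF with CFFI, then _ffi.verify() compiles a small extension module
# against the real lmdb.h so the C compiler resolves the '...' placeholders;
# the returned _lib object exposes the mdb_* functions and MDB_* constants
# used throughout the rest of this module.)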
_config_vars = _config.CONFIG if _config else { 'extra_compile_args': ['-w'], 'extra_sources': ['lib/mdb.c', 'lib/midl.c'], 'extra_include_dirs': ['lib'], 'extra_library_dirs': [], 'libraries': [] } _have_patched_lmdb = '-DHAVE_PATCHED_LMDB=1' in _config.CONFIG['extra_compile_args'] # type: ignore if _have_patched_lmdb: _CFFI_CDEF += _CFFI_CDEF_PATCHED _ffi = cffi.FFI() _ffi.cdef(_CFFI_CDEF) _lib = _ffi.verify(_CFFI_VERIFY, modulename='lmdb_cffi', ext_package='lmdb', sources=_config_vars['extra_sources'], extra_compile_args=_config_vars['extra_compile_args'], include_dirs=_config_vars['extra_include_dirs'], libraries=_config_vars['libraries'], library_dirs=_config_vars['extra_library_dirs']) @_ffi.callback("int(char *, void *)") def _msg_func(s, _): """mdb_msg_func() callback. Appends `s` to _callbacks.msg_func list. """ _callbacks.msg_func.append(_ffi.string(s).decode()) return 0 class Error(Exception): """Raised when an LMDB-related error occurs, and no more specific :py:class:`lmdb.Error` subclass exists.""" def __init__(self, what, code=0): self.what = what self.code = code self.reason = _ffi.string(_lib.mdb_strerror(code)) msg = what if code: msg = '%s: %s' % (what, self.reason) hint = getattr(self, 'MDB_HINT', None) if hint: msg += ' (%s)' % (hint,) Exception.__init__(self, msg) class KeyExistsError(Error): """Key/data pair already exists.""" MDB_NAME = 'MDB_KEYEXIST' class NotFoundError(Error): """No matching key/data pair found. Normally py-lmdb indicates a missing key by returning ``None``, or a user-supplied default value, however LMDB may return this error where py-lmdb does not know to convert it into a non-exceptional return. """ MDB_NAME = 'MDB_NOTFOUND' class PageNotFoundError(Error): """Request page not found.""" MDB_NAME = 'MDB_PAGE_NOTFOUND' class CorruptedError(Error): """Located page was of the wrong type.""" MDB_NAME = 'MDB_CORRUPTED' class PanicError(Error): """Update of meta page failed.""" MDB_NAME = 'MDB_PANIC' class VersionMismatchError(Error): """Database environment version mismatch.""" MDB_NAME = 'MDB_VERSION_MISMATCH' class InvalidError(Error): """File is not an MDB file.""" MDB_NAME = 'MDB_INVALID' class MapFullError(Error): """Environment map_size= limit reached.""" MDB_NAME = 'MDB_MAP_FULL' MDB_HINT = 'Please use a larger Environment(map_size=) parameter' class DbsFullError(Error): """Environment max_dbs= limit reached.""" MDB_NAME = 'MDB_DBS_FULL' MDB_HINT = 'Please use a larger Environment(max_dbs=) parameter' class ReadersFullError(Error): """Environment max_readers= limit reached.""" MDB_NAME = 'MDB_READERS_FULL' MDB_HINT = 'Please use a larger Environment(max_readers=) parameter' class TlsFullError(Error): """Thread-local storage keys full - too many environments open.""" MDB_NAME = 'MDB_TLS_FULL' class TxnFullError(Error): """Transaction has too many dirty pages - transaction too big.""" MDB_NAME = 'MDB_TXN_FULL' MDB_HINT = 'Please do less work within your transaction' class CursorFullError(Error): """Internal error - cursor stack limit reached.""" MDB_NAME = 'MDB_CURSOR_FULL' class PageFullError(Error): """Internal error - page has no more space.""" MDB_NAME = 'MDB_PAGE_FULL' class MapResizedError(Error): """Database contents grew beyond environment map_size=.""" MDB_NAME = 'MDB_MAP_RESIZED' class IncompatibleError(Error): """Operation and DB incompatible, or DB flags changed.""" MDB_NAME = 'MDB_INCOMPATIBLE' class BadRslotError(Error): """Invalid reuse of reader locktable slot.""" MDB_NAME = 'MDB_BAD_RSLOT' class BadDbiError(Error): """The specified
DBI was changed unexpectedly.""" MDB_NAME = 'MDB_BAD_DBI' class BadTxnError(Error): """Transaction cannot recover - it must be aborted.""" MDB_NAME = 'MDB_BAD_TXN' class BadValsizeError(Error): """Too big key/data, key is empty, or wrong DUPFIXED size.""" MDB_NAME = 'MDB_BAD_VALSIZE' class ReadonlyError(Error): """An attempt was made to modify a read-only database.""" MDB_NAME = 'EACCES' class InvalidParameterError(Error): """An invalid parameter was specified.""" MDB_NAME = 'EINVAL' class LockError(Error): """The environment was locked by another process.""" MDB_NAME = 'EAGAIN' class MemoryError(Error): """Out of memory.""" MDB_NAME = 'ENOMEM' class DiskError(Error): """No more disk space.""" MDB_NAME = 'ENOSPC' # Prepare _error_map, a mapping of integer MDB_ERROR_CODE to exception class. if not lmdb._reading_docs(): _error_map = {} for obj in list(globals().values()): if inspect.isclass(obj) and issubclass(obj, Error) and obj is not Error: _error_map[getattr(_lib, obj.MDB_NAME)] = obj del obj def _error(what, rc): """Lookup and instantiate the correct exception class for the error code `rc`, using :py:class:`Error` if no better class exists.""" return _error_map.get(rc, Error)(what, rc) class Some_LMDB_Resource_That_Was_Deleted_Or_Closed(object): """We need this because CFFI on PyPy treats None as cffi.NULL, instead of throwing an exception it feeds LMDB null pointers. That means simply replacing native handles with None during _invalidate() will cause NULL pointer dereferences. Instead use this class, and its weird name to cause a TypeError, with a very obvious string in the exception text. The only alternatives to this are inserting a check around every single use of a native handle to ensure the handle is still valid prior to calling LMDB, or doing no crash-safety checking at all. """ def __nonzero__(self): return 0 def __bool__(self): return False def __repr__(self): return "<This used to be a LMDB resource but it was deleted or closed>" _invalid = Some_LMDB_Resource_That_Was_Deleted_Or_Closed() def _mvbuf(mv): """Convert a MDB_val cdata to a CFFI buffer object.""" return _ffi.buffer(mv.mv_data, mv.mv_size) def _mvstr(mv): """Convert a MDB_val cdata to Python bytes.""" return _ffi.buffer(mv.mv_data, mv.mv_size)[:] def preload(mv): _lib.preload(0, mv.mv_data, mv.mv_size) def enable_drop_gil(): """Deprecated.""" def version(subpatch=False): """ Return a tuple of integers `(major, minor, patch)` describing the LMDB library version that the binding is linked against. The version of the binding itself is available from ``lmdb.__version__``. `subpatch`: If true, returns a 4 integer tuple consisting of the same plus an extra integer that represents any patches applied by py-lmdb itself (0 representing no patches). """ if subpatch: return (_lib.MDB_VERSION_MAJOR, _lib.MDB_VERSION_MINOR, _lib.MDB_VERSION_PATCH, 1 if _have_patched_lmdb else 0) return (_lib.MDB_VERSION_MAJOR, _lib.MDB_VERSION_MINOR, _lib.MDB_VERSION_PATCH) class Environment(object): """ Structure for a database environment. An environment may contain multiple databases, all residing in the same shared-memory map and underlying disk file. To write to the environment a :py:class:`Transaction` must be created. One simultaneous write transaction is allowed, however there is no limit on the number of read transactions even when a write transaction exists. This class is aliased to `lmdb.open`. It is a serious error to have open the same LMDB file in the same process at the same time. 
Failure to heed this may lead to data corruption and interpreter crash. Equivalent to `mdb_env_open() <http://lmdb.tech/doc/group__mdb.html#ga1fe2740e25b1689dc412e7b9faadba1b>`_ `path`: Location of directory (if `subdir=True`) or file prefix to store the database. `map_size`: Maximum size database may grow to; used to size the memory mapping. If database grows larger than ``map_size``, an exception will be raised and the user must close and reopen :py:class:`Environment`. On 64-bit there is no penalty for making this huge (say 1TB). Must be <2GB on 32-bit. .. note:: **The default map size is set low to encourage a crash**, so users can figure out a good value before learning about this option too late. `subdir`: If ``True``, `path` refers to a subdirectory to store the data and lock files in, otherwise it refers to a filename prefix. `readonly`: If ``True``, disallow any write operations. Note the lock file is still modified. If specified, the ``write`` flag to :py:meth:`begin` or :py:class:`Transaction` is ignored. `metasync`: If ``False``, flush system buffers to disk only once per transaction, omit the metadata flush. Defer that until the system flushes files to disk, or next commit or :py:meth:`sync`. This optimization maintains database integrity, but a system crash may undo the last committed transaction. I.e. it preserves the ACI (atomicity, consistency, isolation) but not D (durability) database property. `sync`: If ``False``, don't flush system buffers to disk when committing a transaction. This optimization means a system crash can corrupt the database or lose the last transactions if buffers are not yet flushed to disk. The risk is governed by how often the system flushes dirty buffers to disk and how often :py:meth:`sync` is called. However, if the filesystem preserves write order and `writemap=False`, transactions exhibit ACI (atomicity, consistency, isolation) properties and only lose D (durability). I.e. database integrity is maintained, but a system crash may undo the final transactions. Note that `sync=False, writemap=True` leaves the system with no hint for when to write transactions to disk, unless :py:meth:`sync` is called. `map_async=True, writemap=True` may be preferable. `mode`: File creation mode. `create`: If ``False``, do not create the directory `path` if it is missing. `readahead`: If ``False``, LMDB will disable the OS filesystem readahead mechanism, which may improve random read performance when a database is larger than RAM. `writemap`: If ``True``, use a writeable memory map unless `readonly=True`. This is faster and uses fewer mallocs, but loses protection from application bugs like wild pointer writes and other bad updates into the database. Incompatible with nested transactions. Processes with and without `writemap` on the same environment do not cooperate well. `meminit`: If ``False`` LMDB will not zero-initialize buffers prior to writing them to disk. This improves performance but may cause old heap data to be saved in the unused portion of the buffer. Do not use this option if your application manipulates confidential data (e.g. plaintext passwords) in memory. This option is only meaningful when `writemap=False`; new pages are always zero-initialized when `writemap=True`. `map_async`: When ``writemap=True``, use asynchronous flushes to disk. As with ``sync=False``, a system crash can then corrupt the database or lose the last transactions. Calling :py:meth:`sync` ensures on-disk database integrity until next commit.
`max_readers`: Maximum number of simultaneous read transactions. Can only be set by the first process to open an environment, as it affects the size of the lock file and shared memory area. Attempts to simultaneously start more than this many *read* transactions will fail. `max_dbs`: Maximum number of databases available. If 0, assume environment will be used as a single database. `max_spare_txns`: Read-only transactions to cache after becoming unused. Caching transactions avoids two allocations, one lock and linear scan of the shared environment per invocation of :py:meth:`begin`, :py:class:`Transaction`, :py:meth:`get`, :py:meth:`gets`, or :py:meth:`cursor`. Should match the process's maximum expected concurrent transactions (e.g. thread count). `lock`: If ``False``, don't do any locking. If concurrent access is anticipated, the caller must manage all concurrency itself. For proper operation the caller must enforce single-writer semantics, and must ensure that no readers are using old transactions while a writer is active. The simplest approach is to use an exclusive lock so that no readers may be active at all when a writer begins. """ def __init__(self, path, map_size=10485760, subdir=True, readonly=False, metasync=True, sync=True, map_async=False, mode=O_0755, create=True, readahead=True, writemap=False, meminit=True, max_readers=126, max_dbs=0, max_spare_txns=1, lock=True): self._max_spare_txns = max_spare_txns self._spare_txns = [] envpp = _ffi.new('MDB_env **') rc = _lib.mdb_env_create(envpp) if rc: raise _error("mdb_env_create", rc) self._env = envpp[0] self._deps = set() self._creating_db_in_readonly = False self.set_mapsize(map_size) rc = _lib.mdb_env_set_maxreaders(self._env, max_readers) if rc: raise _error("mdb_env_set_maxreaders", rc) rc = _lib.mdb_env_set_maxdbs(self._env, max_dbs) if rc: raise _error("mdb_env_set_maxdbs", rc) if create and subdir and not readonly: try: os.mkdir(path, mode) except EnvironmentError as e: if e.errno != errno.EEXIST: raise flags = _lib.MDB_NOTLS if not subdir: flags |= _lib.MDB_NOSUBDIR if readonly: flags |= _lib.MDB_RDONLY self.readonly = readonly if not metasync: flags |= _lib.MDB_NOMETASYNC if not sync: flags |= _lib.MDB_NOSYNC if map_async: flags |= _lib.MDB_MAPASYNC if not readahead: flags |= _lib.MDB_NORDAHEAD if writemap: flags |= _lib.MDB_WRITEMAP if not meminit: flags |= _lib.MDB_NOMEMINIT if not lock: flags |= _lib.MDB_NOLOCK if isinstance(path, UnicodeType): path = path.encode(sys.getfilesystemencoding()) rc = _lib.mdb_env_open(self._env, path, flags, mode & ~O_0111) if rc: raise _error(path, rc) with self.begin(db=object()) as txn: self._db = _Database( env=self, txn=txn, name=None, reverse_key=False, dupsort=False, create=True, integerkey=False, integerdup=False, dupfixed=False ) self._dbs = {None: self._db} def __enter__(self): return self def __exit__(self, _1, _2, _3): self.close() def __del__(self): self.close() _env = None _deps = None _spare_txns = None _dbs = None def set_mapsize(self, map_size): """Change the maximum size of the map file. This function will fail if any transactions are active in the current process. `map_size`: The new size in bytes. Equivalent to `mdb_env_set_mapsize() <http://lmdb.tech/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5>`_ Warning: There's a data race in the underlying library that may cause catastrophic loss of data if you use this method. You are safe if one of the following is true: * Only one process accessing a particular LMDB file ever calls this method.
* You use locking external to this library to ensure that only one process accessing the current LMDB file can be inside this function. """ rc = _lib.mdb_env_set_mapsize(self._env, map_size) if rc: raise _error("mdb_env_set_mapsize", rc) def close(self): """Close the environment, invalidating any open iterators, cursors, and transactions. Repeat calls to :py:meth:`close` have no effect. Equivalent to `mdb_env_close() <http://lmdb.tech/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95>`_ """ if self._env: if self._deps: while self._deps: self._deps.pop()._invalidate() self._deps = None if self._spare_txns: while self._spare_txns: _lib.mdb_txn_abort(self._spare_txns.pop()) self._spare_txns = None if self._dbs: self._dbs.clear() self._dbs = None self._db = None _lib.mdb_env_close(self._env) self._env = _invalid def path(self): """Directory path or file name prefix where this environment is stored. Equivalent to `mdb_env_get_path() <http://lmdb.tech/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe>`_ """ path = _ffi.new('char **') rc = _lib.mdb_env_get_path(self._env, path) if rc: raise _error("mdb_env_get_path", rc) return _ffi.string(path[0]).decode(sys.getfilesystemencoding()) def copy(self, path, compact=False, txn=None): """Make a consistent copy of the environment in the given destination directory. `compact`: If ``True``, perform compaction while copying: omit free pages and sequentially renumber all pages in output. This option consumes more CPU and runs more slowly than the default, but may produce a smaller output database. `txn`: If provided, the backup will be taken from the database with respect to that transaction, otherwise a temporary read-only transaction will be created. Note: this parameter being non-None is not available if the module was built with LMDB_PURE. Note: this parameter may be set only if compact=True. Equivalent to `mdb_env_copy2() or mdb_env_copy3() <http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_ """ flags = _lib.MDB_CP_COMPACT if compact else 0 if txn and not _have_patched_lmdb: raise TypeError("Non-patched LMDB doesn't support transaction with env.copy") if txn and not flags: raise TypeError("txn argument only compatible with compact=True") encoded = path.encode(sys.getfilesystemencoding()) if _have_patched_lmdb: rc = _lib.mdb_env_copy3(self._env, encoded, flags, txn._txn if txn else _ffi.NULL) if rc: raise _error("mdb_env_copy3", rc) else: rc = _lib.mdb_env_copy2(self._env, encoded, flags) if rc: raise _error("mdb_env_copy2", rc) def copyfd(self, fd, compact=False, txn=None): """Copy a consistent version of the environment to file descriptor `fd`. `compact`: If ``True``, perform compaction while copying: omit free pages and sequentially renumber all pages in output. This option consumes more CPU and runs more slowly than the default, but may produce a smaller output database. `txn`: If provided, the backup will be taken from the database with respect to that transaction, otherwise a temporary read-only transaction will be created. Note: this parameter being non-None is not available if the module was built with LMDB_PURE. Equivalent to `mdb_env_copyfd2() or mdb_env_copyfd3 <http://lmdb.tech/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378>`_ """ if txn and not _have_patched_lmdb: raise TypeError("Non-patched LMDB doesn't support transaction with env.copy") if is_win32: # Convert C library handle to kernel handle. 
fd = msvcrt.get_osfhandle(fd) flags = _lib.MDB_CP_COMPACT if compact else 0 if txn and not flags: raise TypeError("txn argument only compatible with compact=True") if _have_patched_lmdb: rc = _lib.mdb_env_copyfd3(self._env, fd, flags, txn._txn if txn else _ffi.NULL) if rc: raise _error("mdb_env_copyfd3", rc) else: rc = _lib.mdb_env_copyfd2(self._env, fd, flags) if rc: raise _error("mdb_env_copyfd2", rc) def sync(self, force=False): """Flush the data buffers to disk. Equivalent to `mdb_env_sync() <http://lmdb.tech/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037>`_ Data is always written to disk when :py:meth:`Transaction.commit` is called, but the operating system may keep it buffered. MDB always flushes the OS buffers upon commit as well, unless the environment was opened with `sync=False` or `metasync=False`. `force`: If ``True``, force a synchronous flush. Otherwise if the environment was opened with `sync=False` the flushes will be omitted, and with `map_async=True` they will be asynchronous. """ rc = _lib.mdb_env_sync(self._env, force) if rc: raise _error("mdb_env_sync", rc) def _convert_stat(self, st): """Convert a MDB_stat to a dict. """ return { "psize": st.ms_psize, "depth": st.ms_depth, "branch_pages": st.ms_branch_pages, "leaf_pages": st.ms_leaf_pages, "overflow_pages": st.ms_overflow_pages, "entries": st.ms_entries } def stat(self): """stat() Return some environment statistics for the default database as a dict: +--------------------+---------------------------------------+ | ``psize`` | Size of a database page in bytes. | +--------------------+---------------------------------------+ | ``depth`` | Height of the B-tree. | +--------------------+---------------------------------------+ | ``branch_pages`` | Number of internal (non-leaf) pages. | +--------------------+---------------------------------------+ | ``leaf_pages`` | Number of leaf pages. | +--------------------+---------------------------------------+ | ``overflow_pages`` | Number of overflow pages. | +--------------------+---------------------------------------+ | ``entries`` | Number of data items. | +--------------------+---------------------------------------+ Equivalent to `mdb_env_stat() <http://lmdb.tech/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255>`_ """ st = _ffi.new('MDB_stat *') rc = _lib.mdb_env_stat(self._env, st) if rc: raise _error("mdb_env_stat", rc) return self._convert_stat(st) def info(self): """Return some nice environment information as a dict: +--------------------+---------------------------------------------+ | ``map_addr`` | Address of database map in RAM. | +--------------------+---------------------------------------------+ | ``map_size`` | Size of database map in RAM. | +--------------------+---------------------------------------------+ | ``last_pgno`` | ID of last used page. | +--------------------+---------------------------------------------+ | ``last_txnid`` | ID of last committed transaction. | +--------------------+---------------------------------------------+ | ``max_readers`` | Number of reader slots allocated in the | | | lock file. Equivalent to the value of | | | `maxreaders=` specified by the first | | | process opening the Environment. | +--------------------+---------------------------------------------+ | ``num_readers`` | Maximum number of reader slots in | | | simultaneous use since the lock file was | | | initialized. 
| +--------------------+---------------------------------------------+ Equivalent to `mdb_env_info() <http://lmdb.tech/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947>`_ """ info = _ffi.new('MDB_envinfo *') rc = _lib.mdb_env_info(self._env, info) if rc: raise _error("mdb_env_info", rc) return { "map_addr": int(_ffi.cast('long', info.me_mapaddr)), "map_size": info.me_mapsize, "last_pgno": info.me_last_pgno, "last_txnid": info.me_last_txnid, "max_readers": info.me_maxreaders, "num_readers": info.me_numreaders } def flags(self): """Return a dict describing Environment constructor flags used to instantiate this environment.""" flags_ = _ffi.new('unsigned int[]', 1) rc = _lib.mdb_env_get_flags(self._env, flags_) if rc: raise _error("mdb_env_get_flags", rc) flags = flags_[0] return { 'subdir': not (flags & _lib.MDB_NOSUBDIR), 'readonly': bool(flags & _lib.MDB_RDONLY), 'metasync': not (flags & _lib.MDB_NOMETASYNC), 'sync': not (flags & _lib.MDB_NOSYNC), 'map_async': bool(flags & _lib.MDB_MAPASYNC), 'readahead': not (flags & _lib.MDB_NORDAHEAD), 'writemap': bool(flags & _lib.MDB_WRITEMAP), 'meminit': not (flags & _lib.MDB_NOMEMINIT), 'lock': not (flags & _lib.MDB_NOLOCK), } def max_key_size(self): """Return the maximum size in bytes of a record's key part. This matches the ``MDB_MAXKEYSIZE`` constant set at compile time.""" return _lib.mdb_env_get_maxkeysize(self._env) def max_readers(self): """Return the maximum number of readers specified during open of the environment by the first process. This is the same as `max_readers=` specified to the constructor if this process was the first to open the environment.""" readers_ = _ffi.new('unsigned int[]', 1) rc = _lib.mdb_env_get_maxreaders(self._env, readers_) if rc: raise _error("mdb_env_get_maxreaders", rc) return readers_[0] def readers(self): """Return a multi-line Unicode string describing the current state of the reader lock table.""" _callbacks.msg_func = [] try: rc = _lib.mdb_reader_list(self._env, _msg_func, _ffi.NULL) if rc: raise _error("mdb_reader_list", rc) return UnicodeType().join(_callbacks.msg_func) finally: del _callbacks.msg_func def reader_check(self): """Search the reader lock table for stale entries, for example due to a crashed process. Returns the number of stale entries that were cleared. """ reaped = _ffi.new('int[]', 1) rc = _lib.mdb_reader_check(self._env, reaped) if rc: raise _error('mdb_reader_check', rc) return reaped[0] def open_db(self, key=None, txn=None, reverse_key=False, dupsort=False, create=True, integerkey=False, integerdup=False, dupfixed=False): """ Open a database, returning an instance of :py:class:`_Database`. Repeat :py:meth:`Environment.open_db` calls for the same name will return the same handle. As a special case, the main database is always open. Equivalent to `mdb_dbi_open() <http://lmdb.tech/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a>`_ Named databases are implemented by *storing a special descriptor in the main database*. All databases in an environment *share the same file*. Because the descriptor is present in the main database, attempts to create a named database will fail if a key matching the database's name already exists. Furthermore *the key is visible to lookups and enumerations*. If your main database keyspace conflicts with the names you use for named databases, then move the contents of your main database to another named database. :: >>> env = lmdb.open('/tmp/test', max_dbs=2) >>> with env.begin(write=True) as txn: ...
txn.put('somename', 'somedata') >>> # Error: database cannot share name of existing key! >>> subdb = env.open_db('somename') A newly created database will not exist if the transaction that created it aborted, nor if another process deleted it. The handle resides in the shared environment, it is not owned by the current transaction or process. Only one thread should call this function; it is not mutex-protected in a read-only transaction. The `dupsort`, `integerkey`, `integerdup`, and `dupfixed` parameters are ignored if the database already exists. The state of those settings is persistent and immutable per database. See :py:meth:`_Database.flags` to view the state of those options for an opened database. A consequence of the immutability of these flags is that the default non-named database will never have these flags set. Preexisting transactions, other than the current transaction and any parents, must not use the new handle, nor must their children. `key`: Bytestring database name. If ``None``, indicates the main database should be returned, otherwise indicates a named database should be created inside the main database. In other words, *a key representing the database will be visible in the main database, and the database name cannot conflict with any existing key.* `txn`: Transaction used to create the database if it does not exist. If unspecified, a temporary write transaction is used. Do not call :py:meth:`open_db` from inside an existing transaction without supplying it here. Note the passed transaction must have `write=True`. `reverse_key`: If ``True``, keys are compared from right to left (e.g. DNS names). `dupsort`: Duplicate keys may be used in the database. (Or, from another perspective, keys may have multiple data items, stored in sorted order.) By default keys must be unique and may have only a single data item. `create`: If ``True``, create the database if it doesn't exist, otherwise raise an exception. `integerkey`: If ``True``, indicates keys in the database are C unsigned or ``size_t`` integers encoded in native byte order. Keys must all be either unsigned or ``size_t``, they cannot be mixed in a single database. `integerdup`: If ``True``, values in the database are C unsigned or ``size_t`` integers encoded in native byte order. Implies `dupsort` and `dupfixed` are ``True``. `dupfixed`: If ``True``, values for each key in database are of fixed size, allowing each additional duplicate value for a key to be stored without a header indicating its size. Implies `dupsort` is ``True``. """ if isinstance(key, UnicodeType): raise TypeError('key must be bytes') if key is None and (reverse_key or dupsort or integerkey or integerdup or dupfixed): raise ValueError('May not set flags on the main database') db = self._dbs.get(key) if db: return db if integerdup: dupfixed = True if dupfixed: dupsort = True if txn: db = _Database(self, txn, key, reverse_key, dupsort, create, integerkey, integerdup, dupfixed) else: try: self._creating_db_in_readonly = True with self.begin(write=not self.readonly) as txn: db = _Database(self, txn, key, reverse_key, dupsort, create, integerkey, integerdup, dupfixed) finally: self._creating_db_in_readonly = False self._dbs[key] = db return db def begin(self, db=None, parent=None, write=False, buffers=False): """Shortcut for :py:class:`lmdb.Transaction`""" return Transaction(self, db, parent, write, buffers) class _Database(object): """ Internal database handle. This class is opaque, save a single method.
Use :py:meth:`Environment.open_db` instead. """ def __init__(self, env, txn, name, reverse_key, dupsort, create, integerkey, integerdup, dupfixed): env._deps.add(self) self._deps = set() self._name = name flags = 0 if reverse_key: flags |= _lib.MDB_REVERSEKEY if dupsort: flags |= _lib.MDB_DUPSORT if create: flags |= _lib.MDB_CREATE if integerkey: flags |= _lib.MDB_INTEGERKEY if integerdup: flags |= _lib.MDB_INTEGERDUP if dupfixed: flags |= _lib.MDB_DUPFIXED dbipp = _ffi.new('MDB_dbi *') self._dbi = None rc = _lib.mdb_dbi_open(txn._txn, name or _ffi.NULL, flags, dbipp) if rc: raise _error("mdb_dbi_open", rc) self._dbi = dbipp[0] self._load_flags(txn) def _load_flags(self, txn): """Load MDB's notion of the database flags.""" flags_ = _ffi.new('unsigned int[]', 1) rc = _lib.mdb_dbi_flags(txn._txn, self._dbi, flags_) if rc: raise _error("mdb_dbi_flags", rc) self._flags = flags_[0] def flags(self, *args): """Return the database's associated flags as a dict of _Database constructor kwargs.""" if len(args) > 1: raise TypeError('flags takes 0 or 1 arguments') return { 'reverse_key': bool(self._flags & _lib.MDB_REVERSEKEY), 'dupsort': bool(self._flags & _lib.MDB_DUPSORT), 'integerkey': bool(self._flags & _lib.MDB_INTEGERKEY), 'integerdup': bool(self._flags & _lib.MDB_INTEGERDUP), 'dupfixed': bool(self._flags & _lib.MDB_DUPFIXED), } def _invalidate(self): self._dbi = _invalid open = Environment class Transaction(object): """ A transaction object. All operations require a transaction handle, transactions may be read-only or read-write. Write transactions may not span threads. Transaction objects implement the context manager protocol, so that reliable release of the transaction happens even in the face of unhandled exceptions: .. code-block:: python # Transaction aborts correctly: with env.begin(write=True) as txn: crash() # Transaction commits automatically: with env.begin(write=True) as txn: txn.put('a', 'b') Equivalent to `mdb_txn_begin() <http://lmdb.tech/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920>`_ `env`: Environment the transaction should be on. `db`: Default named database to operate on. If unspecified, defaults to the environment's main database. Can be overridden on a per-call basis below. `parent`: ``None``, or a parent transaction (see lmdb.h). `write`: Transactions are read-only by default. To modify the database, you must pass `write=True`. This flag is ignored if :py:class:`Environment` was opened with ``readonly=True``. `buffers`: If ``True``, indicates :py:func:`buffer` objects should be yielded instead of bytestrings. This setting applies to the :py:class:`Transaction` instance itself and any :py:class:`Cursors <Cursor>` created within the transaction. This feature significantly improves performance, since MDB has a zero-copy design, but it requires care when manipulating the returned buffer objects. The benefit of this facility is diminished when using small keys and values. """ # If constructor fails, then __del__ will attempt to access these # attributes. _env = _invalid _txn = _invalid _parent = None _write = False # Mutations occurred since transaction start. Required to know when Cursor # key/value must be refreshed. 
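# (Specifically: put(), delete() and drop() each increment self._mutations;
# Cursor.key()/value()/item() compare their saved _last_mutation against it
# and re-issue MDB_GET_CURRENT when they differ, refreshing the cached
# MDB_val pointers after a write through the transaction.)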
_mutations = 0 def __init__(self, env, db=None, parent=None, write=False, buffers=False): env._deps.add(self) self.env = env # hold ref self._db = db or env._db self._env = env._env self._key = _ffi.new('MDB_val *') self._val = _ffi.new('MDB_val *') self._to_py = _mvbuf if buffers else _mvstr self._deps = set() if parent: self._parent = parent parent_txn = parent._txn parent._deps.add(self) else: parent_txn = _ffi.NULL if write: if env.readonly: msg = 'Cannot start write transaction with read-only env' raise _error(msg, _lib.EACCES) txnpp = _ffi.new('MDB_txn **') rc = _lib.mdb_txn_begin(self._env, parent_txn, 0, txnpp) if rc: raise _error("mdb_txn_begin", rc) self._txn = txnpp[0] self._write = True else: try: # Exception catch in order to avoid racy 'if txns:' test if env._creating_db_in_readonly: # Don't use spare txns for creating a DB when read-only raise IndexError self._txn = env._spare_txns.pop() env._max_spare_txns += 1 rc = _lib.mdb_txn_renew(self._txn) if rc: while self._deps: self._deps.pop()._invalidate() _lib.mdb_txn_abort(self._txn) self._txn = _invalid self._invalidate() raise _error("mdb_txn_renew", rc) except IndexError: txnpp = _ffi.new('MDB_txn **') flags = _lib.MDB_RDONLY rc = _lib.mdb_txn_begin(self._env, parent_txn, flags, txnpp) if rc: raise _error("mdb_txn_begin", rc) self._txn = txnpp[0] def _invalidate(self): if self._txn: self.abort() self.env._deps.discard(self) self._parent = None self._env = _invalid def __del__(self): self.abort() def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if exc_type: self.abort() else: self.commit() def id(self): """id() Return the transaction's ID. This returns the identifier associated with this transaction. For a read-only transaction, this corresponds to the snapshot being read; concurrent readers will frequently have the same transaction ID. """ return _lib.mdb_txn_id(self._txn) def stat(self, db): """stat(db) Return statistics like :py:meth:`Environment.stat`, except for a single DBI. `db` must be a database handle returned by :py:meth:`open_db`. """ st = _ffi.new('MDB_stat *') rc = _lib.mdb_stat(self._txn, db._dbi, st) if rc: raise _error('mdb_stat', rc) return self.env._convert_stat(st) def drop(self, db, delete=True): """Delete all keys in a named database and optionally delete the named database itself. Deleting the named database causes it to become unavailable, and invalidates existing cursors. Equivalent to `mdb_drop() <http://lmdb.tech/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db>`_ """ while db._deps: db._deps.pop()._invalidate() rc = _lib.mdb_drop(self._txn, db._dbi, delete) self._mutations += 1 if rc: raise _error("mdb_drop", rc) if db._name in self.env._dbs: del self.env._dbs[db._name] def _cache_spare(self): # In order to avoid taking and maintaining a lock, a race is allowed # below which may result in more spare txns than desired. It seems # unlikely the race could ever result in a large amount of spare txns, # and in any case a correctly configured program should not be opening # more read-only transactions than there are configured spares. if self.env._max_spare_txns > 0: _lib.mdb_txn_reset(self._txn) self.env._spare_txns.append(self._txn) self.env._max_spare_txns -= 1 self._txn = _invalid self._invalidate() return True return False def commit(self): """Commit the pending transaction. 
Equivalent to `mdb_txn_commit() <http://lmdb.tech/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597>`_ """ while self._deps: self._deps.pop()._invalidate() if self._write or not self._cache_spare(): rc = _lib.mdb_txn_commit(self._txn) self._txn = _invalid if rc: raise _error("mdb_txn_commit", rc) self._invalidate() def abort(self): """Abort the pending transaction. Repeat calls to :py:meth:`abort` have no effect after a previously successful :py:meth:`commit` or :py:meth:`abort`, or after the associated :py:class:`Environment` has been closed. Equivalent to `mdb_txn_abort() <http://lmdb.tech/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882>`_ """ if self._txn: while self._deps: self._deps.pop()._invalidate() if self._write or not self._cache_spare(): rc = _lib.mdb_txn_abort(self._txn) self._txn = _invalid if rc: raise _error("mdb_txn_abort", rc) self._invalidate() def get(self, key, default=None, db=None): """Fetch the first value matching `key`, returning `default` if `key` does not exist. A cursor must be used to fetch all values for a key in a `dupsort=True` database. Equivalent to `mdb_get() <http://lmdb.tech/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d>`_ """ rc = _lib.pymdb_get(self._txn, (db or self._db)._dbi, key, len(key), self._val) if rc: if rc == _lib.MDB_NOTFOUND: return default raise _error("mdb_cursor_get", rc) preload(self._val) return self._to_py(self._val) def put(self, key, value, dupdata=True, overwrite=True, append=False, db=None): """Store a record, returning ``True`` if it was written, or ``False`` to indicate the key was already present and `overwrite=False`. On success, the cursor is positioned on the new record. Equivalent to `mdb_put() <http://lmdb.tech/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0>`_ `key`: Bytestring key to store. `value`: Bytestring value to store. `dupdata`: If ``False`` and database was opened with `dupsort=True`, will return ``False`` if the key already has that value. In other words, this only affects the return value. `overwrite`: If ``False``, do not overwrite any existing matching key. If False and writing to a dupsort=True database, this will not add a value to the key and this function will return ``False``. `append`: If ``True``, append the pair to the end of the database without comparing its order first. Appending a key that is not greater than the highest existing key will fail and return ``False``. `db`: Named database to operate on. If unspecified, defaults to the database given to the :py:class:`Transaction` constructor. """ flags = 0 if not dupdata: flags |= _lib.MDB_NODUPDATA if not overwrite: flags |= _lib.MDB_NOOVERWRITE if append: flags |= _lib.MDB_APPEND rc = _lib.pymdb_put(self._txn, (db or self._db)._dbi, key, len(key), value, len(value), flags) self._mutations += 1 if rc: if rc == _lib.MDB_KEYEXIST: return False raise _error("mdb_put", rc) return True def replace(self, key, value, db=None): """Use a temporary cursor to invoke :py:meth:`Cursor.replace`. `db`: Named database to operate on. If unspecified, defaults to the database given to the :py:class:`Transaction` constructor. """ with Cursor(db or self._db, self) as curs: return curs.replace(key, value) def pop(self, key, db=None): """Use a temporary cursor to invoke :py:meth:`Cursor.pop`. `db`: Named database to operate on. If unspecified, defaults to the database given to the :py:class:`Transaction` constructor. 
""" with Cursor(db or self._db, self) as curs: return curs.pop(key) def delete(self, key, value=EMPTY_BYTES, db=None): """Delete a key from the database. Equivalent to `mdb_del() <http://lmdb.tech/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0>`_ `key`: The key to delete. value: If the database was opened with dupsort=True and value is not the empty bytestring, then delete elements matching only this `(key, value)` pair, otherwise all values for key are deleted. Returns True if at least one key was deleted. """ if value is None: # for bug-compatibility with cpython impl value = EMPTY_BYTES rc = _lib.pymdb_del(self._txn, (db or self._db)._dbi, key, len(key), value, len(value)) self._mutations += 1 if rc: if rc == _lib.MDB_NOTFOUND: return False raise _error("mdb_del", rc) return True def cursor(self, db=None): """Shortcut for ``lmdb.Cursor(db, self)``""" return Cursor(db or self._db, self) class Cursor(object): """ Structure for navigating a database. Equivalent to `mdb_cursor_open() <http://lmdb.tech/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa>`_ `db`: :py:class:`_Database` to navigate. `txn`: :py:class:`Transaction` to navigate. As a convenience, :py:meth:`Transaction.cursor` can be used to quickly return a cursor: :: >>> env = lmdb.open('/tmp/foo') >>> child_db = env.open_db('child_db') >>> with env.begin() as txn: ... cursor = txn.cursor() # Cursor on main database. ... cursor2 = txn.cursor(child_db) # Cursor on child database. Cursors start in an unpositioned state. If :py:meth:`iternext` or :py:meth:`iterprev` are used in this state, iteration proceeds from the start or end respectively. Iterators directly position using the cursor, meaning strange behavior results when multiple iterators exist on the same cursor. .. note:: From the perspective of the Python binding, cursors return to an 'unpositioned' state once any scanning or seeking method (e.g. :py:meth:`next`, :py:meth:`prev_nodup`, :py:meth:`set_range`) returns ``False`` or raises an exception. This is primarily to ensure safe, consistent semantics in the face of any error condition. When the Cursor returns to an unpositioned state, its :py:meth:`key` and :py:meth:`value` return empty strings to indicate there is no active position, although internally the LMDB cursor may still have a valid position. This may lead to slightly surprising behaviour when iterating the values for a `dupsort=True` database's keys, since methods such as :py:meth:`iternext_dup` will cause Cursor to appear unpositioned, despite it returning ``False`` only to indicate there are no more values for the current key. In that case, simply calling :py:meth:`next` would cause iteration to resume at the next available key. This behaviour may change in future. Iterator methods such as :py:meth:`iternext` and :py:meth:`iterprev` accept `keys` and `values` arguments. If both are ``True``, then the value of :py:meth:`item` is yielded on each iteration. If only `keys` is ``True``, :py:meth:`key` is yielded, otherwise only :py:meth:`value` is yielded. Prior to iteration, a cursor can be positioned anywhere in the database: :: >>> with env.begin() as txn: ... cursor = txn.cursor() ... if not cursor.set_range('5'): # Position at first key >= '5'. ... print('Not found!') ... else: ... for key, value in cursor: # Iterate from first key >= '5'. ... print((key, value)) Iteration is not required to navigate, and sometimes results in ugly or inefficient code. 
In cases where the iteration order is not obvious, or is related to the data being read, use of :py:meth:`set_key`, :py:meth:`set_range`, :py:meth:`key`, :py:meth:`value`, and :py:meth:`item` may be preferable: :: >>> # Record the path from a child to the root of a tree. >>> path = ['child14123'] >>> while path[-1] != 'root': ... assert cursor.set_key(path[-1]), \\ ... 'Tree is broken! Path: %s' % (path,) ... path.append(cursor.value()) """ def __init__(self, db, txn): db._deps.add(self) txn._deps.add(self) self.db = db # hold ref self.txn = txn # hold ref self._dbi = db._dbi self._txn = txn._txn self._key = _ffi.new('MDB_val *') self._val = _ffi.new('MDB_val *') self._valid = False self._to_py = txn._to_py curpp = _ffi.new('MDB_cursor **') self._cur = None rc = _lib.mdb_cursor_open(self._txn, self._dbi, curpp) if rc: raise _error("mdb_cursor_open", rc) self._cur = curpp[0] # If Transaction.mutations!=last_mutation, must MDB_GET_CURRENT to # refresh `key' and `val'. self._last_mutation = txn._mutations def _invalidate(self): if self._cur: _lib.mdb_cursor_close(self._cur) self.db._deps.discard(self) self.txn._deps.discard(self) self._cur = _invalid self._dbi = _invalid self._txn = _invalid def __del__(self): self._invalidate() def close(self): """Close the cursor, freeing its associated resources.""" self._invalidate() def __enter__(self): return self def __exit__(self, _1, _2, _3): self._invalidate() def key(self): """Return the current key.""" # Must refresh `key` and `val` following mutation. if self._last_mutation != self.txn._mutations: self._cursor_get(_lib.MDB_GET_CURRENT) return self._to_py(self._key) def value(self): """Return the current value.""" # Must refresh `key` and `val` following mutation. if self._last_mutation != self.txn._mutations: self._cursor_get(_lib.MDB_GET_CURRENT) preload(self._val) return self._to_py(self._val) def item(self): """Return the current `(key, value)` pair.""" # Must refresh `key` and `val` following mutation. if self._last_mutation != self.txn._mutations: self._cursor_get(_lib.MDB_GET_CURRENT) preload(self._val) return self._to_py(self._key), self._to_py(self._val) def _iter(self, op, keys, values): if not values: get = self.key elif not keys: get = self.value else: get = self.item cur = self._cur key = self._key val = self._val rc = 0 while self._valid: yield get() rc = _lib.mdb_cursor_get(cur, key, val, op) self._valid = not rc if rc: self._key.mv_size = 0 self._val.mv_size = 0 if rc != _lib.MDB_NOTFOUND: raise _error("mdb_cursor_get", rc) def iternext(self, keys=True, values=True): """Return a forward iterator that yields the current element before calling :py:meth:`next`, repeating until the end of the database is reached. As a convenience, :py:class:`Cursor` implements the iterator protocol by automatically returning a forward iterator when invoked: :: >>> # Equivalent: >>> it = iter(cursor) >>> it = cursor.iternext(keys=True, values=True) If the cursor is not yet positioned, it is moved to the first key in the database, otherwise iteration proceeds from the current position. """ if not self._valid: self.first() return self._iter(_lib.MDB_NEXT, keys, values) __iter__ = iternext def iternext_dup(self, keys=False, values=True): """Return a forward iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`next_dup`, repeating until the last value of the current key is reached. Only meaningful for databases opened with `dupsort=True`. .. 
code-block:: python if not cursor.set_key("foo"): print("No values found for 'foo'") else: for idx, data in enumerate(cursor.iternext_dup()): print("%d'th value for 'foo': %s" % (idx, data)) """ return self._iter(_lib.MDB_NEXT_DUP, keys, values) def iternext_nodup(self, keys=True, values=False): """Return a forward iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`next_nodup`, repeating until the end of the database is reached. Only meaningful for databases opened with `dupsort=True`. If the cursor is not yet positioned, it is moved to the first key in the database, otherwise iteration proceeds from the current position. .. code-block:: python for key in cursor.iternext_nodup(): print("Key '%s' has %d values" % (key, cursor.count())) """ if not self._valid: self.first() return self._iter(_lib.MDB_NEXT_NODUP, keys, values) def iterprev(self, keys=True, values=True): """Return a reverse iterator that yields the current element before calling :py:meth:`prev`, until the start of the database is reached. If the cursor is not yet positioned, it is moved to the last key in the database, otherwise iteration proceeds from the current position. :: >>> with env.begin() as txn: ... for i, (key, value) in enumerate(txn.cursor().iterprev()): ... print('%dth last item is (%r, %r)' % (1+i, key, value)) """ if not self._valid: self.last() return self._iter(_lib.MDB_PREV, keys, values) def iterprev_dup(self, keys=False, values=True): """Return a reverse iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`prev_dup`, repeating until the first value of the current key is reached. Only meaningful for databases opened with `dupsort=True`. """ return self._iter(_lib.MDB_PREV_DUP, keys, values) def iterprev_nodup(self, keys=True, values=False): """Return a reverse iterator that yields the current value ("duplicate") of the current key before calling :py:meth:`prev_nodup`, repeating until the start of the database is reached. If the cursor is not yet positioned, it is moved to the last key in the database, otherwise iteration proceeds from the current position. Only meaningful for databases opened with `dupsort=True`. """ if not self._valid: self.last() return self._iter(_lib.MDB_PREV_NODUP, keys, values) def _cursor_get(self, op): rc = _lib.mdb_cursor_get(self._cur, self._key, self._val, op) self._valid = v = not rc self._last_mutation = self.txn._mutations if rc: self._key.mv_size = 0 self._val.mv_size = 0 if rc != _lib.MDB_NOTFOUND: if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT): raise _error("mdb_cursor_get", rc) return v def _cursor_get_kv(self, op, k, v): rc = _lib.pymdb_cursor_get(self._cur, k, len(k), v, len(v), self._key, self._val, op) self._valid = v = not rc if rc: self._key.mv_size = 0 self._val.mv_size = 0 if rc != _lib.MDB_NOTFOUND: if not (rc == _lib.EINVAL and op == _lib.MDB_GET_CURRENT): raise _error("mdb_cursor_get", rc) return v def first(self): """Move to the first key in the database, returning ``True`` on success or ``False`` if the database is empty. If the database was opened with `dupsort=True` and the key contains duplicates, the cursor is positioned on the first value ("duplicate"). 
Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_FIRST <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_FIRST) def first_dup(self): """Move to the first value ("duplicate") for the current key, returning ``True`` on success or ``False`` if the database is empty. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_FIRST_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_FIRST_DUP) def last(self): """Move to the last key in the database, returning ``True`` on success or ``False`` if the database is empty. If the database was opened with `dupsort=True` and the key contains duplicates, the cursor is positioned on the last value ("duplicate"). Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_LAST <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_LAST) def last_dup(self): """Move to the last value ("duplicate") for the current key, returning ``True`` on success or ``False`` if the database is empty. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_LAST_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_LAST_DUP) def prev(self): """Move to the previous element, returning ``True`` on success or ``False`` if there is no previous item. For databases opened with `dupsort=True`, moves to the previous data item ("duplicate") for the current key if one exists, otherwise moves to the previous key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_PREV <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_PREV) def prev_dup(self): """Move to the previous value ("duplicate") of the current key, returning ``True`` on success or ``False`` if there is no previous value. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_PREV_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_PREV_DUP) def prev_nodup(self): """Move to the last value ("duplicate") of the previous key, returning ``True`` on success or ``False`` if there is no previous key. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_PREV_NODUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_PREV_NODUP) def next(self): """Move to the next element, returning ``True`` on success or ``False`` if there is no next element. For databases opened with `dupsort=True`, moves to the next value ("duplicate") for the current key if one exists, otherwise moves to the first value of the next key. 
Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_NEXT <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_NEXT) def next_dup(self): """Move to the next value ("duplicate") of the current key, returning ``True`` on success or ``False`` if there is no next value. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_NEXT_DUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_NEXT_DUP) def next_nodup(self): """Move to the first value ("duplicate") of the next key, returning ``True`` on success or ``False`` if there is no next key. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_NEXT_NODUP <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get(_lib.MDB_NEXT_NODUP) def set_key(self, key): """Seek exactly to `key`, returning ``True`` on success or ``False`` if the exact key was not found. It is an error to :py:meth:`set_key` the empty bytestring. For databases opened with `dupsort=True`, moves to the first value ("duplicate") for the key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_SET_KEY <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES) def set_key_dup(self, key, value): """Seek exactly to `(key, value)`, returning ``True`` on success or ``False`` if the exact key and value was not found. It is an error to :py:meth:`set_key` the empty bytestring. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_GET_BOTH <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ return self._cursor_get_kv(_lib.MDB_GET_BOTH, key, value) def get(self, key, default=None): """Equivalent to :py:meth:`set_key()`, except :py:meth:`value` is returned when `key` is found, otherwise `default`. """ if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES): return self.value() return default def getmulti(self, keys, dupdata=False, dupfixed_bytes=None, keyfixed=False): """Returns an iterable of `(key, value)` 2-tuples containing results for each key in the iterable `keys`. `keys`: Iterable to read keys from. `dupdata`: If ``True`` and database was opened with `dupsort=True`, read all duplicate values for each matching key. `dupfixed_bytes`: If database was opened with `dupsort=True` and `dupfixed=True`, accepts the size of each value, in bytes, and applies an optimization reducing the number of database lookups. `keyfixed`: If `dupfixed_bytes` is set and database key size is fixed, setting keyfixed=True will result in this function returning a memoryview to the results as a structured array of bytes. The structured array can be instantiated by passing the memoryview buffer to NumPy: .. 
code-block:: python key_bytes, val_bytes = 4, 8 dtype = np.dtype([(f'S{key_bytes}', f'S{val_bytes}}')]) arr = np.frombuffer( cur.getmulti(keys, dupdata=True, dupfixed_bytes=val_bytes, keyfixed=True) ) """ if dupfixed_bytes and dupfixed_bytes < 0: raise _error("dupfixed_bytes must be a positive integer.") elif (dupfixed_bytes or keyfixed) and not dupdata: raise _error("dupdata is required for dupfixed_bytes/key_bytes.") elif keyfixed and not dupfixed_bytes: raise _error("dupfixed_bytes is required for key_bytes.") if dupfixed_bytes: get_op = _lib.MDB_GET_MULTIPLE next_op = _lib.MDB_NEXT_MULTIPLE else: get_op = _lib.MDB_GET_CURRENT next_op = _lib.MDB_NEXT_DUP a = bytearray() lst = list() for key in keys: if self.set_key(key): while self._valid: self._cursor_get(get_op) preload(self._val) key = self._to_py(self._key) val = self._to_py(self._val) if dupfixed_bytes: gen = ( (key, val[i:i + dupfixed_bytes]) for i in range(0, len(val), dupfixed_bytes)) if keyfixed: for k, v in gen: a.extend(k + v) else: for k, v in gen: lst.append((k, v)) else: lst.append((key, val)) if dupdata: self._cursor_get(next_op) else: break if keyfixed: return memoryview(a) else: return lst def set_range(self, key): """Seek to the first key greater than or equal to `key`, returning ``True`` on success, or ``False`` to indicate key was past end of database. Behaves like :py:meth:`first` if `key` is the empty bytestring. For databases opened with `dupsort=True`, moves to the first value ("duplicate") for the key. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_SET_RANGE <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ if not key: return self.first() return self._cursor_get_kv(_lib.MDB_SET_RANGE, key, EMPTY_BYTES) def set_range_dup(self, key, value): """Seek to the first key/value pair greater than or equal to `key`, returning ``True`` on success, or ``False`` to indicate that `value` was past the last value of `key` or that `(key, value)` was past the end end of database. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_get() <http://lmdb.tech/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0>`_ with `MDB_GET_BOTH_RANGE <http://lmdb.tech/doc/group__mdb.html#ga1206b2af8b95e7f6b0ef6b28708c9127>`_ """ rc = self._cursor_get_kv(_lib.MDB_GET_BOTH_RANGE, key, value) # issue #126: MDB_GET_BOTH_RANGE does not satisfy its documentation, # and fails to update `key` and `value` on success. Therefore # explicitly call MDB_GET_CURRENT after MDB_GET_BOTH_RANGE. self._cursor_get(_lib.MDB_GET_CURRENT) return rc def delete(self, dupdata=False): """Delete the current element and move to the next, returning ``True`` on success or ``False`` if the database was empty. If `dupdata` is ``True``, delete all values ("duplicates") for the current key, otherwise delete only the currently positioned value. Only meaningful for databases opened with `dupsort=True`. Equivalent to `mdb_cursor_del() <http://lmdb.tech/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95>`_ """ v = self._valid if v: flags = _lib.MDB_NODUPDATA if dupdata else 0 rc = _lib.mdb_cursor_del(self._cur, flags) self.txn._mutations += 1 if rc: raise _error("mdb_cursor_del", rc) self._cursor_get(_lib.MDB_GET_CURRENT) v = rc == 0 return v def count(self): """Return the number of values ("duplicates") for the current key. Only meaningful for databases opened with `dupsort=True`. 
Equivalent to `mdb_cursor_count() <http://lmdb.tech/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2>`_ """ countp = _ffi.new('size_t *') rc = _lib.mdb_cursor_count(self._cur, countp) if rc: raise _error("mdb_cursor_count", rc) return countp[0] def put(self, key, val, dupdata=True, overwrite=True, append=False): """Store a record, returning ``True`` if it was written, or ``False`` to indicate the key was already present and `overwrite=False`. On success, the cursor is positioned on the key. Equivalent to `mdb_cursor_put() <http://lmdb.tech/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e>`_ `key`: Bytestring key to store. `val`: Bytestring value to store. `dupdata`: If ``False`` and database was opened with `dupsort=True`, will return ``False`` if the key already has that value. In other words, this only affects the return value. `overwrite`: If ``False``, do not overwrite the value for the key if it exists, just return ``False``. For databases opened with `dupsort=True`, ``False`` will always be returned if a duplicate key/value pair is inserted, regardless of the setting for `overwrite`. `append`: If ``True``, append the pair to the end of the database without comparing its order first. Appending a key that is not greater than the highest existing key will fail and return ``False``. """ flags = 0 if not dupdata: flags |= _lib.MDB_NODUPDATA if not overwrite: flags |= _lib.MDB_NOOVERWRITE if append: if self.txn._db._flags & _lib.MDB_DUPSORT: flags |= _lib.MDB_APPENDDUP else: flags |= _lib.MDB_APPEND rc = _lib.pymdb_cursor_put(self._cur, key, len(key), val, len(val), flags) self.txn._mutations += 1 if rc: if rc == _lib.MDB_KEYEXIST: return False raise _error("mdb_cursor_put", rc) self._cursor_get(_lib.MDB_GET_CURRENT) return True def putmulti(self, items, dupdata=True, overwrite=True, append=False): """Invoke :py:meth:`put` for each `(key, value)` 2-tuple from the iterable `items`. Elements must be exactly 2-tuples, they may not be of any other type, or tuple subclass. Returns a tuple `(consumed, added)`, where `consumed` is the number of elements read from the iterable, and `added` is the number of new entries added to the database. `added` may be less than `consumed` when `overwrite=False`. `items`: Iterable to read records from. `dupdata`: If ``True`` and database was opened with `dupsort=True`, add pair as a duplicate if the given key already exists. Otherwise overwrite any existing matching key. `overwrite`: If ``False``, do not overwrite the value for the key if it exists, just return ``False``. For databases opened with `dupsort=True`, ``False`` will always be returned if a duplicate key/value pair is inserted, regardless of the setting for `overwrite`. `append`: If ``True``, append records to the end of the database without comparing their order first. Appending a key that is not greater than the highest existing key will cause corruption. """ flags = 0 if not dupdata: flags |= _lib.MDB_NODUPDATA if not overwrite: flags |= _lib.MDB_NOOVERWRITE if append: if self.txn._db._flags & _lib.MDB_DUPSORT: flags |= _lib.MDB_APPENDDUP else: flags |= _lib.MDB_APPEND added = 0 skipped = 0 for key, value in items: rc = _lib.pymdb_cursor_put(self._cur, key, len(key), value, len(value), flags) self.txn._mutations += 1 added += 1 if rc: if rc == _lib.MDB_KEYEXIST: skipped += 1 else: raise _error("mdb_cursor_put", rc) self._cursor_get(_lib.MDB_GET_CURRENT) return added, added - skipped def replace(self, key, val): """Store a record, returning its previous value if one existed. 
Returns ``None`` if no previous value existed. This uses the best available mechanism to minimize the cost of a `set-and-return-previous` operation. For databases opened with `dupsort=True`, only the first data element ("duplicate") is returned if it existed, all data elements are removed and the new `(key, data)` pair is inserted. `key`: Bytestring key to store. `value`: Bytestring value to store. """ if self.db._flags & _lib.MDB_DUPSORT: if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES): preload(self._val) old = _mvstr(self._val) self.delete(True) else: old = None self.put(key, val) return old flags = _lib.MDB_NOOVERWRITE keylen = len(key) rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), flags) self.txn._mutations += 1 if not rc: return if rc != _lib.MDB_KEYEXIST: raise _error("mdb_cursor_put", rc) self._cursor_get(_lib.MDB_GET_CURRENT) preload(self._val) old = _mvstr(self._val) rc = _lib.pymdb_cursor_put(self._cur, key, keylen, val, len(val), 0) self.txn._mutations += 1 if rc: raise _error("mdb_cursor_put", rc) self._cursor_get(_lib.MDB_GET_CURRENT) return old def pop(self, key): """Fetch a record's value then delete it. Returns ``None`` if no previous value existed. This uses the best available mechanism to minimize the cost of a `delete-and-return-previous` operation. For databases opened with `dupsort=True`, the first data element ("duplicate") for the key will be popped. `key`: Bytestring key to delete. """ if self._cursor_get_kv(_lib.MDB_SET_KEY, key, EMPTY_BYTES): preload(self._val) old = _mvstr(self._val) rc = _lib.mdb_cursor_del(self._cur, 0) self.txn._mutations += 1 if rc: raise _error("mdb_cursor_del", rc) self._cursor_get(_lib.MDB_GET_CURRENT) return old def _iter_from(self, k, reverse): """Helper for centidb. Please do not rely on this interface, it may be removed in future. """ if not k and not reverse: found = self.first() else: found = self.set_range(k) if reverse: if not found: self.last() return self.iterprev() else: if not found: return iter(()) return self.iternext()
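A minimal sketch of the Cursor API documented in the record above (not taken from the dumped file itself): it assumes the lmdb package is installed, and the environment path and key names are invented for illustration.

import lmdb

env = lmdb.open('/tmp/lmdb-example', map_size=10 * 1024 * 1024)

# Bulk-load a few records with putmulti(), which returns (consumed, added).
with env.begin(write=True) as txn:
    with txn.cursor() as cur:
        items = [(b'key%03d' % i, b'value%03d' % i) for i in range(10)]
        consumed, added = cur.putmulti(items)

# Seek to the first key >= b'key005', then iterate forward to the end.
with env.begin() as txn:
    with txn.cursor() as cur:
        if cur.set_range(b'key005'):
            for key, value in cur.iternext():
                print(key, value)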
37.728723
105
0.596298
11,551
92,209
4.570513
0.104233
0.016366
0.011138
0.013922
0.387525
0.349983
0.332386
0.307024
0.282381
0.265636
0
0.017764
0.305849
92,209
2,443
106
37.744167
0.807052
0.472405
0
0.290688
0
0
0.258397
0.023818
0
0
0
0
0
1
0.08094
false
0.001741
0.013055
0.005222
0.228895
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f477633c1badf20c6b9aa7cdc1d086ce3dd6b193
6,425
py
Python
.virtual_documents/00_core.ipynb.py
AtomScott/image_folder_datasets
935580929abc9d8ec9eeaf944a0d3c670a09d04d
[ "Apache-2.0" ]
null
null
null
.virtual_documents/00_core.ipynb.py
AtomScott/image_folder_datasets
935580929abc9d8ec9eeaf944a0d3c670a09d04d
[ "Apache-2.0" ]
null
null
null
.virtual_documents/00_core.ipynb.py
AtomScott/image_folder_datasets
935580929abc9d8ec9eeaf944a0d3c670a09d04d
[ "Apache-2.0" ]
null
null
null
# default_exp core #hide from nbdev.showdoc import * from fastcore.test import * # export import os import torch from torch import nn from torch.nn import functional as F from torch.utils.data import DataLoader import warnings import torchvision from torchvision.datasets import MNIST, ImageFolder from torchvision.transforms import ToTensor, Resize, Compose, CenterCrop, Normalize import pytorch_lightning as pl # from pytorch_lightning.metrics.functional import classification, f1 from pytorch_lightning.loggers import TensorBoardLogger import fastai.vision.augment import fastai.vision.data # from fastai.vision.data import ImageDataLoaders # from fastai.vision.augment import Resize #export class ImageFolderDataModule(pl.LightningDataModule): def __init__(self, data_dir, batch_size, transform): super().__init__() self.data_dir = data_dir self.batch_size = batch_size self.transform = transform # Compose([ # Resize(256, interpolation=2), # CenterCrop(224), # ToTensor(), # # TODO: check whether normalize is the same for imagenet and fractalDB # Normalize(mean=[0.485, 0.456, 0.406], # std=[0.229, 0.224, 0.225]) # ]) def prepare_data(self, stage=None): pass def setup(self, stage=None): data_dir = self.data_dir transform = self.transform self.dls = fastai.vision.data.ImageDataLoaders.from_folder(data_dir, item_tfms=fastai.vision.augment.Resize(224)) self.trainset = ImageFolder(os.path.join(data_dir, 'train'), transform) self.valset = ImageFolder(os.path.join(data_dir, 'valid'), transform) def train_dataloader(self): return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True) def val_dataloader(self): return DataLoader(self.valset, batch_size=self.batch_size, shuffle=False) def test_dataloader(self): pass data_dir = 'Datasets/cifar10' transform = Compose([ Resize(256, interpolation=2), CenterCrop(224), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) dm = ImageFolderDataModule(data_dir, 128, transform) dm.setup() for x,y in dm.train_dataloader(): test_eq(type(x), torch.Tensor) test_eq(type(y), torch.Tensor) break #export class CNNModule(pl.LightningModule): def __init__(self, model=None, pretrained=False, freeze_extractor=False, log_level=10, num_classes=None, weight_path=None): super().__init__() self.num_classes = num_classes self.pretrained = pretrained self.freeze_extractor = freeze_extractor assert model is not None, 'Select model from torchvision' assert num_classes is not None, 'Must configure number of classes with num_classes' if not model.startswith('resnet'): warnings.warn('models other than resnet variants may need different setup for finetuning to work.') # Prepare model for finetuning if weight_path is not None: param = torch.load(weight_path) backbone = eval(f'torchvision.models.{model}(pretrained={False})') backbone.load_state_dict(param) else: backbone = eval(f'torchvision.models.{model}(pretrained={pretrained})') num_filters = backbone.fc.in_features layers = list(backbone.children())[:-1] self.feature_extractor = torch.nn.Sequential(*layers) self.classifier = nn.Linear(num_filters, num_classes) def forward(self, x): if self.freeze_extractor: self.feature_extractor.eval() with torch.no_grad(): representations = self.feature_extractor(x).flatten(1) else: representations = self.feature_extractor(x).flatten(1) y = self.classifier(representations) return y def training_step(self, batch, batch_idx): x, y = batch y_hat = self(x) outputs = self.calculate_metrics(y_hat=y_hat, y=y) return outputs def training_epoch_end(self, outputs): avg_metrics 
= {} for metric in outputs[0].keys(): val = torch.stack([x[metric] for x in outputs]).mean() self.logger.experiment.add_scalar(f"{metric}/train", val, self.current_epoch) avg_metrics[metric] = val # epoch_dictionary = {'loss': avg_metrics['loss']} def validation_step(self, batch, batch_idx): x, y = batch y_hat = self(x) outputs = self.calculate_metrics(y_hat=y_hat, y=y) return outputs def validation_epoch_end(self, outputs): avg_metrics = {} for metric in outputs[0].keys(): val = torch.stack([x[metric] for x in outputs]).mean() self.logger.experiment.add_scalar(f"{metric}/validation", val, self.current_epoch) avg_metrics[metric] = val def configure_optimizers(self): return torch.optim.Adam(self.parameters(), lr=0.02, weight_decay=1e-04) # > return torch.optim.SGF(self.parameters(), lr=self.lr, aldsfk'a) def calculate_metrics(self, y, y_hat): loss = F.cross_entropy(y_hat, y) y_pred = y_hat.argmax(dim=1) acc = classification.accuracy(y_pred, y) f1_score = f1(y_pred, y, self.num_classes) return { "loss":loss, "acc": acc, "f1": f1_score } def on_sanity_check_start(self): self.logger.disable() def on_sanity_check_end(self): self.logger.enable() modelname = 'resnet18' logger = TensorBoardLogger('tb_logs', name=modelname) trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5) model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes)) test_eq(trainer.fit(model, dm), 1) weight_path = 'FractalDB-1000_resnet50_epoch90.pth' modelname = 'resnet50' logger = TensorBoardLogger('tb_logs', name=modelname) trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=logger, fast_dev_run=5) model = CNNModule(modelname, pretrained=True, num_classes=len(dm.trainset.classes), weight_path=weight_path) test_eq(trainer.fit(model, dm), 1)
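A self-contained sketch of the LightningDataModule / LightningModule / Trainer pattern used in the record above (not from the dumped notebook): the MNIST dataset and the './data' download directory are stand-ins chosen only so the example runs on its own.

import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor


class MNISTDataModule(pl.LightningDataModule):
    def __init__(self, data_dir='./data', batch_size=64):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size

    def setup(self, stage=None):
        # Download and wrap the training split.
        self.trainset = MNIST(self.data_dir, train=True, download=True,
                              transform=ToTensor())

    def train_dataloader(self):
        return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True)


class LinearClassifier(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(28 * 28, 10)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.layer(x.flatten(1))
        return F.cross_entropy(logits, y)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)


if __name__ == '__main__':
    # fast_dev_run exercises a single batch, mirroring the smoke tests above.
    trainer = pl.Trainer(max_epochs=1, fast_dev_run=True)
    trainer.fit(LinearClassifier(), datamodule=MNISTDataModule())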
32.286432
127
0.651518
803
6,425
5.048568
0.28269
0.017267
0.006167
0.00444
0.361618
0.344845
0.316724
0.260977
0.24223
0.21263
0
0.021994
0.242802
6,425
198
128
32.449495
0.811305
0.104591
0
0.23622
0
0
0.069134
0.023045
0
0
0
0.005051
0.015748
1
0.125984
false
0.015748
0.11811
0.023622
0.314961
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f477fec40612fa1a5fd9ffbd050a890ebec79d19
2,030
py
Python
test_scripts/pyfora2/containerTests.py
ufora/ufora
04db96ab049b8499d6d6526445f4f9857f1b6c7e
[ "Apache-2.0", "CC0-1.0", "MIT", "BSL-1.0", "BSD-3-Clause" ]
571
2015-11-05T20:07:07.000Z
2022-01-24T22:31:09.000Z
test_scripts/pyfora2/containerTests.py
timgates42/ufora
04db96ab049b8499d6d6526445f4f9857f1b6c7e
[ "Apache-2.0", "CC0-1.0", "MIT", "BSL-1.0", "BSD-3-Clause" ]
218
2015-11-05T20:37:55.000Z
2021-05-30T03:53:50.000Z
test_scripts/pyfora2/containerTests.py
timgates42/ufora
04db96ab049b8499d6d6526445f4f9857f1b6c7e
[ "Apache-2.0", "CC0-1.0", "MIT", "BSL-1.0", "BSD-3-Clause" ]
40
2015-11-07T21:42:19.000Z
2021-05-23T03:48:19.000Z
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import pyfora
import ufora.config.Setup as Setup
import ufora.FORA.python.PurePython.DictTestCases as DictTestCases
import ufora.FORA.python.PurePython.ListTestCases as ListTestCases
import ufora.FORA.python.PurePython.TupleTestCases as TupleTestCases
import ufora.FORA.python.PurePython.ExecutorTestCommon as ExecutorTestCommon
import ufora.test.ClusterSimulation as ClusterSimulation


class ExecutorSimulationTest(
        unittest.TestCase,
        ExecutorTestCommon.ExecutorTestCommon,
        DictTestCases.DictTestCases,
        ListTestCases.ListTestCases,
        TupleTestCases.TupleTestCases):

    @classmethod
    def setUpClass(cls):
        cls.config = Setup.config()
        cls.executor = None
        cls.simulation = ClusterSimulation.Simulator.createGlobalSimulator()
        cls.simulation.startService()
        cls.simulation.getDesirePublisher().desireNumberOfWorkers(1)

    @classmethod
    def tearDownClass(cls):
        cls.simulation.stopService()

    @classmethod
    def create_executor(cls, allowCached=True):
        if not allowCached:
            return pyfora.connect('http://localhost:30000')
        if cls.executor is None:
            cls.executor = pyfora.connect('http://localhost:30000')
            cls.executor.stayOpenOnExit = True
        return cls.executor


if __name__ == '__main__':
    import ufora.config.Mainline as Mainline
    Mainline.UnitTestMainline()
33.833333
76
0.733005
227
2,030
6.515419
0.475771
0.052062
0.040568
0.056795
0.125761
0
0
0
0
0
0
0.011607
0.193596
2,030
59
77
34.40678
0.891875
0.278325
0
0.085714
0
0
0.035912
0
0
0
0
0
0
1
0.085714
false
0
0.257143
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4782d553047c0d6c83eb8c3ac341a236af78e5e
597
py
Python
src/utils/torch_common.py
quochungto/SIIM-COVID19-Detection
88bc10d7b01d277d223c4dddd4c223a782616611
[ "MIT" ]
null
null
null
src/utils/torch_common.py
quochungto/SIIM-COVID19-Detection
88bc10d7b01d277d223c4dddd4c223a782616611
[ "MIT" ]
null
null
null
src/utils/torch_common.py
quochungto/SIIM-COVID19-Detection
88bc10d7b01d277d223c4dddd4c223a782616611
[ "MIT" ]
null
null
null
import os
import gc
import random

import numpy as np
import torch


def seed_everything(seed):
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True


def memory_cleanup():
    """
    Cleans up GPU memory
    https://github.com/huggingface/transformers/issues/1742
    """
    for obj in gc.get_objects():
        if torch.is_tensor(obj):
            del obj
    gc.collect()
    torch.cuda.empty_cache()
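A short usage note for the helpers above (illustrative, not part of the file; it assumes the module is importable as torch_common and the seed value is arbitrary).

from torch_common import seed_everything, memory_cleanup

seed_everything(42)   # fix all RNG seeds before building the model
# ... train or evaluate a model here ...
memory_cleanup()      # release cached GPU tensors between runs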
21.321429
59
0.673367
80
597
4.9375
0.5625
0.081013
0.098734
0.096203
0
0
0
0
0
0
0
0.008565
0.217755
597
27
60
22.111111
0.837259
0.127303
0
0
0
0
0.027944
0
0
0
0
0
0
1
0.105263
false
0
0.263158
0
0.368421
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4793bd8d4530ee80fabe88563d6a3ddbecb48d2
6,713
py
Python
recipes/freeimage/all/conanfile.py
marsven/conan-center-index
d8bb4ad617cee02d8664e8341fa32cdf702e4284
[ "MIT" ]
null
null
null
recipes/freeimage/all/conanfile.py
marsven/conan-center-index
d8bb4ad617cee02d8664e8341fa32cdf702e4284
[ "MIT" ]
null
null
null
recipes/freeimage/all/conanfile.py
marsven/conan-center-index
d8bb4ad617cee02d8664e8341fa32cdf702e4284
[ "MIT" ]
null
null
null
from conans import ConanFile, CMake, tools import os import shutil required_conan_version = ">=1.43.0" class FreeImageConan(ConanFile): name = "freeimage" description = "Open Source library project for developers who would like to support popular graphics image formats"\ "like PNG, BMP, JPEG, TIFF and others as needed by today's multimedia applications." homepage = "https://freeimage.sourceforge.io" url = "https://github.com/conan-io/conan-center-index" license = "FreeImage", "GPL-3.0-or-later", "GPL-2.0-or-later" topics = ("freeimage", "image", "decoding", "graphics") generators = "cmake", "cmake_find_package" settings = "os", "arch", "compiler", "build_type" options = { "shared": [True, False], "fPIC": [True, False], "with_jpeg": [False, "libjpeg", "libjpeg-turbo"], "with_png": [True, False], "with_tiff": [True, False], "with_jpeg2000": [True, False], "with_openexr": [True, False], "with_eigen": [True, False], "with_webp": [True, False], "with_raw": [True, False], "with_jxr": [True, False], } default_options = { "shared": False, "fPIC": True, "with_jpeg": "libjpeg", "with_png": True, "with_tiff": True, "with_jpeg2000": True, "with_openexr": True, "with_eigen": True, "with_webp": True, "with_raw": True, "with_jxr": True, } _cmake = None @property def _source_subfolder(self): return "source_subfolder" @property def _build_subfolder(self): return "build_subfolder" def export_sources(self): self.copy("CMakeLists.txt") for patch in self.conan_data.get("patches", {}).get(self.version, []): self.copy(patch["patch_file"]) def config_options(self): if self.settings.os == "Windows": del self.options.fPIC def configure(self): tools.check_min_cppstd(self, "11") if self.options.shared: del self.options.fPIC self.output.warn("G3 plugin and JPEGTransform are disabled.") if self.options.with_jpeg is not None: if self.options.with_tiff: self.options["libtiff"].jpeg = self.options.with_jpeg def requirements(self): self.requires("zlib/1.2.11") if self.options.with_jpeg == "libjpeg": self.requires("libjpeg/9d") elif self.options.with_jpeg == "libjpeg-turbo": self.requires("libjpeg-turbo/2.1.2") if self.options.with_jpeg2000: self.requires("openjpeg/2.4.0") if self.options.with_png: self.requires("libpng/1.6.37") if self.options.with_webp: self.requires("libwebp/1.2.2") if self.options.with_openexr: self.requires("openexr/2.5.7") if self.options.with_raw: self.requires("libraw/0.20.2") if self.options.with_jxr: self.requires("jxrlib/cci.20170615") if self.options.with_tiff: self.requires("libtiff/4.3.0") def source(self): tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True) def _configure_cmake(self): if self._cmake: return self._cmake self._cmake = CMake(self) self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg != False self._cmake.definitions["WITH_OPENJPEG"] = self.options.with_jpeg2000 self._cmake.definitions["WITH_PNG"] = self.options.with_png self._cmake.definitions["WITH_WEBP"] = self.options.with_webp self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr self._cmake.definitions["WITH_RAW"] = self.options.with_raw self._cmake.definitions["WITH_JXR"] = self.options.with_jxr self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff self._cmake.configure(build_dir=self._build_subfolder) return self._cmake def build(self): tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibPNG")) tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibTIFF4")) tools.rmdir(os.path.join(self._source_subfolder, "Source", 
"LibOpenJPEG")) tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibJXR")) tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibWebP")) tools.rmdir(os.path.join(self._source_subfolder, "Source", "LibRawLite")) tools.rmdir(os.path.join(self._source_subfolder, "Source", "OpenEXR")) for patch in self.conan_data.get("patches", {}).get(self.version, {}): tools.patch(**patch) cmake = self._configure_cmake() cmake.build() def package(self): cmake = self._configure_cmake() cmake.install() self.copy("license-fi.txt", dst="licenses", src=self._source_subfolder) self.copy("license-gplv3.txt", dst="licenses", src=self._source_subfolder) self.copy("license-gplv2.txt", dst="licenses", src=self._source_subfolder) def package_info(self): def imageformats_deps(): components = [] components.append("zlib::zlib") if self.options.with_jpeg: components.append("{0}::{0}".format(self.options.with_jpeg)) if self.options.with_jpeg2000: components.append("openjpeg::openjpeg") if self.options.with_png: components.append("libpng::libpng") if self.options.with_webp: components.append("libwebp::libwebp") if self.options.with_openexr: components.append("openexr::openexr") if self.options.with_raw: components.append("libraw::libraw") if self.options.with_jxr: components.append("jxrlib::jxrlib") if self.options.with_tiff: components.append("libtiff::libtiff") return components self.cpp_info.names["pkg_config"] = "freeimage" self.cpp_info.names["cmake_find_package"] = "FreeImage" self.cpp_info.names["cmake_find_package_multi"] = "FreeImage" self.cpp_info.components["FreeImage"].libs = ["freeimage"] self.cpp_info.components["FreeImage"].requires = imageformats_deps() self.cpp_info.components["FreeImagePlus"].libs = ["freeimageplus"] self.cpp_info.components["FreeImagePlus"].requires = ["FreeImage"] if not self.options.shared: self.cpp_info.components["FreeImage"].defines.append("FREEIMAGE_LIB")
40.439759
120
0.619097
791
6,713
5.078382
0.219975
0.093104
0.10829
0.076176
0.379885
0.188449
0.156585
0.147623
0.127209
0.048793
0
0.014162
0.242663
6,713
165
121
40.684848
0.775964
0
0
0.156463
0
0
0.216148
0.003575
0
0
0
0
0
1
0.081633
false
0
0.020408
0.013605
0.217687
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47944bb4b7b60683bb6b4d4d72854dfc4c98c2a
110,180
py
Python
src/google/appengine/datastore/datastore_query.py
myelin/appengine-python-standard
2a99acd114f7cdd66fbad9bfd185384eef847c84
[ "Apache-2.0" ]
null
null
null
src/google/appengine/datastore/datastore_query.py
myelin/appengine-python-standard
2a99acd114f7cdd66fbad9bfd185384eef847c84
[ "Apache-2.0" ]
null
null
null
src/google/appengine/datastore/datastore_query.py
myelin/appengine-python-standard
2a99acd114f7cdd66fbad9bfd185384eef847c84
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # # Copyright 2007 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """A thin wrapper around datastore query RPC calls. This provides wrappers around the internal only datastore_pb library and is designed to be the lowest-level API to be used by all Python datastore client libraries for executing queries. It provides a layer of protection so the actual RPC syntax can change without affecting client libraries. Any class, function, field or argument starting with an '_' is for INTERNAL use only and should not be used by developers! """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import base64 import collections import functools import pickle import six from google.appengine.api import cmp_compat from google.appengine.api import datastore_errors from google.appengine.api import datastore_types from google.appengine.datastore import datastore_index from google.appengine.datastore import datastore_pb from google.appengine.datastore import datastore_pbs from google.appengine.datastore import datastore_rpc from google.protobuf import message from google.appengine.datastore import entity_bytes_pb2 as entity_pb2 __all__ = ['Batch', 'Batcher', 'CompositeFilter', 'CompositeOrder', 'CorrelationFilter', 'Cursor', 'FetchOptions', 'FilterPredicate', 'Order', 'PropertyFilter', 'PropertyOrder', 'Query', 'QueryOptions', 'ResultsIterator', 'make_filter', 'apply_query', 'inject_results'] if datastore_pbs._CLOUD_DATASTORE_ENABLED: from google.appengine.datastore.datastore_pbs import googledatastore class _BaseComponent(object): """A base class for query components. Currently just implements basic == and != functions. """ def __eq__(self, other): if self.__class__ is not other.__class__: return NotImplemented return self is other or self.__dict__ == other.__dict__ def __ne__(self, other): equal = self.__eq__(other) if equal is NotImplemented: return equal return not equal def make_filter(name, op, values): """Constructs a FilterPredicate from the given name, op and values. Args: name: A non-empty string, the name of the property to filter. op: One of PropertyFilter._OPERATORS.keys(), the operator to use. values: A supported value, the value to compare against. Returns: if values is a list, a CompositeFilter that uses AND to combine all values, otherwise a PropertyFilter for the single value. Raises: datastore_errors.BadPropertyError: if the property name is invalid. datastore_errors.BadValueError: if the property did not validate correctly or the value was an empty list. Other exception types (like OverflowError): if the property value does not meet type-specific criteria. 
""" datastore_types.ValidateProperty(name, values) properties = datastore_types.ToPropertyPb(name, values) if isinstance(properties, list): filters = [PropertyFilter(op, prop) for prop in properties] return CompositeFilter(CompositeFilter.AND, filters) else: return PropertyFilter(op, properties) def _make_key_value_map(entity, property_names): """Extracts key values from the given entity. Args: entity: The entity_pb2.EntityProto to extract values from. property_names: The names of the properties from which to extract values. Returns: A dict mapping property names to a lists of key values. """ value_map = dict((six.ensure_text(name), []) for name in property_names) for prop in entity.property: prop_name = six.ensure_text(prop.name) if prop_name in value_map: value_map[prop_name].append( datastore_types.PropertyValueToKeyValue(prop.value)) key_prop = six.ensure_text(datastore_types.KEY_SPECIAL_PROPERTY) if key_prop in value_map: value_map[key_prop] = [datastore_types.ReferenceToKeyValue(entity.key)] return value_map class _PropertyComponent(_BaseComponent): """A component that operates on a specific set of properties.""" def _get_prop_names(self): """Returns a set of property names used by the filter.""" raise NotImplementedError class FilterPredicate(_PropertyComponent): """An abstract base class for all query filters. All sub-classes must be immutable as these are often stored without creating a defensive copy. """ def __call__(self, entity): """Applies the filter predicate to the given entity. Args: entity: the datastore_pb.EntityProto to test. Returns: True if the given entity matches the filter, False otherwise. """ return self._apply(_make_key_value_map(entity, self._get_prop_names())) def _apply(self, key_value_map): """Apply the given component to the comparable value map. A filter matches a list of values if at least one value in the list matches the filter, for example: 'prop: [1, 2]' matches both 'prop = 1' and 'prop = 2' but not 'prop = 3' Note: the values are actually represented as tuples whose first item encodes the type; see datastore_types.PropertyValueToKeyValue(). Args: key_value_map: A dict mapping property names to a list of comparable values. Return: A boolean indicating if the given map matches the filter. """ raise NotImplementedError def _prune(self, key_value_map): """Removes values from the given map that do not match the filter. When doing a scan in the datastore, only index values that match the filters are seen. When multiple values that point to the same entity are seen, the entity only appears where the first value is found. This function removes all values that don't match the query so that the first value in the map is the same one the datastore would see first. Args: key_value_map: the comparable value map from which to remove values. Does not need to contain values for all filtered properties. Returns: A value that evaluates to False if every value in a single list was completely removed. This effectively applies the filter but is less efficient than _apply(). """ raise NotImplementedError def _to_pb(self): """Internal only function to generate a pb.""" raise NotImplementedError( 'This filter only supports in memory operations (%r)' % self) def _to_pbs(self): """Internal only function to generate a list of pbs.""" return [self._to_pb()] def _to_pb_v1(self, adapter): """Internal only function to generate a v1 pb. 
Args: adapter: A datastore_rpc.AbstractAdapter """ raise NotImplementedError( 'This filter only supports in memory operations (%r)' % self) class _SinglePropertyFilter(FilterPredicate): """Base class for a filter that operates on a single property.""" def _get_prop_name(self): """Returns the name of the property being filtered.""" raise NotImplementedError def _apply_to_value(self, value): """Apply the filter to the given value. Args: value: The comparable value to check. Returns: A boolean indicating if the given value matches the filter. """ raise NotImplementedError def _get_prop_names(self): return set([self._get_prop_name()]) def _apply(self, value_map): for other_value in value_map[self._get_prop_name()]: if self._apply_to_value(other_value): return True return False def _prune(self, value_map): if self._get_prop_name() not in value_map: return True values = [value for value in value_map[self._get_prop_name()] if self._apply_to_value(value)] value_map[self._get_prop_name()] = values return bool(values) class PropertyFilter(_SinglePropertyFilter): """An immutable filter predicate that constrains a single property.""" _OPERATORS = { '<': datastore_pb.Query.Filter.LESS_THAN, '<=': datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL, '>': datastore_pb.Query.Filter.GREATER_THAN, '>=': datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL, '=': datastore_pb.Query.Filter.EQUAL, } _OPERATORS_INVERSE = dict((value, key) for key, value in _OPERATORS.items()) _OPERATORS_TO_PYTHON_OPERATOR = { datastore_pb.Query.Filter.LESS_THAN: '<', datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL: '<=', datastore_pb.Query.Filter.GREATER_THAN: '>', datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL: '>=', datastore_pb.Query.Filter.EQUAL: '==', } _INEQUALITY_OPERATORS = frozenset(['<', '<=', '>', '>=']) _INEQUALITY_OPERATORS_ENUM = frozenset([ datastore_pb.Query.Filter.LESS_THAN, datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL, datastore_pb.Query.Filter.GREATER_THAN, datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL, ]) _UPPERBOUND_INEQUALITY_OPERATORS = frozenset(['<', '<=']) def __init__(self, op, value): """Constructor. Args: op: A string representing the operator to use. value: A entity_pb2.Property, the property and value to compare against. Raises: datastore_errors.BadArgumentError if op has an unsupported value or value is not an entity_pb2.Property. 
""" if op not in self._OPERATORS: raise datastore_errors.BadArgumentError('unknown operator: %r' % (op,)) if not isinstance(value, entity_pb2.Property): raise datastore_errors.BadArgumentError( 'value argument should be entity_pb2.Property (%r)' % (value,)) super(PropertyFilter, self).__init__() self._filter = datastore_pb.Query.Filter() self._filter.op = self._OPERATORS[op] self._filter.property.add().CopyFrom(value) @property def op(self): raw_op = self._filter.op return self._OPERATORS_INVERSE.get(raw_op, str(raw_op)) @property def value(self): return self._filter.property[0] def __repr__(self): prop = self.value name = prop.name value = datastore_types.FromPropertyPb(prop) if six.PY2 and isinstance(value, long): value = int(value) return '%s(%r, <%r, %r>)' % (self.__class__.__name__, six.ensure_str( self.op), six.ensure_str(name), value) def _get_prop_name(self): return self._filter.property[0].name def _apply_to_value(self, value): if not hasattr(self, '_cmp_value'): if self._filter.op == datastore_pb.Query.Filter.EXISTS: return True self._cmp_value = datastore_types.PropertyValueToKeyValue( self._filter.property[0].value) self._condition = ('value %s self._cmp_value' % self._OPERATORS_TO_PYTHON_OPERATOR[self._filter.op]) return eval(self._condition) def _has_inequality(self): """Returns True if the filter predicate contains inequalities filters.""" return self._filter.op in self._INEQUALITY_OPERATORS_ENUM @classmethod def _from_pb(cls, filter_pb): self = cls.__new__(cls) self._filter = filter_pb return self def _to_pb(self): """Returns the internal only pb representation.""" return self._filter def _to_pb_v1(self, adapter): """Returns a googledatastore.Filter representation of the filter. Args: adapter: A datastore_rpc.AbstractAdapter """ filter_pb = googledatastore.Filter() prop_filter_pb = filter_pb.property_filter adapter.get_query_converter()._v3_filter_to_v1_property_filter( self._filter, prop_filter_pb) return filter_pb def __getstate__(self): raise pickle.PicklingError( 'Pickling of datastore_query.PropertyFilter is unsupported.') def __eq__(self, other): if self.__class__ is not other.__class__: if other.__class__ is _PropertyRangeFilter: return [self._filter] == other._to_pbs() return NotImplemented return self._filter == other._filter class _PropertyRangeFilter(_SinglePropertyFilter): """A filter predicate that represents a range of values. Since we allow multi-valued properties there is a large difference between "x > 0 AND x < 1" and "0 < x < 1." An entity with x = [-1, 2] will match the first but not the second. Since the datastore only allows a single inequality filter, multiple in-equality filters are merged into a single range filter in the datastore (unlike equality filters). This class is used by datastore_query.CompositeFilter to implement the same logic. """ _start_key_value = None _end_key_value = None @datastore_rpc._positional(1) def __init__(self, start=None, start_incl=True, end=None, end_incl=True): """Constructs a range filter using start and end properties. Args: start: A entity_pb2.Property to use as a lower bound or None to indicate no lower bound. start_incl: A boolean that indicates if the lower bound is inclusive. end: A entity_pb2.Property to use as an upper bound or None to indicate no upper bound. end_incl: A boolean that indicates if the upper bound is inclusive. 
""" if start is not None and not isinstance(start, entity_pb2.Property): raise datastore_errors.BadArgumentError( 'start argument should be entity_pb2.Property (%r)' % (start,)) if end is not None and not isinstance(end, entity_pb2.Property): raise datastore_errors.BadArgumentError( 'start argument should be entity_pb2.Property (%r)' % (end,)) if start and end and start.name != end.name: raise datastore_errors.BadArgumentError( 'start and end arguments must be on the same property (%s != %s)' % (start.name, end.name)) if not start and not end: raise datastore_errors.BadArgumentError( 'Unbounded ranges are not supported.') super(_PropertyRangeFilter, self).__init__() self._start = start self._start_incl = start_incl self._end = end self._end_incl = end_incl @classmethod def from_property_filter(cls, prop_filter): op = prop_filter._filter.op if op == datastore_pb.Query.Filter.GREATER_THAN: return cls(start=prop_filter._filter.property[0], start_incl=False) elif op == datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL: return cls(start=prop_filter._filter.property[0]) elif op == datastore_pb.Query.Filter.LESS_THAN: return cls(end=prop_filter._filter.property[0], end_incl=False) elif op == datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL: return cls(end=prop_filter._filter.property[0]) else: raise datastore_errors.BadArgumentError( 'Unsupported operator (%s)' % (op,)) def intersect(self, other): """Returns a filter representing the intersection of self and other.""" if isinstance(other, PropertyFilter): other = self.from_property_filter(other) elif not isinstance(other, _PropertyRangeFilter): raise datastore_errors.BadArgumentError( 'other argument should be a _PropertyRangeFilter (%r)' % (other,)) if other._get_prop_name() != self._get_prop_name(): raise datastore_errors.BadArgumentError( 'other argument must be on the same property (%s != %s)' % (other._get_prop_name(), self._get_prop_name())) start_source = None if other._start: if self._start: result = cmp_compat.cmp( self._get_start_key_value(), other._get_start_key_value()) if result == 0: result = cmp_compat.cmp(other._start_incl, self._start_incl) if result > 0: start_source = self elif result < 0: start_source = other else: start_source = other elif self._start: start_source = self end_source = None if other._end: if self._end: result = cmp_compat.cmp( self._get_end_key_value(), other._get_end_key_value()) if result == 0: result = cmp_compat.cmp(self._end_incl, other._end_incl) if result < 0: end_source = self elif result > 0: end_source = other else: end_source = other elif self._end: end_source = self if start_source: if end_source in (start_source, None): return start_source result = _PropertyRangeFilter(start=start_source._start, start_incl=start_source._start_incl, end=end_source._end, end_incl=end_source._end_incl) result._start_key_value = start_source._start_key_value result._end_key_value = end_source._end_key_value return result else: return end_source or self def _get_start_key_value(self): if self._start_key_value is None: self._start_key_value = datastore_types.PropertyValueToKeyValue( self._start.value) return self._start_key_value def _get_end_key_value(self): if self._end_key_value is None: self._end_key_value = datastore_types.PropertyValueToKeyValue( self._end.value) return self._end_key_value def _apply_to_value(self, value): """Apply the filter to the given value. Args: value: The comparable value to check. Returns: A boolean indicating if the given value matches the filter. 
""" if self._start: result = cmp_compat.cmp(self._get_start_key_value(), value) if result > 0 or (result == 0 and not self._start_incl): return False if self._end: result = cmp_compat.cmp(self._get_end_key_value(), value) if result < 0 or (result == 0 and not self._end_incl): return False return True def _get_prop_name(self): if self._start: return self._start.name if self._end: return self._end.name assert False def _to_pbs(self): pbs = [] if self._start: if self._start_incl: op = datastore_pb.Query.Filter.GREATER_THAN_OR_EQUAL else: op = datastore_pb.Query.Filter.GREATER_THAN pb = datastore_pb.Query.Filter() pb.op = op pb.property.add().CopyFrom(self._start) pbs.append(pb) if self._end: if self._end_incl: op = datastore_pb.Query.Filter.LESS_THAN_OR_EQUAL else: op = datastore_pb.Query.Filter.LESS_THAN pb = datastore_pb.Query.Filter() pb.op = op pb.property.add().CopyFrom(self._end) pbs.append(pb) return pbs def _to_pb_v1(self, adapter): """Returns a googledatastore.Filter representation of the filter. Args: adapter: A datastore_rpc.AbstractAdapter. """ filter_pb = googledatastore.Filter() composite_filter = filter_pb.composite_filter composite_filter.op = googledatastore.CompositeFilter.AND if self._start: if self._start_incl: op = googledatastore.PropertyFilter.GREATER_THAN_OR_EQUAL else: op = googledatastore.PropertyFilter.GREATER_THAN pb = composite_filter.filters.add().property_filter pb.op = op pb.property.name = self._start.name adapter.get_entity_converter().v3_property_to_v1_value( self._start, True, pb.value) if self._end: if self._end_incl: op = googledatastore.PropertyFilter.LESS_THAN_OR_EQUAL else: op = googledatastore.PropertyFilter.LESS_THAN pb = composite_filter.filters.add().property_filter pb.op = op pb.property.name = self._end.name adapter.get_entity_converter().v3_property_to_v1_value( self._end, True, pb.value) return filter_pb def __getstate__(self): raise pickle.PicklingError( 'Pickling of %r is unsupported.' % self) def __eq__(self, other): if self.__class__ is not other.__class__: return NotImplemented return (self._start == other._start and self._end == other._end and (self._start_incl == other._start_incl or self._start is None) and (self._end_incl == other._end_incl or self._end is None)) class _PropertyExistsFilter(FilterPredicate): """A FilterPredicate that matches entities containing specific properties. Only works as an in-memory filter. Used internally to filter out entities that don't have all properties in a given Order. """ def __init__(self, names): super(_PropertyExistsFilter, self).__init__() self._names = frozenset(names) def _apply(self, value_map): for name in self._names: if not value_map.get(name): return False return True def _get_prop_names(self): return self._names def _prune(self, _): raise NotImplementedError def __getstate__(self): raise pickle.PicklingError( 'Pickling of %r is unsupported.' % self) class CorrelationFilter(FilterPredicate): """A filter that isolates correlated values and applies a sub-filter on them. This filter assumes that every property used by the sub-filter should be grouped before being passed to the sub-filter. The default grouping puts each value in its own group. Consider: e = {a: [1, 2], b: [2, 1, 3], c: 4} A correlation filter with a sub-filter that operates on (a, b) will be tested against the following 3 sets of values: {a: 1, b: 2} {a: 2, b: 1} {b: 3} In this case CorrelationFilter('a = 2 AND b = 2') won't match this entity but CorrelationFilter('a = 2 AND b = 1') will. 
To apply an uncorrelated filter on c, the filter must be applied in parallel to the correlation filter. For example: CompositeFilter(AND, [CorrelationFilter('a = 2 AND b = 1'), 'c = 3']) If 'c = 3' was included in the correlation filter, c would be grouped as well. This would result in the following values: {a: 1, b: 2, c: 3} {a: 2, b: 1} {b: 3} If any set of correlated values match the sub-filter then the entity matches the correlation filter. """ def __init__(self, subfilter): """Constructor. Args: subfilter: A FilterPredicate to apply to the correlated values """ self._subfilter = subfilter @property def subfilter(self): return self._subfilter def __repr__(self): return '%s(%r)' % (self.__class__.__name__, self.subfilter) def _apply(self, value_map): base_map = dict((prop, []) for prop in self._get_prop_names()) value_maps = [] for prop in base_map: grouped = self._group_values(prop, value_map[prop]) while len(value_maps) < len(grouped): value_maps.append(base_map.copy()) for value, m in six.moves.zip(grouped, value_maps): m[prop] = value return self._apply_correlated(value_maps) def _apply_correlated(self, value_maps): """Applies sub-filter to the correlated value maps. The default implementation matches when any value_map in value_maps matches the sub-filter. Args: value_maps: A list of correlated value_maps. Returns: True if any the entity matches the correlation filter. """ for map in value_maps: if self._subfilter._apply(map): return True return False def _group_values(self, prop, values): """A function that groups the given values. Override this function to introduce custom grouping logic. The default implementation assumes each value belongs in its own group. Args: prop: The name of the property who's values are being grouped. values: A list of opaque values. Returns: A list of lists of grouped values. """ return [[value] for value in values] def _get_prop_names(self): return self._subfilter._get_prop_names() class CompositeFilter(FilterPredicate): """An immutable filter predicate that combines other predicates. This class proactively merges sub-filters that are combined using the same operator. For example: CompositeFilter(AND, [f1, f2, CompositeFilter(AND, [f3, f4]), f5, f6]) is equivalent to: CompositeFilter(AND, [f1, f2, f3, f4, f5, f6]) Currently filters can only be combined using an AND operator. """ AND = 'and' _OPERATORS = frozenset([AND]) def __init__(self, op, filters): """Constructor. Args: op: The operator to use to combine the given filters filters: A list of one or more filters to combine Raises: datastore_errors.BadArgumentError if op is not in CompsiteFilter.OPERATORS or filters is not a non-empty list containing only FilterPredicates. 
""" if not op in self._OPERATORS: raise datastore_errors.BadArgumentError('unknown operator (%s)' % (op,)) if not filters or not isinstance(filters, (list, tuple)): raise datastore_errors.BadArgumentError( 'filters argument should be a non-empty list (%r)' % (filters,)) super(CompositeFilter, self).__init__() self._op = op flattened = [] for f in filters: if isinstance(f, CompositeFilter) and f._op == self._op: flattened.extend(f._filters) elif isinstance(f, FilterPredicate): flattened.append(f) else: raise datastore_errors.BadArgumentError( 'filters argument must be a list of FilterPredicates, found (%r)' % (f,)) if op == self.AND: filters = flattened flattened = [] ineq_map = {} for f in filters: if (isinstance(f, _PropertyRangeFilter) or (isinstance(f, PropertyFilter) and f._has_inequality())): name = f._get_prop_name() index = ineq_map.get(name) if index is not None: range_filter = flattened[index] flattened[index] = range_filter.intersect(f) else: if isinstance(f, PropertyFilter): range_filter = _PropertyRangeFilter.from_property_filter(f) else: range_filter = f ineq_map[name] = len(flattened) flattened.append(range_filter) else: flattened.append(f) self._filters = tuple(flattened) @property def op(self): return self._op @property def filters(self): return self._filters def __repr__(self): op = self.op if op == self.AND: op = 'AND' else: op = str(op) return '%s(%s, %r)' % (self.__class__.__name__, op, list(self.filters)) def _get_prop_names(self): names = set() for f in self._filters: names |= f._get_prop_names() return names def _apply(self, value_map): if self._op == self.AND: for f in self._filters: if not f._apply(value_map): return False return True raise NotImplementedError def _prune(self, value_map): if self._op == self.AND: matches = collections.defaultdict(set) for f in self._filters: props = f._get_prop_names() local_value_map = dict((k, v) for k, v in value_map.items() if k in props) if not f._prune(local_value_map): return False for (prop, values) in local_value_map.items(): matches[prop].update(values) for prop, value_set in matches.items(): value_map[prop] = sorted(value_set) return True raise NotImplementedError def _to_pbs(self): """Returns the internal only pb representation.""" pbs = [] for f in self._filters: pbs.extend(f._to_pbs()) return pbs def _to_pb_v1(self, adapter): """Returns a googledatastore.Filter. 
Args: adapter: A datastore_rpc.AbstractAdapter """ if not self._filters: return None if len(self._filters) == 1: return self._filters[0]._to_pb_v1(adapter) pb = googledatastore.Filter() comp_pb = pb.composite_filter if self.op == self.AND: comp_pb.op = googledatastore.CompositeFilter.AND else: raise datastore_errors.BadArgumentError( 'Datastore V4 only supports CompositeFilter with AND operator.') for f in self._filters: comp_pb.filters.add().CopyFrom(f._to_pb_v1(adapter)) return pb def __eq__(self, other): if self.__class__ is other.__class__: return super(CompositeFilter, self).__eq__(other) if len(self._filters) == 1: result = self._filters[0].__eq__(other) if result is NotImplemented and hasattr(other, '__eq__'): return other.__eq__(self._filters[0]) return result return NotImplemented class _IgnoreFilter(_SinglePropertyFilter): """A filter that removes all entities with the given keys.""" def __init__(self, key_value_set): super(_IgnoreFilter, self).__init__() self._keys = key_value_set def _get_prop_name(self): return datastore_types.KEY_SPECIAL_PROPERTY def _apply_to_value(self, value): return value not in self._keys class _DedupingFilter(_IgnoreFilter): """A filter that removes duplicate keys.""" def __init__(self, key_value_set=None): super(_DedupingFilter, self).__init__(key_value_set or set()) def _apply_to_value(self, value): if super(_DedupingFilter, self)._apply_to_value(value): self._keys.add(value) return True return False class Order(_PropertyComponent): """A base class that represents a sort order on a query. All sub-classes must be immutable as these are often stored without creating a defensive copying. This class can be used as either the cmp or key arg in sorted() or list.sort(). To provide a stable ordering a trailing key ascending order is always used. """ @datastore_rpc._positional(1) def reversed(self, group_by=None): """Constructs an order representing the reverse of the current order. This function takes into account the effects of orders on properties not in the group_by clause of a query. For example, consider: SELECT A, First(B) ... GROUP BY A ORDER BY A, B Changing the order of B would effect which value is listed in the 'First(B)' column which would actually change the results instead of just reversing them. Args: group_by: If specified, only orders on properties in group_by will be reversed. Returns: A new order representing the reverse direction. """ raise NotImplementedError def _key(self, lhs_value_map): """Creates a key for the given value map.""" raise NotImplementedError def _cmp(self, lhs_value_map, rhs_value_map): """Compares the given value maps.""" raise NotImplementedError def _to_pb(self): """Internal only function to generate a filter pb.""" raise NotImplementedError def _to_pb_v1(self, adapter): """Internal only function to generate a v1 filter pb. Args: adapter: A datastore_rpc.AbstractAdapter """ raise NotImplementedError def key_for_filter(self, filter_predicate): if filter_predicate: return lambda x: self.key(x, filter_predicate) return self.key def cmp_for_filter(self, filter_predicate): if filter_predicate: return lambda x, y: self.cmp(x, y, filter_predicate) return self.cmp def key(self, entity, filter_predicate=None): """Constructs a "key" value for the given entity based on the current order. This function can be used as the key argument for list.sort() and sorted(). Args: entity: The entity_pb2.EntityProto to convert filter_predicate: A FilterPredicate used to prune values before comparing entities or None. 
Returns: A key value that identifies the position of the entity when sorted by the current order. """ names = self._get_prop_names() names.add(datastore_types.KEY_SPECIAL_PROPERTY) if filter_predicate is not None: names |= filter_predicate._get_prop_names() value_map = _make_key_value_map(entity, names) if filter_predicate is not None: filter_predicate._prune(value_map) return (self._key(value_map), value_map[datastore_types.KEY_SPECIAL_PROPERTY]) def cmp(self, lhs, rhs, filter_predicate=None): """Compares the given values taking into account any filters. This function can be used as the cmp argument for list.sort() and sorted(). This function is slightly more efficient that Order.key when comparing two entities, however it is much less efficient when sorting a list of entities. Args: lhs: An entity_pb2.EntityProto rhs: An entity_pb2.EntityProto filter_predicate: A FilterPredicate used to prune values before comparing entities or None. Returns: An integer <, = or > 0 representing the operator that goes in between lhs and rhs that to create a true statement. """ names = self._get_prop_names() if filter_predicate is not None: names |= filter_predicate._get_prop_names() lhs_value_map = _make_key_value_map(lhs, names) rhs_value_map = _make_key_value_map(rhs, names) if filter_predicate is not None: filter_predicate._prune(lhs_value_map) filter_predicate._prune(rhs_value_map) result = self._cmp(lhs_value_map, rhs_value_map) if result: return result if not lhs.HasField('key') and not rhs.HasField('key'): return 0 lhs_key = (lhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or datastore_types.ReferenceToKeyValue(lhs.key)) rhs_key = (rhs_value_map.get(datastore_types.KEY_SPECIAL_PROPERTY) or datastore_types.ReferenceToKeyValue(rhs.key)) return cmp_compat.cmp(lhs_key, rhs_key) @cmp_compat.total_ordering_from_cmp class _ReverseOrder(_BaseComponent): """Reverses the comparison for the given object.""" def __init__(self, obj): """Constructor for _ReverseOrder. Args: obj: Any comparable and hashable object. """ super(_ReverseOrder, self).__init__() self._obj = obj def __hash__(self): return hash(self._obj) def __cmp__(self, other): assert self.__class__ == other.__class__, ( 'A datastore_query._ReverseOrder object can only be compared to ' 'an object of the same type.') return -cmp_compat.cmp(self._obj, other._obj) class PropertyOrder(Order): """An immutable class that represents a sort order for a single property.""" ASCENDING = datastore_pb.Query.Order.ASCENDING DESCENDING = datastore_pb.Query.Order.DESCENDING _DIRECTIONS = frozenset([ASCENDING, DESCENDING]) def __init__(self, prop, direction=ASCENDING): """Constructor. Args: prop: the name of the prop by which to sort. direction: the direction in which to sort the given prop. Raises: datastore_errors.BadArgumentError if the prop name or direction is invalid. 
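    For example (an illustrative sketch; 'height' is a hypothetical
    property name):

      order = PropertyOrder('height', PropertyOrder.DESCENDING)
      ascending = order.reversed()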
""" datastore_types.ValidateString(prop, 'prop', datastore_errors.BadArgumentError) if not direction in self._DIRECTIONS: raise datastore_errors.BadArgumentError('unknown direction: %r' % (direction,)) super(PropertyOrder, self).__init__() self.__order = datastore_pb.Query.Order() self.__order.property = six.ensure_binary(prop, 'utf-8') self.__order.direction = direction @property def prop(self): return self.__order.property @property def direction(self): return self.__order.direction def __repr__(self): extra = '' if self.direction == self.DESCENDING: extra = ', DESCENDING' name = repr(six.ensure_str(self.prop))[1:-1] return '%s(<%s>%s)' % (self.__class__.__name__, name, extra) @datastore_rpc._positional(1) def reversed(self, group_by=None): if group_by and self.__order.property not in group_by: return self if self.__order.direction == self.ASCENDING: return PropertyOrder( six.ensure_text(self.__order.property), self.DESCENDING) else: return PropertyOrder( six.ensure_text(self.__order.property), self.ASCENDING) def _get_prop_names(self): return set([self.__order.property]) def _key(self, lhs_value_map): lhs_values = lhs_value_map[self.__order.property] if not lhs_values: raise datastore_errors.BadArgumentError( 'Missing value for property (%s)' % self.__order.property) if self.__order.direction == self.ASCENDING: return min(lhs_values) else: return _ReverseOrder(max(lhs_values)) def _cmp(self, lhs_value_map, rhs_value_map): lhs_values = lhs_value_map[self.__order.property] rhs_values = rhs_value_map[self.__order.property] if not lhs_values and not rhs_values: return 0 if not lhs_values: raise datastore_errors.BadArgumentError( 'LHS missing value for property (%s)' % self.__order.property) if not rhs_values: raise datastore_errors.BadArgumentError( 'RHS missing value for property (%s)' % self.__order.property) if self.__order.direction == self.ASCENDING: return cmp_compat.cmp(min(lhs_values), min(rhs_values)) else: return cmp_compat.cmp(max(rhs_values), max(lhs_values)) @classmethod def _from_pb(cls, order_pb): self = cls.__new__(cls) self.__order = order_pb return self def _to_pb(self): """Returns the internal only pb representation.""" return self.__order def _to_pb_v1(self, adapter): """Returns a googledatastore.PropertyOrder representation of the order. Args: adapter: A datastore_rpc.AbstractAdapter. """ v1_order = googledatastore.PropertyOrder() adapter.get_query_converter().v3_order_to_v1_order(self.__order, v1_order) return v1_order def __getstate__(self): raise pickle.PicklingError( 'Pickling of datastore_query.PropertyOrder is unsupported.') class CompositeOrder(Order): """An immutable class that represents a sequence of Orders. This class proactively flattens sub-orders that are of type CompositeOrder. For example: CompositeOrder([O1, CompositeOrder([02, 03]), O4]) is equivalent to: CompositeOrder([O1, 02, 03, O4]) """ def __init__(self, orders): """Constructor. Args: orders: A list of Orders which are applied in order. 
""" if not isinstance(orders, (list, tuple)): raise datastore_errors.BadArgumentError( 'orders argument should be list or tuple (%r)' % (orders,)) super(CompositeOrder, self).__init__() flattened = [] for order in orders: if isinstance(order, CompositeOrder): flattened.extend(order._orders) elif isinstance(order, Order): flattened.append(order) else: raise datastore_errors.BadArgumentError( 'orders argument should only contain Order (%r)' % (order,)) self._orders = tuple(flattened) @property def orders(self): return self._orders def __repr__(self): return '%s(%r)' % (self.__class__.__name__, list(self.orders)) @datastore_rpc._positional(1) def reversed(self, group_by=None): return CompositeOrder([order.reversed(group_by=group_by) for order in self._orders]) def _get_prop_names(self): names = set() for order in self._orders: names |= order._get_prop_names() return names def _key(self, lhs_value_map): result = [] for order in self._orders: result.append(order._key(lhs_value_map)) return tuple(result) def _cmp(self, lhs_value_map, rhs_value_map): for order in self._orders: result = order._cmp(lhs_value_map, rhs_value_map) if result != 0: return result return 0 def size(self): """Returns the number of sub-orders the instance contains.""" return len(self._orders) def _to_pbs(self): """Returns an ordered list of internal only pb representations.""" return [order._to_pb() for order in self._orders] def _to_pb_v1(self, adapter): """Returns an ordered list of googledatastore.PropertyOrder. Args: adapter: A datastore_rpc.AbstractAdapter """ return [order._to_pb_v1(adapter) for order in self._orders] def __eq__(self, other): if self.__class__ is other.__class__: return super(CompositeOrder, self).__eq__(other) if len(self._orders) == 1: result = self._orders[0].__eq__(other) if result is NotImplemented and hasattr(other, '__eq__'): return other.__eq__(self._orders[0]) return result return NotImplemented class FetchOptions(datastore_rpc.Configuration): """An immutable class that contains all options for fetching results. These options apply to any request that pulls results from a query. This class reserves the right to define configuration options of any name except those that start with 'user_'. External subclasses should only define function or variables with names that start with in 'user_'. Options are set by passing keyword arguments to the constructor corresponding to the configuration options defined below and in datastore_rpc.Configuration. This object can be used as the default config for a datastore_rpc.Connection but in that case some options will be ignored, see option documentation below for details. """ @datastore_rpc.ConfigOption def produce_cursors(value): """If a Cursor should be returned with the fetched results. Raises: datastore_errors.BadArgumentError if value is not a bool. """ if not isinstance(value, bool): raise datastore_errors.BadArgumentError( 'produce_cursors argument should be bool (%r)' % (value,)) return value @datastore_rpc.ConfigOption def offset(value): """The number of results to skip before returning the first result. Only applies to the first request it is used with and is ignored if present on datastore_rpc.Connection.config. Raises: datastore_errors.BadArgumentError if value is not a integer or is less than zero. """ datastore_types.ValidateInteger(value, 'offset', datastore_errors.BadArgumentError, zero_ok=True) return value @datastore_rpc.ConfigOption def batch_size(value): """The number of results to attempt to retrieve in a batch. 
Raises: datastore_errors.BadArgumentError if value is not a integer or is not greater than zero. """ datastore_types.ValidateInteger(value, 'batch_size', datastore_errors.BadArgumentError) return value class QueryOptions(FetchOptions): """An immutable class that contains all options for running a query. This class contains options that control execution process (deadline, batch_size, read_policy, etc) and what part of the query results are returned (keys_only, projection, offset, limit, etc) Options that control the contents of the query results are specified on the datastore_query.Query directly. This class reserves the right to define configuration options of any name except those that start with 'user_'. External subclasses should only define function or variables with names that start with in 'user_'. Options are set by passing keyword arguments to the constructor corresponding to the configuration options defined below and in FetchOptions and datastore_rpc.Configuration. This object can be used as the default config for a datastore_rpc.Connection but in that case some options will be ignored, see below for details. """ ORDER_FIRST = datastore_pb.Query.ORDER_FIRST ANCESTOR_FIRST = datastore_pb.Query.ANCESTOR_FIRST FILTER_FIRST = datastore_pb.Query.FILTER_FIRST _HINTS = frozenset([ORDER_FIRST, ANCESTOR_FIRST, FILTER_FIRST]) @datastore_rpc.ConfigOption def keys_only(value): """If the query should only return keys. Raises: datastore_errors.BadArgumentError if value is not a bool. """ if not isinstance(value, bool): raise datastore_errors.BadArgumentError( 'keys_only argument should be bool (%r)' % (value,)) return value @datastore_rpc.ConfigOption def projection(value): """A list or tuple of property names to project. If None, the entire entity is returned. Specifying a projection: - may change the index requirements for the given query; - will cause a partial entity to be returned; - will cause only entities that contain those properties to be returned; A partial entities only contain the property name and value for properties in the projection (meaning and multiple will not be set). They will also only contain a single value for any multi-valued property. However, if a multi-valued property is specified in the order, an inequality property, or the projected properties, the entity will be returned multiple times. Once for each unique combination of values. However, projection queries are significantly faster than normal queries. Raises: datastore_errors.BadArgumentError if value is empty or not a list or tuple of strings. """ if isinstance(value, list): value = tuple(value) elif not isinstance(value, tuple): raise datastore_errors.BadArgumentError( 'projection argument should be a list or tuple (%r)' % (value,)) if not value: raise datastore_errors.BadArgumentError( 'projection argument cannot be empty') for prop in value: if not isinstance(prop, six.string_types + (six.binary_type,)): raise datastore_errors.BadArgumentError( 'projection argument should contain only strings (%r)' % (prop,)) return value @datastore_rpc.ConfigOption def limit(value): """Limit on the number of results to return. Raises: datastore_errors.BadArgumentError if value is not an integer or is less than zero. """ datastore_types.ValidateInteger(value, 'limit', datastore_errors.BadArgumentError, zero_ok=True) return value @datastore_rpc.ConfigOption def prefetch_size(value): """Number of results to attempt to return on the initial request. 
Raises: datastore_errors.BadArgumentError if value is not an integer or is not greater than zero. """ datastore_types.ValidateInteger(value, 'prefetch_size', datastore_errors.BadArgumentError, zero_ok=True) return value @datastore_rpc.ConfigOption def start_cursor(value): """Cursor to use a start position. Ignored if present on datastore_rpc.Connection.config. Raises: datastore_errors.BadArgumentError if value is not a Cursor. """ if not isinstance(value, Cursor): raise datastore_errors.BadArgumentError( 'start_cursor argument should be datastore_query.Cursor (%r)' % (value,)) return value @datastore_rpc.ConfigOption def end_cursor(value): """Cursor to use as an end position. Ignored if present on datastore_rpc.Connection.config. Raises: datastore_errors.BadArgumentError if value is not a Cursor. """ if not isinstance(value, Cursor): raise datastore_errors.BadArgumentError( 'end_cursor argument should be datastore_query.Cursor (%r)' % (value,)) return value @datastore_rpc.ConfigOption def hint(value): """Hint on how the datastore should plan the query. Raises: datastore_errors.BadArgumentError if value is not a known hint. """ if value not in QueryOptions._HINTS: raise datastore_errors.BadArgumentError('Unknown query hint (%r)' % (value,)) return value class Cursor(_BaseComponent): """An immutable class that represents a relative position in a query. The position denoted by a Cursor is relative to a result in a query even if the result has been removed from the given query. Usually to position immediately after the last result returned by a batch. A cursor should only be used on a query with an identical signature to the one that produced it or on a query with its sort order reversed. """ @datastore_rpc._positional(1) def __init__(self, urlsafe=None, _cursor_bytes=None): """Constructor. A Cursor constructed with no arguments points the first result of any query. If such a Cursor is used as an end_cursor no results will ever be returned. """ super(Cursor, self).__init__() if urlsafe is not None: if _cursor_bytes is not None: raise datastore_errors.BadArgumentError( 'Can only specify one of urlsafe and _cursor_bytes') _cursor_bytes = self._urlsafe_to_bytes(urlsafe) if _cursor_bytes is not None: self.__cursor_bytes = _cursor_bytes else: self.__cursor_bytes = six.binary_type() def __repr__(self): arg = six.ensure_str(self.to_websafe_string()) if arg: arg = '<%s>' % arg return '%s(%s)' % (self.__class__.__name__, arg) def reversed(self): """DEPRECATED. It is no longer necessary to call reversed() on cursors. A cursor returned by a query may also be used in a query whose sort order has been reversed. This method returns a copy of the original cursor. """ return Cursor(_cursor_bytes=self.__cursor_bytes) def to_bytes(self): """Serialize cursor as a byte string.""" return self.__cursor_bytes @staticmethod def from_bytes(cursor): """Gets a Cursor given its byte string serialized form. The serialized form of a cursor may change in a non-backwards compatible way. In this case cursors must be regenerated from a new Query request. Args: cursor: A serialized cursor as returned by .to_bytes. Returns: A Cursor. Raises: datastore_errors.BadValueError if the cursor argument does not represent a serialized cursor. """ return Cursor(_cursor_bytes=cursor) def urlsafe(self): """Serialize cursor as a websafe string. Returns: A base64-encoded serialized cursor. 
""" return base64.urlsafe_b64encode(self.to_bytes()) to_websafe_string = urlsafe @staticmethod def from_websafe_string(cursor): """Gets a Cursor given its websafe serialized form. The serialized form of a cursor may change in a non-backwards compatible way. In this case cursors must be regenerated from a new Query request. Args: cursor: A serialized cursor as returned by .to_websafe_string. Returns: A Cursor. Raises: datastore_errors.BadValueError if the cursor argument is not a string type of does not represent a serialized cursor. """ decoded_bytes = Cursor._urlsafe_to_bytes(cursor) return Cursor.from_bytes(decoded_bytes) @staticmethod def _urlsafe_to_bytes(cursor): if not isinstance(cursor, six.string_types + (six.binary_type,)): raise datastore_errors.BadValueError( 'cursor argument should be str or unicode (%r)' % (cursor,)) try: decoded_bytes = base64.urlsafe_b64decode( six.ensure_binary(cursor, 'ascii')) except (ValueError, TypeError) as e: raise datastore_errors.BadValueError( 'Invalid cursor %s. Details: %s' % (cursor, e)) return decoded_bytes def advance(self, offset, query, conn): """Advances a Cursor by the given offset. Args: offset: The amount to advance the current query. query: A Query identical to the one this cursor was created from. conn: The datastore_rpc.Connection to use. Returns: A new cursor that is advanced by offset using the given query. """ datastore_types.ValidateInteger(offset, 'offset', datastore_errors.BadArgumentError) if not isinstance(query, Query): raise datastore_errors.BadArgumentError( 'query argument should be datastore_query.Query (%r)' % (query,)) query_options = QueryOptions( start_cursor=self, offset=offset, limit=0, produce_cursors=True) return query.run(conn, query_options).next_batch( Batcher.AT_LEAST_OFFSET).cursor(0) def __setstate__(self, state): if '_Cursor__compiled_cursor' in state: self.__cursor_bytes = state['_Cursor__compiled_cursor'].SerializeToString() else: self.__dict__ = state class _QueryKeyFilter(_BaseComponent): """A class that implements the key filters available on a Query.""" @datastore_rpc._positional(1) def __init__(self, app=None, namespace=None, kind=None, ancestor=None): """Constructs a _QueryKeyFilter. If app/namespace and ancestor are not defined, the app/namespace set in the environment is used. Args: app: a string representing the required app id or None. namespace: a string representing the required namespace or None. kind: a string representing the required kind or None. ancestor: a entity_pb2.Reference representing the required ancestor or None. Raises: datastore_erros.BadArgumentError if app and ancestor.app() do not match or an unexpected type is passed in for any argument. 
""" if kind is not None: datastore_types.ValidateString( kind, 'kind', datastore_errors.BadArgumentError) if ancestor is not None: if not isinstance(ancestor, entity_pb2.Reference): raise datastore_errors.BadArgumentError( 'ancestor argument should be entity_pb2.Reference (%r)' % (ancestor,)) ancestor_app = six.ensure_binary(ancestor.app) if app is None: app = ancestor_app elif six.ensure_binary(app) != ancestor_app: raise datastore_errors.BadArgumentError( 'ancestor argument should match app ("%r" != "%r")' % (ancestor.app, app)) ancestor_namespace = six.ensure_binary(ancestor.name_space) if namespace is None: namespace = ancestor_namespace elif six.ensure_binary(namespace) != ancestor_namespace: raise datastore_errors.BadArgumentError( 'ancestor argument should match namespace ("%r" != "%r")' % (six.ensure_binary(namespace), ancestor_namespace)) pb = entity_pb2.Reference() pb.CopyFrom(ancestor) ancestor = pb self.__ancestor = ancestor self.__path = list(ancestor.path.element) else: self.__ancestor = None self.__path = None super(_QueryKeyFilter, self).__init__() self.__app = six.ensure_text(datastore_types.ResolveAppId(app), 'utf-8') self.__namespace = ( six.ensure_text(datastore_types.ResolveNamespace(namespace), 'utf-8')) self.__kind = kind @property def app(self): return self.__app @property def namespace(self): return self.__namespace @property def kind(self): return self.__kind @property def ancestor(self): return self.__ancestor def __call__(self, entity_or_reference): """Apply the filter. Accepts either an entity or a reference to avoid the need to extract keys from entities when we have a list of entities (which is a common case). Args: entity_or_reference: Either an entity_pb2.EntityProto or entity_pb2.Reference. """ if isinstance(entity_or_reference, entity_pb2.Reference): key = entity_or_reference elif isinstance(entity_or_reference, entity_pb2.EntityProto): key = entity_or_reference.key else: raise datastore_errors.BadArgumentError( 'entity_or_reference argument must be an entity_pb2.EntityProto ' + six.ensure_str('or entity_pb2.Reference (%r)' % (entity_or_reference), 'utf-8')) return (six.ensure_text(key.app, 'utf-8') == self.__app and six.ensure_text(key.name_space, 'utf-8') == self.__namespace and (not self.__kind or key.path.element[-1].type == self.__kind) and (not self.__path or key.path.element[0:len(self.__path)] == self.__path)) def _to_pb(self): """Returns an internal pb representation.""" pb = datastore_pb.Query() pb.app = self.__app datastore_types.SetNamespace(pb, self.__namespace) if self.__kind is not None: pb.kind = self.__kind if self.__ancestor: ancestor = pb.ancestor ancestor.CopyFrom(self.__ancestor) return pb def _to_pb_v1(self, adapter): """Returns a v1 internal proto representation of the query key filter. Args: adapter: A datastore_rpc.AbstractAdapter. Returns: A tuple (googledatastore.RunQueryRequest, googledatastore.Filter). The second tuple value is a Filter representing the ancestor portion of the query. 
If there is no ancestor constraint, this value will be None """ pb = googledatastore.RunQueryRequest() partition_id = pb.partition_id partition_id.project_id = ( adapter.get_entity_converter().app_to_project_id(self.__app)) if self.__namespace: partition_id.namespace_id = self.__namespace if self.__kind is not None: pb.query.kind.add().name = self.__kind ancestor_filter = None if self.__ancestor: ancestor_filter = googledatastore.Filter() ancestor_prop_filter = ancestor_filter.property_filter ancestor_prop_filter.op = ( googledatastore.PropertyFilter.HAS_ANCESTOR) prop_pb = ancestor_prop_filter.property prop_pb.name = datastore_types.KEY_SPECIAL_PROPERTY adapter.get_entity_converter().v3_to_v1_key( self.ancestor, ancestor_prop_filter.value.key_value) return pb, ancestor_filter class _BaseQuery(_BaseComponent): """A base class for query implementations.""" def run(self, conn, query_options=None): """Runs the query using provided datastore_rpc.Connection. Args: conn: The datastore_rpc.Connection to use query_options: Optional query options to use Returns: A Batcher that implicitly fetches query results asynchronously. Raises: datastore_errors.BadArgumentError if any of the arguments are invalid. """ return Batcher(query_options, self.run_async(conn, query_options)) def run_async(self, conn, query_options=None): """Runs the query using the provided datastore_rpc.Connection. Args: conn: the datastore_rpc.Connection on which to run the query. query_options: Optional QueryOptions with which to run the query. Returns: An async object that can be used to grab the first Batch. Additional batches can be retrieved by calling Batch.next_batch/next_batch_async. Raises: datastore_errors.BadArgumentError if any of the arguments are invalid. """ raise NotImplementedError def __getstate__(self): raise pickle.PicklingError( 'Pickling of %r is unsupported.' % self) class Query(_BaseQuery): """An immutable class that represents a query signature. A query signature consists of a source of entities (specified as app, namespace and optionally kind and ancestor) as well as a FilterPredicate, grouping and a desired ordering. """ @datastore_rpc._positional(1) def __init__(self, app=None, namespace=None, kind=None, ancestor=None, filter_predicate=None, group_by=None, order=None, read_time_us=None): """Constructor. Args: app: Optional app to query, derived from the environment if not specified. namespace: Optional namespace to query, derived from the environment if not specified. kind: Optional kind to query. ancestor: Optional ancestor to query, an entity_pb2.Reference. filter_predicate: Optional FilterPredicate by which to restrict the query. group_by: Optional list of properties to group the results by. order: Optional Order in which to return results. read_time_us: Optional timestamp to read the storage from. Internal use only. Raises: datastore_errors.BadArgumentError if any argument is invalid. 
""" super(Query, self).__init__() if filter_predicate is not None and not isinstance(filter_predicate, FilterPredicate): raise datastore_errors.BadArgumentError( 'filter_predicate should be datastore_query.FilterPredicate (%r)' % (filter_predicate,)) if isinstance(order, CompositeOrder): if order.size() == 0: order = None elif isinstance(order, Order): order = CompositeOrder([order]) elif order is not None: raise datastore_errors.BadArgumentError( 'order should be Order (%r)' % (order,)) if group_by is not None: if isinstance(group_by, list): group_by = tuple(group_by) elif not isinstance(group_by, tuple): raise datastore_errors.BadArgumentError( 'group_by argument should be a list or tuple (%r)' % (group_by,)) if not group_by: raise datastore_errors.BadArgumentError( 'group_by argument cannot be empty') for prop in group_by: if not isinstance(prop, six.string_types + (six.binary_type,)): raise datastore_errors.BadArgumentError( 'group_by argument should contain only strings (%r)' % (prop,)) self._key_filter = _QueryKeyFilter(app=app, namespace=namespace, kind=kind, ancestor=ancestor) self._order = order self._filter_predicate = filter_predicate self._group_by = group_by self._read_time_us = read_time_us @property def app(self): return self._key_filter.app @property def namespace(self): return self._key_filter.namespace @property def kind(self): return self._key_filter.kind @property def ancestor(self): return self._key_filter.ancestor @property def filter_predicate(self): return self._filter_predicate @property def order(self): return self._order @property def group_by(self): return self._group_by @property def read_time_us(self): return self._read_time_us def __repr__(self): args = [] args.append('app=%r' % six.ensure_str(self.app)) ns = self.namespace if ns: args.append('namespace=%r' % six.ensure_str(ns)) kind = self.kind if kind is not None: args.append('kind=%r' % six.ensure_str(kind)) ancestor = self.ancestor if ancestor is not None: websafe = base64.urlsafe_b64encode(ancestor.SerializeToString()) args.append('ancestor=<%s>' % six.ensure_str(websafe)) filter_predicate = self.filter_predicate if filter_predicate is not None: args.append('filter_predicate=%r' % filter_predicate) order = self.order if order is not None: args.append('order=%r' % order) group_by = self.group_by if group_by is not None: args.append('group_by=%r' % (tuple(six.ensure_str(x) for x in group_by),)) read_time_us = self.read_time_us if read_time_us is not None: args.append('read_time_us=%r' % (read_time_us,)) return '%s(%s)' % (self.__class__.__name__, ', '.join(args)) def run_async(self, conn, query_options=None): if not isinstance(conn, datastore_rpc.BaseConnection): raise datastore_errors.BadArgumentError( 'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,)) if not QueryOptions.is_configuration(query_options): query_options = QueryOptions(config=query_options) start_cursor = query_options.start_cursor if not start_cursor and query_options.produce_cursors: start_cursor = Cursor() if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1: req = self._to_pb_v1(conn, query_options) else: req = self._to_pb(conn, query_options) return Batch.create_async(self, query_options, conn, req, start_cursor=start_cursor) @classmethod def _from_pb(cls, query_pb): kind = query_pb.HasField('kind') and query_pb.kind or None ancestor = query_pb.HasField('ancestor') and query_pb.ancestor or None filter_predicate = None if query_pb.filter: filter_predicate = CompositeFilter( CompositeFilter.AND, 
[PropertyFilter._from_pb(filter_pb) for filter_pb in query_pb.filter]) order = None if query_pb.order: order = CompositeOrder( [PropertyOrder._from_pb(order_pb) for order_pb in query_pb.order]) group_by = None if query_pb.group_by_property_name: group_by = tuple( six.ensure_text(name) for name in query_pb.group_by_property_name) read_time_us = None if query_pb.HasField('read_time_us'): read_time_us = query_pb.read_time_us return Query( app=query_pb.app, namespace=query_pb.name_space, kind=kind, ancestor=ancestor, filter_predicate=filter_predicate, order=order, group_by=group_by, read_time_us=read_time_us) def _to_pb_v1(self, conn, query_options): """Returns a googledatastore.RunQueryRequest.""" v1_req, v1_ancestor_filter = self._key_filter._to_pb_v1(conn.adapter) v1_query = v1_req.query if self.filter_predicate: filter_predicate_pb = self._filter_predicate._to_pb_v1(conn.adapter) if self.filter_predicate and v1_ancestor_filter: comp_filter_pb = v1_query.filter.composite_filter comp_filter_pb.op = googledatastore.CompositeFilter.AND comp_filter_pb.filters.add().CopyFrom(filter_predicate_pb) comp_filter_pb.filters.add().CopyFrom(v1_ancestor_filter) elif self.filter_predicate: v1_query.filter.CopyFrom(filter_predicate_pb) elif v1_ancestor_filter: v1_query.filter.CopyFrom(v1_ancestor_filter) if self._order: for order in self._order._to_pb_v1(conn.adapter): v1_query.order.add().CopyFrom(order) if QueryOptions.keys_only(query_options, conn.config): prop_ref_pb = v1_query.projection.add().property prop_ref_pb.name = datastore_pbs.PROPERTY_NAME_KEY projection = QueryOptions.projection(query_options, conn.config) self._validate_projection_and_group_by(projection, self._group_by) if projection: for prop in projection: prop_ref_pb = v1_query.projection.add().property prop_ref_pb.name = prop if self._group_by: for group_by in self._group_by: v1_query.distinct_on.add().name = group_by limit = QueryOptions.limit(query_options, conn.config) if limit is not None: v1_query.limit.value = limit count = QueryOptions.batch_size(query_options, conn.config) if count is None: count = QueryOptions.prefetch_size(query_options, conn.config) if count is not None: pass if query_options.offset: v1_query.offset = query_options.offset if query_options.start_cursor is not None: v1_query.start_cursor = query_options.start_cursor.to_bytes() if query_options.end_cursor is not None: v1_query.end_cursor = query_options.end_cursor.to_bytes() conn._set_request_read_policy(v1_req, query_options) conn._set_request_transaction(v1_req) return v1_req def _to_pb(self, conn, query_options): """Returns the internal only pb representation.""" pb = self._key_filter._to_pb() if self._filter_predicate: for f in self._filter_predicate._to_pbs(): pb.filter.add().CopyFrom(f) if self._order: for order in self._order._to_pbs(): pb.order.add().CopyFrom(order) if QueryOptions.keys_only(query_options, conn.config): pb.keys_only = True projection = QueryOptions.projection(query_options, conn.config) self._validate_projection_and_group_by(projection, self._group_by) if projection: pb.property_name.extend(projection) if self._group_by: pb.group_by_property_name.extend(self._group_by) if QueryOptions.produce_cursors(query_options, conn.config): pb.compile = True limit = QueryOptions.limit(query_options, conn.config) if limit is not None: pb.limit = limit count = QueryOptions.prefetch_size(query_options, conn.config) if count is None: count = QueryOptions.batch_size(query_options, conn.config) if count is not None: pb.count = count if 
query_options.offset: pb.offset = query_options.offset if query_options.start_cursor is not None: try: pb.compiled_cursor.ParseFromString( query_options.start_cursor.to_bytes()) except message.DecodeError: raise datastore_errors.BadValueError('invalid cursor') if query_options.end_cursor is not None: try: pb.end_compiled_cursor.ParseFromString( query_options.end_cursor.to_bytes()) except message.DecodeError: raise datastore_errors.BadValueError('invalid cursor') if ((query_options.hint == QueryOptions.ORDER_FIRST and len(pb.order)) or (query_options.hint == QueryOptions.ANCESTOR_FIRST and pb.HasField('ancestor')) or (query_options.hint == QueryOptions.FILTER_FIRST and pb.filter)): pb.hint = query_options.hint if self.read_time_us is not None: pb.read_time_us = self.read_time_us conn._set_request_read_policy(pb, query_options) conn._set_request_transaction(pb) return pb def _validate_projection_and_group_by(self, projection, group_by): """Validates that a query's projection and group by match. Args: projection: A set of string property names in the projection. group_by: A set of string property names in the group by. Raises: datastore_errors.BadRequestError: if the projection and group by sets are not equal. """ if projection: if group_by: extra = set(projection) - set(group_by) if extra: raise datastore_errors.BadRequestError( 'projections includes properties not in the group_by argument: %s' % extra) elif group_by: raise datastore_errors.BadRequestError( 'cannot specify group_by without a projection') def apply_query(query, entities, _key=None): """Performs the given query on a set of in-memory results. This function can perform queries impossible in the datastore (e.g a query with multiple inequality filters on different properties) because all operations are done in memory. For queries that can also be executed on the the datastore, the results produced by this function may not use the same implicit ordering as the datastore. To ensure compatibility, explicit ordering must be used (e.g. 'ORDER BY ineq_prop, ..., __key__'). Order by __key__ should always be used when a consistent result is desired (unless there is a sort order on another globally unique property). Args: query: a datastore_query.Query to apply entities: a list of results, of arbitrary type, on which to apply the query. _key: a function that takes an element of the result array as an argument and must return an entity_pb2.EntityProto. If not specified, the identity function is used (and entities must be a list of entity_pb2.EntityProto). Returns: A subset of entities, filtered and ordered according to the query. 
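  For example (an illustrative sketch; entities is a list of
  entity_pb2.EntityProto whose app/namespace match the query's, and
  'Person'/'name' are hypothetical):

    query = Query(kind='Person', order=PropertyOrder('name'))
    matching = apply_query(query, entities)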
""" if not isinstance(query, Query): raise datastore_errors.BadArgumentError( 'query argument must be a datastore_query.Query (%r)' % (query,)) if not isinstance(entities, list): raise datastore_errors.BadArgumentError( 'entities argument must be a list (%r)' % (entities,)) key = _key or (lambda x: x) filtered_results = [r for r in entities if query._key_filter(key(r))] if not query._order: if query._filter_predicate: return [r for r in filtered_results if query._filter_predicate(key(r))] return filtered_results names = query._order._get_prop_names() if query._filter_predicate: names |= query._filter_predicate._get_prop_names() exists_filter = _PropertyExistsFilter(names) value_maps = [] for result in filtered_results: value_map = _make_key_value_map(key(result), names) if exists_filter._apply(value_map) and ( not query._filter_predicate or query._filter_predicate._prune(value_map)): value_map['__result__'] = result value_maps.append(value_map) value_maps.sort(key=functools.cmp_to_key(query._order._cmp)) return [value_map['__result__'] for value_map in value_maps] class _AugmentedQuery(_BaseQuery): """A query that combines a datastore query with in-memory filters/results.""" @datastore_rpc._positional(2) def __init__(self, query, in_memory_results=None, in_memory_filter=None, max_filtered_count=None): """Constructor for _AugmentedQuery. Do not call directly. Use the utility functions instead (e.g. datastore_query.inject_results) Args: query: A datastore_query.Query object to augment. in_memory_results: a list of pre- sorted and filtered result to add to the stream of datastore results or None . in_memory_filter: a set of in-memory filters to apply to the datastore results or None. max_filtered_count: the maximum number of datastore entities that will be filtered out by in_memory_filter if known. 
""" if not isinstance(query, Query): raise datastore_errors.BadArgumentError( 'query argument should be datastore_query.Query (%r)' % (query,)) if (in_memory_filter is not None and not isinstance(in_memory_filter, FilterPredicate)): raise datastore_errors.BadArgumentError( 'in_memory_filter argument should be ' + six.ensure_str( 'datastore_query.FilterPredicate (%r)' % (in_memory_filter,), 'utf-8')) if (in_memory_results is not None and not isinstance(in_memory_results, list)): raise datastore_errors.BadArgumentError( 'in_memory_results argument should be a list of' + six.ensure_str('datastore_pv.EntityProto (%r)' % (in_memory_results,), 'utf-8')) datastore_types.ValidateInteger(max_filtered_count, 'max_filtered_count', empty_ok=True, zero_ok=True) self._query = query self._max_filtered_count = max_filtered_count self._in_memory_filter = in_memory_filter self._in_memory_results = in_memory_results @property def app(self): return self._query._key_filter.app @property def namespace(self): return self._query._key_filter.namespace @property def kind(self): return self._query._key_filter.kind @property def ancestor(self): return self._query._key_filter.ancestor @property def filter_predicate(self): return self._query._filter_predicate @property def order(self): return self._query._order @property def group_by(self): return self._query._group_by def run_async(self, conn, query_options=None): if not isinstance(conn, datastore_rpc.BaseConnection): raise datastore_errors.BadArgumentError( 'conn should be a datastore_rpc.BaseConnection (%r)' % (conn,)) if not QueryOptions.is_configuration(query_options): query_options = QueryOptions(config=query_options) if self._query._order: changes = {'keys_only': False} else: changes = {} if self._in_memory_filter or self._in_memory_results: in_memory_offset = query_options.offset in_memory_limit = query_options.limit if in_memory_limit is not None: if self._in_memory_filter is None: changes['limit'] = in_memory_limit elif self._max_filtered_count is not None: changes['limit'] = in_memory_limit + self._max_filtered_count else: changes['limit'] = None if in_memory_offset: changes['offset'] = None if changes.get('limit', None) is not None: changes['limit'] += in_memory_offset else: in_memory_offset = None else: in_memory_offset = None in_memory_limit = None modified_query_options = QueryOptions(config=query_options, **changes) if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1: req = self._query._to_pb_v1(conn, modified_query_options) else: req = self._query._to_pb(conn, modified_query_options) start_cursor = query_options.start_cursor if not start_cursor and query_options.produce_cursors: start_cursor = Cursor() return _AugmentedBatch.create_async(self, modified_query_options, conn, req, in_memory_offset=in_memory_offset, in_memory_limit=in_memory_limit, start_cursor=start_cursor) @datastore_rpc._positional(1) def inject_results(query, updated_entities=None, deleted_keys=None): """Creates a query object that will inject changes into results. Args: query: The datastore_query.Query to augment updated_entities: A list of entity_pb2.EntityProto's that have been updated and should take priority over any values returned by query. deleted_keys: A list of entity_pb2.Reference's for entities that have been deleted and should be removed from query results. Returns: A datastore_query.AugmentedQuery if in memory filtering is required, query otherwise. 
""" if not isinstance(query, Query): raise datastore_errors.BadArgumentError( 'query argument should be datastore_query.Query (%r)' % (query,)) overridden_keys = set() if deleted_keys is not None: if not isinstance(deleted_keys, list): raise datastore_errors.BadArgumentError( 'deleted_keys argument must be a list (%r)' % (deleted_keys,)) deleted_keys = list(six.moves.filter(query._key_filter, deleted_keys)) for key in deleted_keys: overridden_keys.add(datastore_types.ReferenceToKeyValue(key)) if updated_entities is not None: if not isinstance(updated_entities, list): raise datastore_errors.BadArgumentError( 'updated_entities argument must be a list (%r)' % (updated_entities,)) updated_entities = list( six.moves.filter(query._key_filter, updated_entities)) for entity in updated_entities: overridden_keys.add(datastore_types.ReferenceToKeyValue(entity.key)) updated_entities = apply_query(query, updated_entities) else: updated_entities = [] if not overridden_keys: return query return _AugmentedQuery(query, in_memory_filter=_IgnoreFilter(overridden_keys), in_memory_results=updated_entities, max_filtered_count=len(overridden_keys)) class _BatchShared(object): """Data shared among the batches of a query.""" def __init__(self, query, query_options, conn, augmented_query=None, initial_offset=None): self.__query = query self.__query_options = query_options self.__conn = conn self.__augmented_query = augmented_query self.__was_first_result_processed = False if initial_offset is None: initial_offset = query_options.offset or 0 self.__expected_offset = initial_offset self.__remaining_limit = query_options.limit @property def query(self): return self.__query @property def query_options(self): return self.__query_options @property def conn(self): return self.__conn @property def augmented_query(self): return self.__augmented_query @property def keys_only(self): return self.__keys_only @property def compiled_query(self): return self.__compiled_query @property def expected_offset(self): return self.__expected_offset @property def remaining_limit(self): return self.__remaining_limit @property def index_list(self): """Returns the list of indexes used by the query. Possibly None when the adapter does not implement pb_to_index. """ return self.__index_list def process_batch(self, batch): if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1: skipped_results = batch.skipped_results num_results = len(batch.entity_results) else: skipped_results = batch.skipped_results num_results = len(batch.result) self.__expected_offset -= skipped_results if self.__remaining_limit is not None: self.__remaining_limit -= num_results if not self.__was_first_result_processed: self.__was_first_result_processed = True if self.conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1: result_type = batch.entity_result_type self.__keys_only = result_type == googledatastore.EntityResult.KEY_ONLY self.__compiled_query = None self.__index_list = None else: self.__keys_only = batch.keys_only if batch.HasField('compiled_query'): self.__compiled_query = batch.compiled_query else: self.__compiled_query = None try: self.__index_list = [ self.__conn.adapter.pb_to_index(index_pb) for index_pb in batch.index ] except NotImplementedError: self.__index_list = None class Batch(object): """A batch of results returned by a query. This class contains a batch of results returned from the datastore and relevant metadata. This metadata includes: query: The query that produced this batch query_options: The QueryOptions used to run the query. 
This does not contained any options passed to the .next_batch() call that created the current batch. start_cursor, end_cursor: These are the cursors that can be used with a query to re-fetch this batch. They can also be used to find all entities before or after the given batch (by use start_cursor as an end cursor or vice versa). start_cursor can also be advanced to point to a position within the batch using Cursor.advance(). skipped_results: the number of result skipped because of the offset given to the request that generated it. This can be set either on the original Query.run() request or in subsequent .next_batch() calls. more_results: If this is true there are more results that can be retrieved either by .next_batch() or Batcher.next(). This class is also able to fetch the next batch of the query using .next_batch(). As batches of results must be fetched serially, .next_batch() can only be called once. Additional calls to .next_batch() will return None. When there are no more batches .next_batch() will return None as well. Note that batches returned by iterating over Batcher will always return None for .next_batch() as the Bather handles fetching the next batch automatically. A Batch typically represents the result of a single RPC request. The datastore operates on a "best effort" basis so the batch returned by .next_batch() or Query.run_async().get_result() may not have satisfied the requested offset or number of results (specified through FetchOptions.offset and FetchOptions.batch_size respectively). To satisfy these restrictions additional batches may be needed (with FetchOptions that specify the remaining offset or results needed). The Batcher class hides these limitations. """ __skipped_cursor = None __end_cursor = None @classmethod @datastore_rpc._positional(5) def create_async(cls, query, query_options, conn, req, start_cursor): batch_shared = _BatchShared(query, query_options, conn) batch0 = cls(batch_shared, start_cursor=start_cursor) return batch0._make_query_rpc_call(query_options, req) @datastore_rpc._positional(2) def __init__(self, batch_shared, start_cursor=Cursor()): """Constructor. This class is constructed in stages (one when an RPC is sent and another when an rpc is completed) and should not be constructed directly!! Use Query.run_async().get_result() to create a Batch or Query.run() to use a batcher. This constructor does not perform verification. Args: batch_shared: Data shared between batches for a a single query run. start_cursor: Optional cursor pointing before this batch. """ self._batch_shared = batch_shared self.__start_cursor = start_cursor @property def query_options(self): """The QueryOptions used to retrieve the first batch.""" return self._batch_shared.query_options @property def query(self): """The query the current batch came from.""" return self._batch_shared.query @property def results(self): """A list of entities in this batch.""" return self.__results @property def keys_only(self): """Whether the entities in this batch only contain keys.""" return self._batch_shared.keys_only @property def index_list(self): """Returns the list of indexes used to peform this batch's query. Possibly None when the adapter does not implement pb_to_index. 
""" return self._batch_shared.index_list @property def start_cursor(self): """A cursor that points to the position just before the current batch.""" return self.__start_cursor @property def end_cursor(self): """A cursor that points to the position just after the current batch.""" return self.__end_cursor @property def skipped_results(self): """The number of results skipped because of an offset in the request. An offset is satisfied before any results are returned. The start_cursor points to the position in the query before the skipped results. """ return self._skipped_results @property def more_results(self): """Whether more results can be retrieved from the query.""" return self.__more_results def next_batch(self, fetch_options=None): """Synchronously get the next batch or None if there are no more batches. Args: fetch_options: Optional fetch options to use when fetching the next batch. Merged with both the fetch options on the original call and the connection. Returns: A new Batch of results or None if either the next batch has already been fetched or there are no more results. """ async_ = self.next_batch_async(fetch_options) if async_ is None: return None return async_.get_result() def _compiled_query(self): return self._batch_shared.compiled_query def cursor(self, index): """Gets the cursor that points just after the result at index - 1. The index is relative to first result in .results. Since start_cursor points to the position before the first skipped result, the range of indexes this function supports is limited to [-skipped_results, len(results)]. For example, using start_cursor=batch.cursor(i) and end_cursor=batch.cursor(j) will return the results found in batch.results[i:j]. Note that any result added in the range (i-1, j] will appear in the new query's results. Warning: Any index in the range (-skipped_results, 0) may cause continuation to miss or duplicate results if outside a transaction. Args: index: An int, the index relative to the first result before which the cursor should point. Returns: A Cursor that points to a position just after the result index - 1, which if used as a start_cursor will cause the first result to be batch.result[index]. """ if not isinstance(index, six.integer_types): raise datastore_errors.BadArgumentError( 'index argument should be an integer (%r)' % (index,)) if not -self._skipped_results <= index <= len(self.__results): raise datastore_errors.BadArgumentError( 'index argument must be in the inclusive range [%d, %d]' % (-self._skipped_results, len(self.__results))) if index == -self._skipped_results: return self.__start_cursor elif (index == 0 and self.__skipped_cursor): return self.__skipped_cursor elif index > 0 and self.__result_cursors: return self.__result_cursors[index - 1] elif index == len(self.__results): return self.__end_cursor else: return self.__start_cursor.advance(index + self._skipped_results, self._batch_shared.query, self._batch_shared.conn) def next_batch_async(self, fetch_options=None): """Asynchronously get the next batch or None if there are no more batches. Args: fetch_options: Optional fetch options to use when fetching the next batch. Merged with both the fetch options on the original call and the connection. Returns: An async object that can be used to get the next Batch or None if either the next batch has already been fetched or there are no more results. 
""" if not self.__datastore_cursor: return None fetch_options, next_batch = self._make_next_batch(fetch_options) if (fetch_options is not None and not FetchOptions.is_configuration(fetch_options)): raise datastore_errors.BadArgumentError('Invalid fetch options.') config = self._batch_shared.query_options.merge(fetch_options) conn = next_batch._batch_shared.conn requested_offset = 0 if fetch_options is not None and fetch_options.offset is not None: requested_offset = fetch_options.offset if conn._api_version == datastore_rpc._CLOUD_DATASTORE_V1: if self._batch_shared.expected_offset != requested_offset: raise datastore_errors.BadArgumentError( 'Cannot request the next batch with a different offset than ' ' expected. Expected: %s, Got: %s.' % (self._batch_shared.expected_offset, requested_offset)) limit = self._batch_shared.remaining_limit next_options = QueryOptions(offset=self._batch_shared.expected_offset, limit=limit, start_cursor=self.__datastore_cursor) config = config.merge(next_options) result = next_batch._make_query_rpc_call( config, self._batch_shared.query._to_pb_v1(conn, config)) else: result = next_batch._make_next_rpc_call(config, self._to_pb(fetch_options)) self.__datastore_cursor = None return result def _to_pb(self, fetch_options=None): req = datastore_pb.NextRequest() if FetchOptions.produce_cursors(fetch_options, self._batch_shared.query_options, self._batch_shared.conn.config): req.compile = True count = FetchOptions.batch_size(fetch_options, self._batch_shared.query_options, self._batch_shared.conn.config) if count is not None: req.count = count if fetch_options is not None and fetch_options.offset: req.offset = fetch_options.offset req.cursor.CopyFrom(self.__datastore_cursor) return req def _extend(self, next_batch): """Combines the current batch with the next one. Called by batcher.""" self.__datastore_cursor = next_batch.__datastore_cursor next_batch.__datastore_cursor = None self.__more_results = next_batch.__more_results if not self.__results: self.__skipped_cursor = next_batch.__skipped_cursor self.__results.extend(next_batch.__results) self.__result_cursors.extend(next_batch.__result_cursors) self.__end_cursor = next_batch.__end_cursor self._skipped_results += next_batch._skipped_results def _make_query_rpc_call(self, config, req): """Makes a RunQuery call that will modify the instance. Args: config: The datastore_rpc.Configuration to use for the call. req: The request to send with the call. Returns: A UserRPC object that can be used to fetch the result of the RPC. """ _api_version = self._batch_shared.conn._api_version if _api_version == datastore_rpc._CLOUD_DATASTORE_V1: return self._batch_shared.conn._make_rpc_call( config, 'RunQuery', req, googledatastore.RunQueryResponse(), self.__v1_run_query_response_hook) return self._batch_shared.conn._make_rpc_call(config, 'RunQuery', req, datastore_pb.QueryResult(), self.__query_result_hook) def _make_next_rpc_call(self, config, req): """Makes a Next call that will modify the instance. Args: config: The datastore_rpc.Configuration to use for the call. req: The request to send with the call. Returns: A UserRPC object that can be used to fetch the result of the RPC. 
""" return self._batch_shared.conn._make_rpc_call(config, 'Next', req, datastore_pb.QueryResult(), self.__query_result_hook) _need_index_header = 'The suggested index for this query is:' def __v1_run_query_response_hook(self, rpc): try: self._batch_shared.conn.check_rpc_success(rpc) except datastore_errors.NeedIndexError: raise batch = rpc.response.batch self._batch_shared.process_batch(batch) if batch.skipped_cursor: self.__skipped_cursor = Cursor(_cursor_bytes=batch.skipped_cursor) self.__result_cursors = [Cursor(_cursor_bytes=result.cursor) for result in batch.entity_results if result.cursor] if batch.end_cursor: self.__end_cursor = Cursor(_cursor_bytes=batch.end_cursor) self._skipped_results = batch.skipped_results if batch.more_results == googledatastore.QueryResultBatch.NOT_FINISHED: self.__more_results = True self.__datastore_cursor = self.__end_cursor or self.__skipped_cursor if self.__datastore_cursor == self.__start_cursor: raise datastore_errors.Timeout( 'The query was not able to make progress.') else: self._end() self.__results = self._process_v1_results(batch.entity_results) return self def __query_result_hook(self, rpc): """Internal method used as get_result_hook for RunQuery/Next operation.""" try: self._batch_shared.conn.check_rpc_success(rpc) except datastore_errors.NeedIndexError as exc: if isinstance(rpc.request, datastore_pb.Query): _, kind, ancestor, props = datastore_index.CompositeIndexForQuery( rpc.request) props = datastore_index.GetRecommendedIndexProperties(props) yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props) xml = datastore_index.IndexXmlForQuery(kind, ancestor, props) raise datastore_errors.NeedIndexError( '\n'.join([str(exc), self._need_index_header, yaml]), original_message=str(exc), header=self._need_index_header, yaml_index=yaml, xml_index=xml) raise query_result = rpc.response self._batch_shared.process_batch(query_result) if query_result.HasField('skipped_results_compiled_cursor'): self.__skipped_cursor = Cursor( _cursor_bytes=query_result.skipped_results_compiled_cursor .SerializeToString()) self.__result_cursors = [ Cursor(_cursor_bytes=result.SerializeToString()) for result in query_result.result_compiled_cursor ] if query_result.HasField('compiled_cursor'): self.__end_cursor = Cursor( _cursor_bytes=query_result.compiled_cursor.SerializeToString()) self._skipped_results = query_result.skipped_results if query_result.more_results: self.__datastore_cursor = query_result.cursor self.__more_results = True else: self._end() self.__results = self._process_results(query_result.result) return self def _end(self): """Changes the internal state so that no more batches can be produced.""" self.__datastore_cursor = None self.__more_results = False def _make_next_batch(self, fetch_options): """Creates the object to store the next batch. Args: fetch_options: The datastore_query.FetchOptions passed in by the user or None. Returns: A tuple containing the fetch options that should be used internally and the object that should be used to contain the next batch. """ return fetch_options, Batch(self._batch_shared, start_cursor=self.__end_cursor) def _process_results(self, results): """Converts the datastore results into results returned to the user. Args: results: A list of entity_pb2.EntityProto's returned by the datastore Returns: A list of results that should be returned to the user. 
""" converter = self._batch_shared.conn.adapter.pb_to_query_result return [converter(result, self._batch_shared.query_options) for result in results] def _process_v1_results(self, results): """Converts the datastore results into results returned to the user. Args: results: A list of googledatastore.EntityResults. Returns: A list of results that should be returned to the user. """ converter = self._batch_shared.conn.adapter.pb_v1_to_query_result return [converter(result.entity, self._batch_shared.query_options) for result in results] def __getstate__(self): raise pickle.PicklingError( 'Pickling of datastore_query.Batch is unsupported.') class _AugmentedBatch(Batch): """A batch produced by a datastore_query._AugmentedQuery.""" @classmethod @datastore_rpc._positional(5) def create_async(cls, augmented_query, query_options, conn, req, in_memory_offset, in_memory_limit, start_cursor): initial_offset = 0 if in_memory_offset is not None else None batch_shared = _BatchShared(augmented_query._query, query_options, conn, augmented_query, initial_offset=initial_offset) batch0 = cls(batch_shared, in_memory_offset=in_memory_offset, in_memory_limit=in_memory_limit, start_cursor=start_cursor) return batch0._make_query_rpc_call(query_options, req) @datastore_rpc._positional(2) def __init__(self, batch_shared, in_memory_offset=None, in_memory_limit=None, next_index=0, start_cursor=Cursor()): """A Constructor for datastore_query._AugmentedBatch. Constructed by datastore_query._AugmentedQuery. Should not be called directly. """ super(_AugmentedBatch, self).__init__(batch_shared, start_cursor=start_cursor) self.__in_memory_offset = in_memory_offset self.__in_memory_limit = in_memory_limit self.__next_index = next_index @property def query(self): """The query the current batch came from.""" return self._batch_shared.augmented_query def cursor(self, index): raise NotImplementedError def _extend(self, next_batch): super(_AugmentedBatch, self)._extend(next_batch) self.__in_memory_limit = next_batch.__in_memory_limit self.__in_memory_offset = next_batch.__in_memory_offset self.__next_index = next_batch.__next_index def _process_v1_results(self, results): """Process V4 results by converting to V3 and calling _process_results.""" v3_results = [] is_projection = bool(self.query_options.projection) for v1_result in results: v3_entity = entity_pb2.EntityProto() self._batch_shared.conn.adapter.get_entity_converter().v1_to_v3_entity( v1_result.entity, v3_entity, is_projection) v3_results.append(v3_entity) return self._process_results(v3_results) def _process_results(self, results): in_memory_filter = self._batch_shared.augmented_query._in_memory_filter if in_memory_filter: results = list(filter(in_memory_filter, results)) in_memory_results = self._batch_shared.augmented_query._in_memory_results if in_memory_results and self.__next_index < len(in_memory_results): original_query = super(_AugmentedBatch, self).query if original_query._order: if results: next_result = in_memory_results[self.__next_index] next_key = original_query._order.key(next_result) i = 0 while i < len(results): result = results[i] result_key = original_query._order.key(result) while next_key <= result_key: results.insert(i, next_result) i += 1 self.__next_index += 1 if self.__next_index >= len(in_memory_results): break next_result = in_memory_results[self.__next_index] next_key = original_query._order.key(next_result) i += 1 elif results or not super(_AugmentedBatch, self).more_results: results = in_memory_results + results self.__next_index = 
len(in_memory_results) if self.__in_memory_offset: assert not self._skipped_results offset = min(self.__in_memory_offset, len(results)) if offset: self._skipped_results += offset self.__in_memory_offset -= offset results = results[offset:] if self.__in_memory_limit is not None: results = results[:self.__in_memory_limit] self.__in_memory_limit -= len(results) if self.__in_memory_limit <= 0: self._end() return super(_AugmentedBatch, self)._process_results(results) def _make_next_batch(self, fetch_options): in_memory_offset = FetchOptions.offset(fetch_options) augmented_query = self._batch_shared.augmented_query if in_memory_offset and (augmented_query._in_memory_filter or augmented_query._in_memory_results): fetch_options = FetchOptions(offset=0) else: in_memory_offset = None return (fetch_options, _AugmentedBatch(self._batch_shared, in_memory_offset=in_memory_offset, in_memory_limit=self.__in_memory_limit, start_cursor=self.end_cursor, next_index=self.__next_index)) class Batcher(object): """A class that implements the Iterator interface for Batches. Typically constructed by a call to Query.run(). The class hides the "best effort" nature of the datastore by potentially making multiple requests to the datastore and merging the resulting batches. This is accomplished efficiently by prefetching results and mixing both non-blocking and blocking calls to the datastore as needed. Iterating through batches is almost always more efficient than pulling all results at once as RPC latency is hidden by asynchronously prefetching results. The batches produced by this class cannot be used to fetch the next batch (through Batch.next_batch()) because the request for the next batch has already been sent before the current batch is returned. """ ASYNC_ONLY = None AT_LEAST_OFFSET = 0 AT_LEAST_ONE = object() def __init__(self, query_options, first_async_batch): """Constructor. Although this class can be manually constructed, it is preferable to use Query.run(query_options). Args: query_options: The QueryOptions used to create the first batch. first_async_batch: The first batch produced by Query.run_async(query_options). """ self.__next_batch = first_async_batch self.__initial_offset = QueryOptions.offset(query_options) or 0 self.__skipped_results = 0 def next(self): """Get the next batch. See .next_batch().""" return self.next_batch(self.AT_LEAST_ONE) def __next__(self): return self.next() def next_batch(self, min_batch_size): """Get the next batch. The batch returned by this function cannot be used to fetch the next batch (through Batch.next_batch()); on such batches next_batch() will always return None. To retrieve the next batch use .next() or .next_batch(N). This function may return a batch larger than min_batch_size, but will never return a smaller one unless there are no more results. Special values can be used for min_batch_size: ASYNC_ONLY - Do not perform any synchronous fetches from the datastore even if this produces a batch with no results. AT_LEAST_OFFSET - Only pull enough results to satisfy the offset. AT_LEAST_ONE - Pull batches until at least one result is returned. Args: min_batch_size: The minimum number of results to retrieve or one of (ASYNC_ONLY, AT_LEAST_OFFSET, AT_LEAST_ONE) Returns: The next Batch of results.
""" if min_batch_size in (Batcher.ASYNC_ONLY, Batcher.AT_LEAST_OFFSET, Batcher.AT_LEAST_ONE): exact = False else: exact = True datastore_types.ValidateInteger(min_batch_size, 'min_batch_size', datastore_errors.BadArgumentError) if not self.__next_batch: raise StopIteration batch = self.__next_batch.get_result() self.__next_batch = None self.__skipped_results += batch.skipped_results if min_batch_size is not Batcher.ASYNC_ONLY: if min_batch_size is Batcher.AT_LEAST_ONE: min_batch_size = 1 needed_results = min_batch_size - len(batch.results) while (batch.more_results and (self.__skipped_results < self.__initial_offset or needed_results > 0)): if batch.query_options.batch_size: batch_size = max(batch.query_options.batch_size, needed_results) elif exact: batch_size = needed_results else: batch_size = None self.__next_batch = batch.next_batch_async(FetchOptions( offset=max(0, self.__initial_offset - self.__skipped_results), batch_size=batch_size)) next_batch = self.__next_batch.get_result() self.__next_batch = None self.__skipped_results += next_batch.skipped_results needed_results = max(0, needed_results - len(next_batch.results)) batch._extend(next_batch) self.__next_batch = batch.next_batch_async() return batch def __getstate__(self): raise pickle.PicklingError( 'Pickling of datastore_query.Batcher is unsupported.') def __iter__(self): return self class ResultsIterator(six.Iterator): """An iterator over the results from Batches obtained from a Batcher. ResultsIterator implements Python's iterator protocol, so results can be accessed with the for-statement: > it = ResultsIterator(Query(kind='Person').run()) > for person in it: > print 'Hi, %s!' % person['name'] At any time ResultsIterator.cursor() can be used to grab the Cursor that points just after the last result returned by the iterator. """ __current_batch = None __current_pos = 0 __last_cursor = None def __init__(self, batcher): """Constructor. Args: batcher: A datastore_query.Batcher """ if not isinstance(batcher, Batcher): raise datastore_errors.BadArgumentError( 'batcher argument should be datastore_query.Batcher (%r)' % (batcher,)) self.__batcher = batcher def index_list(self): """Returns the list of indexes used to perform the query. Possibly None when the adapter does not implement pb_to_index. """ return self._ensure_current_batch().index_list def cursor(self): """Returns a cursor that points just after the last result returned. If next() throws an exception, this function returns the end_cursor from the last successful batch or throws the same exception if no batch was successful. """ return (self.__last_cursor or self._ensure_current_batch().cursor(self.__current_pos)) def _ensure_current_batch(self): if not self.__current_batch: self.__current_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET) self.__current_pos = 0 return self.__current_batch def _compiled_query(self): """Returns the compiled query associated with the iterator. Internal only do not use. 
""" return self._ensure_current_batch()._compiled_query() def __next__(self): """Returns the next query result.""" while (not self.__current_batch or self.__current_pos >= len(self.__current_batch.results)): try: next_batch = self.__batcher.next_batch(Batcher.AT_LEAST_OFFSET) except: if self.__current_batch: self.__last_cursor = self.__current_batch.end_cursor raise self.__current_pos = 0 self.__current_batch = next_batch result = self.__current_batch.results[self.__current_pos] self.__current_pos += 1 return result def __iter__(self): return self def next(self): return self.__next__()
32.482311
81
0.690634
14,487
110,180
4.991648
0.064748
0.019291
0.033009
0.026385
0.433955
0.362184
0.289653
0.244033
0.222543
0.191222
0
0.003428
0.232274
110,180
3,391
82
32.49189
0.851468
0.297205
0
0.351882
0
0
0.05479
0.00705
0
0
0
0
0.001637
1
0.118385
false
0.000546
0.010366
0.027278
0.278232
0.000546
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47a1ea7f8990d7f8f0d9190441ddb6344e10412
1,785
py
Python
parsing/tests/test_utils.py
davesque/parsing.py
ff8b20e53b94e79571971ef23f0e5091e2786566
[ "MIT" ]
1
2020-11-14T13:06:42.000Z
2020-11-14T13:06:42.000Z
parsing/tests/test_utils.py
davesque/parsing.py
ff8b20e53b94e79571971ef23f0e5091e2786566
[ "MIT" ]
null
null
null
parsing/tests/test_utils.py
davesque/parsing.py
ff8b20e53b94e79571971ef23f0e5091e2786566
[ "MIT" ]
null
null
null
from __future__ import unicode_literals

import unittest
# reduce is a builtin on Python 2 but lives in functools on Python 3;
# without this import the TestFlatten case raises NameError on Python 3.
from functools import reduce

from ..utils import compose, flatten, truncate, join, unary, equals


class TestEquals(unittest.TestCase):
    def test_it_should_return_a_function_that_compares_against_x(self):
        self.assertTrue(equals(234)(234))
        self.assertFalse(equals(234)(123))


class TestUnary(unittest.TestCase):
    def test_it_should_convert_a_function_into_a_unary_version_of_itself(self):
        self.assertEqual(unary(lambda x, y: x + y)([1, 2]), 3)


class TestJoin(unittest.TestCase):
    def test_it_should_join_a_sequence_into_a_string(self):
        self.assertEqual(join(list('arst')), 'arst')
        self.assertEqual(join(map(str, [1, 2, 3, 4])), '1234')


class TestTruncate(unittest.TestCase):
    def test_it_should_truncate_a_string(self):
        self.assertEqual(truncate('arst'), 'arst')
        self.assertEqual(truncate('arstarstar'), 'arstarstar')
        self.assertEqual(truncate('arstarstars'), 'arstarstar...')
        self.assertEqual(truncate('arstarstarstarstarstarstarstarst'), 'arstarstar...')


class TestCompose(unittest.TestCase):
    def test_it_should_compose_the_given_functions(self):
        f = compose(
            lambda x: x + 1,
            lambda x: x * 2,
            lambda x: x ** 3,
        )
        self.assertEqual(f(1), 3)
        self.assertEqual(f(2), 17)
        self.assertEqual(f(3), 55)


class TestFlatten(unittest.TestCase):
    def test_it_should_flatten_an_arbitrarily_nested_list(self):
        self.assertEqual(
            flatten([1, 2, [3, 4, [5, 6]]]),
            [1, 2, 3, 4, 5, 6],
        )
        heavily_nested = reduce(lambda a, i: (a, i), range(1000))
        self.assertEqual(
            flatten(heavily_nested),
            list(range(1000)),
        )
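The ..utils module under test is not part of this record; below is a minimal sketch of implementations consistent with these assertions. Every name and default here is inferred from the tests, so treat it as illustrative rather than the project's actual code.

from functools import reduce

def equals(x):
    # Returns a predicate that compares its argument against x.
    return lambda y: y == x

def unary(f):
    # Adapts f(a, b, ...) into a function taking one sequence argument.
    return lambda args: f(*args)

def join(seq):
    # Joins a sequence of strings into one string.
    return ''.join(seq)

def truncate(s, limit=10, suffix='...'):
    # Truncates s to `limit` characters, appending `suffix` when cut
    # (limit=10 matches the test expectations; the real default may differ).
    return s if len(s) <= limit else s[:limit] + suffix

def compose(*fns):
    # Right-to-left composition: compose(f, g)(x) == f(g(x)),
    # which is what makes f(1) == 3 in TestCompose.
    return reduce(lambda f, g: lambda x: f(g(x)), fns)

def flatten(xs):
    # Recursively flattens nested lists/tuples into a flat list.
    out = []
    for x in xs:
        if isinstance(x, (list, tuple)):
            out.extend(flatten(x))
        else:
            out.append(x)
    return out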
30.775862
87
0.652101
223
1,785
4.982063
0.336323
0.162016
0.10261
0.124212
0.225023
0.178218
0
0
0
0
0
0.038765
0.219608
1,785
57
88
31.315789
0.758794
0
0
0.04878
0
0
0.061064
0.017927
0
0
0
0
0.341463
1
0.146341
false
0
0.073171
0
0.365854
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47b6c51761fb432f29fb2e6eb1f0ea2e885172e
1,807
py
Python
Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py
prash-kr-meena/GoogleR
27aca71e51cc2442e604e07ab00406a98d8d63a4
[ "Apache-2.0" ]
null
null
null
Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py
prash-kr-meena/GoogleR
27aca71e51cc2442e604e07ab00406a98d8d63a4
[ "Apache-2.0" ]
null
null
null
Array/Final450/Move_Negative_Nums_To_One_End/relative_order_matters/move_negative_nums_to_one_end--insertion_sort_modified.py
prash-kr-meena/GoogleR
27aca71e51cc2442e604e07ab00406a98d8d63a4
[ "Apache-2.0" ]
null
null
null
from Utils.Array import input_array

# Time  : O(n^2)
# Space : O(1) constant space

"""
Two pointers are used here:
one walks through the array looking for -ve numbers to operate on,
and the other points to the location where the next -ve element found
should be placed. That same location also marks the start of the first
+ve number in the array, since the +ve numbers are the ones that get
shifted forward.

When a -ve number is found: store it temporarily, shift all the +ve
numbers before it forward by one step to make room for it, then put the
stored number in its correct position and advance the pointer that
tracks where future -ve numbers go.
"""


def rearrange_via_modified_insertion_sort(A):
    index_to_place_nums = 0  # for placing the -ve nums that we find
    for walking_index in range(0, len(A)):  # go through the array
        if A[walking_index] >= 0:  # +ve num, so move on
            continue
        # -ve num
        found_num = A[walking_index]  # temporary storage
        # move all the +ve numbers before it forward by one step
        ptr = walking_index - 1
        while ptr >= index_to_place_nums:  # till it reaches the first +ve number
            A[ptr + 1] = A[ptr]
            ptr -= 1  # go back one step
        # reached, now put the -ve number found at its correct position
        A[index_to_place_nums] = found_num
        index_to_place_nums += 1  # update for the index of the next -ve number


if __name__ == "__main__":
    arr = input_array()
    rearrange_via_modified_insertion_sort(arr)
    print(arr)

"""
12 11 -13 -5 6 -7 5 -3 -6
-1 2 -3 4 5 6 -7 8 9
2 3 -1 -4 -6        # Reverse
4 3 2 1 0 -1 -2 -3  # Reverse containing 0
"""
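A quick check of the function above with the input inlined (the repo's Utils.Array.input_array helper is not shown in this record); note that the relative order is preserved on both sides of the split:

arr = [12, 11, -13, -5, 6, -7, 5, -3, -6]
rearrange_via_modified_insertion_sort(arr)
print(arr)  # [-13, -5, -7, -3, -6, 12, 11, 6, 5]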
34.09434
105
0.646375
301
1,807
3.757475
0.415282
0.035367
0.04244
0.056587
0.091954
0
0
0
0
0
0
0.036378
0.285003
1,807
52
106
34.75
0.839009
0.203652
0
0
0
0
0.012862
0
0
0
0
0
0
1
0.058824
false
0
0.058824
0
0.117647
0.058824
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47c09e34304fe10a016d16f624d1fb84ab59f99
2,786
py
Python
python_test/test_epoll/test_epoll.py
zhtsh/test-examples
ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6
[ "Apache-2.0" ]
null
null
null
python_test/test_epoll/test_epoll.py
zhtsh/test-examples
ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6
[ "Apache-2.0" ]
null
null
null
python_test/test_epoll/test_epoll.py
zhtsh/test-examples
ed5a45bf8546a9bd7fc35e38f9679be385d0d9e6
[ "Apache-2.0" ]
null
null
null
# coding=utf8

import socket
import select
from datetime import datetime
from datetime import timedelta

EOL = b'\n\n'
response = b'HTTP/1.0 200 OK\nDate: Mon, 1 Jan 1996 01:01:01 GMT\n'
response += b'Content-Type: text/plain\nContent-Length: 13\n\n'
response += b'Hello, world!\n'

# Create the server socket and bind it to the listening port
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversocket.bind(('0.0.0.0', 8080))
serversocket.listen(1)
serversocket.setblocking(0)

# Create the epoll object and register the server socket for read (EPOLLIN) events
epoll = select.epoll()
epoll.register(serversocket.fileno(), select.EPOLLIN)

try:
    connections = {}
    requests = {}
    responses = {}
    while True:
        # Main loop: the epoll system call returns as soon as a network I/O
        # event occurs. This is the key difference from the select system call.
        events = epoll.poll(1)
        # Handle each file descriptor reported by the event notification
        for fileno, event in events:
            # The listening socket is readable: accept the connection
            # and register it for read events
            if fileno == serversocket.fileno():
                connection, address = serversocket.accept()
                connection.setblocking(0)
                epoll.register(connection.fileno(), select.EPOLLIN)
                connections[connection.fileno()] = connection
                requests[connection.fileno()] = b''
                responses[connection.fileno()] = response
            elif event & select.EPOLLIN:
                # A connection is readable: read the data sent by the client,
                # then register the connection for write events
                try:
                    requests[fileno] += connections[fileno].recv(1024)
                    if EOL in requests[fileno]:
                        epoll.modify(fileno, event | select.EPOLLOUT)
                        print(requests[fileno])
                except Exception as e:
                    print(e)
                    epoll.unregister(fileno)
                    del connections[fileno]
            elif event & select.EPOLLOUT:
                # A connection became writable: send the response data to the client
                try:
                    byteswritten = connections[fileno].send(responses[fileno])
                    # responses[fileno] = responses[fileno][byteswritten:]
                    # if len(responses[fileno]) == 0:
                    #     epoll.modify(fileno, 0)
                    #     connections[fileno].shutdown(socket.SHUT_RDWR)
                except Exception as e:
                    print(e)
                    # epoll.modify(fileno, 0)
                    epoll.unregister(fileno)
                    del connections[fileno]
            elif event & select.EPOLLHUP:
                epoll.unregister(fileno)
                connections[fileno].close()
                del connections[fileno]
finally:
    epoll.unregister(serversocket.fileno())
    epoll.close()
    serversocket.close()
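To exercise the server above, any HTTP/1.0-style client that terminates its request with a blank line (the server's EOL marker) will do; a minimal sketch using only the standard library, assuming the server is running locally on port 8080:

import socket

client = socket.create_connection(('127.0.0.1', 8080))
client.sendall(b'GET / HTTP/1.0\n\n')  # '\n\n' matches the server's EOL marker
print(client.recv(4096).decode())      # expect the canned "Hello, world!" response
client.close()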
38.164384
79
0.561378
260
2,786
5.996154
0.4
0.076331
0.019243
0.014112
0.105837
0.105837
0.105837
0.071841
0.071841
0
0
0.021253
0.34135
2,786
72
80
38.694444
0.828338
0.145011
0
0.240741
0
0.018519
0.055314
0.01176
0
0
0
0
0
1
0
false
0
0.074074
0
0.074074
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47cd9858ae9886cfca8b27e46c09a635662d571
2,771
py
Python
20.2-Donut/Donut2.py
Kehvarl/AdventOfCode2019
f72cfeefdfbde365bc9a5b722d5875d556379cf2
[ "MIT" ]
1
2020-09-27T23:02:46.000Z
2020-09-27T23:02:46.000Z
20.2-Donut/Donut2.py
Kehvarl/AdventOfCode2019
f72cfeefdfbde365bc9a5b722d5875d556379cf2
[ "MIT" ]
null
null
null
20.2-Donut/Donut2.py
Kehvarl/AdventOfCode2019
f72cfeefdfbde365bc9a5b722d5875d556379cf2
[ "MIT" ]
1
2019-12-09T17:10:48.000Z
2019-12-09T17:10:48.000Z
import collections
from pprint import pprint

example1 = open("input.txt", "r").read()
# grid = [[val for val in line] for line in example1.split("\n")]
grid = example1.split("\n")
length = 0
for line in grid:
    length = max(len(line), length)
out = []
for line in grid:
    out.append(line[::-1].zfill(length)[::-1])
grid = out
scanned = []
neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]


def find_dot(dot_x, dot_y):
    for (_dx, _dy) in neighbors:
        if 0 <= dot_x + _dx < len(grid[0]) and 0 <= dot_y + _dy < len(grid):
            if grid[dot_y + _dy][dot_x + _dx] == ".":
                return (dot_x + _dx, dot_y + _dy), (dot_x - _dx, dot_y - _dy)  # (dot), (tag)
    return False


# Find portals
# For each portal:
#   Inner edge: recurse
#   Outer edge: return
portals = {}
portal_links = {}
height = len(grid) - 1
width = len(grid[0]) - 1
for y in range(len(grid)):
    for x in range(len(grid[0])):
        if grid[y][x].isalpha():
            portal = find_dot(x, y)
            if portal:
                dot, (tag_x, tag_y) = portal
                dot_x, dot_y = dot
                edge = dot_x == 2 or dot_x == width - 2 or dot_y == 2 or dot_y == height - 2
                tag = "".join(sorted(grid[y][x] + grid[tag_y][tag_x]))
                if not portals.get(tag):
                    portals[tag] = []
                portals[tag].append(((x, y), dot, edge))

gx, gy, sx, sy = (0, 0, 0, 0)
for link in portals:
    ends = portals[link]
    if len(ends) == 2:
        (a, (a_x, a_y), a_edge), (b, (b_x, b_y), b_edge) = ends
        portal_links[a] = (b_x, b_y, a_edge, link)
        portal_links[b] = (a_x, a_y, b_edge, link)
    elif link == "ZZ":
        goal, (gx, gy), ge = ends[0]
    elif link == "AA":
        start, (sx, sy), se = ends[0]

pprint(portals)
print(portal_links)

bfs = collections.deque([((sx, sy), 0, 0)])
seen = {(sx, sy, 0)}
running = True
while running:
    pos, level, dist = bfs.popleft()
    if pos == (gx, gy) and level == 0:
        print(dist)
        running = False
        break
    for neighbor in neighbors:
        dx, dy = neighbor
        tx, ty = pos
        tx, ty = tx + dx, ty + dy
        t_level = level
        if (tx, ty) in portal_links:
            px, py, p_edge, link = portal_links[(tx, ty)]
            # print(link, (tx, ty), (px, py), p_edge)
            if p_edge and t_level > 0:
                t_level -= 1
                tx, ty = px, py
            elif not p_edge:
                t_level += 1
                tx, ty = px, py
        if (tx, ty, t_level) in seen:
            continue
        seen.add((tx, ty, t_level))
        if grid[ty][tx] == '.':
            p = (tx, ty)
            s = (p, t_level, dist + 1)
            bfs.append(s)

print("complete")
24.741071
93
0.498015
425
2,771
3.103529
0.223529
0.030326
0.018196
0.02047
0.052312
0.052312
0.043215
0
0
0
0
0.021906
0.341032
2,771
111
94
24.963964
0.700438
0.067124
0
0.051282
0
0
0.010089
0
0
0
0
0
0
1
0.012821
false
0
0.025641
0
0.064103
0.064103
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47e72619d39a8c165d31a3169ddc7283ecd466a
845
py
Python
OR_Client_Library/openrefine_client/tests/test_history.py
idaks/OpenRefine-Provenance-Tools
cc469c3eb8e56c8b0f4616cc501546db3c4176ea
[ "MIT" ]
null
null
null
OR_Client_Library/openrefine_client/tests/test_history.py
idaks/OpenRefine-Provenance-Tools
cc469c3eb8e56c8b0f4616cc501546db3c4176ea
[ "MIT" ]
null
null
null
OR_Client_Library/openrefine_client/tests/test_history.py
idaks/OpenRefine-Provenance-Tools
cc469c3eb8e56c8b0f4616cc501546db3c4176ea
[ "MIT" ]
null
null
null
#!/usr/bin/env python
"""
test_history.py
"""

# Copyright (c) 2011 Paul Makepeace, Real Programmers. All rights reserved.

import unittest

from OR_Client_Library.openrefine_client.google.refine.history import *


class HistoryTest(unittest.TestCase):
    def test_init(self):
        response = {
            u"code": "ok",
            u"historyEntry": {
                u"id": 1303851435223,
                u"description": "Split 4 cells",
                u"time": "2011-04-26T16:45:08Z"
            }
        }
        he = response['historyEntry']
        entry = HistoryEntry(he['id'], he['time'], he['description'])
        self.assertEqual(entry.id, 1303851435223)
        self.assertEqual(entry.description, 'Split 4 cells')
        self.assertEqual(entry.time, '2011-04-26T16:45:08Z')


if __name__ == '__main__':
    unittest.main()
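HistoryEntry arrives via the star import from the openrefine-client library; a minimal stand-in consistent with the constructor order and attributes asserted above (illustrative only, the real class may do more, e.g. parse timestamps):

class HistoryEntry(object):
    # Stores the id, time, and description of one OpenRefine history
    # entry, matching the (id, time, description) order used in the test.
    def __init__(self, id, time, description):
        self.id = id
        self.time = time
        self.description = description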
26.40625
75
0.60355
95
845
5.231579
0.568421
0.090543
0.120724
0.088531
0.080483
0.080483
0
0
0
0
0
0.095847
0.259172
845
31
76
27.258065
0.698083
0.130178
0
0
0
0
0.190083
0
0
0
0
0
0.157895
1
0.052632
false
0
0.105263
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f47f72a41b188aa9caae89718d01a31bf276031b
6,160
py
Python
tests/batch/test_get_batch.py
Remmeauth/remme-core-cli
94cc09fe9d2e718b45273dde68d6c672c4773f6a
[ "MIT" ]
null
null
null
tests/batch/test_get_batch.py
Remmeauth/remme-core-cli
94cc09fe9d2e718b45273dde68d6c672c4773f6a
[ "MIT" ]
94
2019-03-27T09:34:28.000Z
2019-08-27T05:32:33.000Z
tests/batch/test_get_batch.py
Remmeauth/remme-core-cli
94cc09fe9d2e718b45273dde68d6c672c4773f6a
[ "MIT" ]
6
2019-06-06T15:16:38.000Z
2020-02-24T12:55:55.000Z
""" Provide tests for command line interface's get batch command. """ import json import pytest from click.testing import CliRunner from cli.constants import ( DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING, FAILED_EXIT_FROM_COMMAND_CODE, PASSED_EXIT_FROM_COMMAND_CODE, ) from cli.entrypoint import cli from cli.utils import dict_to_pretty_json BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE = 'ccb529856e538325b435c6a75261702d1bdb52d3873b29189a722330cda628a6' \ '62028a7b39d1f5475cb78f5fc12efb986a35553ce8f1b63580b97fc6ab9e9655' def test_get_batch(): """ Case: get a batch by identifier. Expect: batch is returned. """ runner = CliRunner() result = runner.invoke(cli, [ 'batch', 'get', '--id', BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE, '--node-url', DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING, ]) assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code assert isinstance(json.loads(result.output), dict) def test_get_batch_with_invalid_id(): """ Case: get a batch by its invalid identifier. Expect: the following identifier is invalid error message. """ invalid_batch_id = 'abcefg' runner = CliRunner() result = runner.invoke(cli, [ 'batch', 'get', '--id', invalid_batch_id, '--node-url', DEV_BRANCH_NODE_IP_ADDRESS_FOR_TESTING, ]) expected_error_message = { 'errors': { 'id': [ f'The following identifier `{invalid_batch_id}` is invalid.', ], }, } assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code assert dict_to_pretty_json(expected_error_message) in result.output def test_get_batch_without_node_url(mocker): """ Case: get a batch by its identifier without passing node URL. Expect: batch is returned from a node on localhost. """ batch_id = '6f200995e766da7218ec2a3d0aeabbe1151128063cdf4e954cd08390a879b28e' \ '085a06f8708d2e6bb34f6501e8ddc981f0353627c1d4f90c80a656a8090c8751' expected_result = { "data": { "header": { "signer_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f", "transaction_ids": [ "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c" "1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7", ], }, "header_signature": "57692f2bcc9be7fe2b59c052d5938eb92bd7be8a36487c1c7efc2c5758bf108e" "232892987e898071e5ea13b4cbe283e96ac45d8f63cd9065522df7b85b050977", "transactions": [ { "header": { "batcher_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f", "family_name": "sawtooth_settings", "family_version": "1.0", "inputs": [ "000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c1c0cbf0fbcaf64c0b", "000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7", ], "outputs": [ "000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c1c0cbf0fbcaf64c0b", ], "signer_public_key": "03d425d2d17b64e3ef8fee028089a567fbb05bd556f98c0b6fb62bc5750ea62b8f", }, "header_signature": "5a84ff8747e16d15a988a8b13134d24981a6b516bb41042e6ea95c47f6c9429c" "1c6fdf787ca2ea7fb8725b2bc2d0cd6aa3836aadfe85354deb714e048d41b4d7", "payload": "CAESgAEKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYyaXplZF9rZXlzEkIwM2Q0MjVkMmQxN2I2NGUzZWY4Zm" "VlMDI4MDg5YTU2N2ZiYjA1YmQ1NTZmOThjMGI2ZmIJjNMGVhNjJiOGYaEjB4ZDU0NzJhOTY1NWJkYTNmNg==", }, ], }, } mock_get_batch_by_id = mocker.patch('cli.batch.service.loop.run_until_complete') mock_get_batch_by_id.return_value = expected_result runner = CliRunner() result = runner.invoke(cli, [ 'batch', 'get', '--id', batch_id, ]) assert PASSED_EXIT_FROM_COMMAND_CODE == result.exit_code assert expected_result.get('data') == json.loads(result.output).get('result') def test_get_batch_with_invalid_node_url(): """ Case: get a batch by its identifier by 
passing an invalid node URL. Expect: the following node URL is invalid error message. """ invalid_node_url = 'my-node-url.com' runner = CliRunner() result = runner.invoke(cli, [ 'batch', 'get', '--id', BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE, '--node-url', invalid_node_url, ]) expected_error_message = { 'errors': f'Please check if your node running at http://{invalid_node_url}:8080.', } assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code assert dict_to_pretty_json(expected_error_message) in result.output @pytest.mark.parametrize('node_url_with_protocol', ['http://masternode.com', 'https://masternode.com']) def test_get_batch_node_url_with_protocol(node_url_with_protocol): """ Case: get a batch by its identifier by passing node URL with an explicit protocol. Expect: the following node URL contains a protocol error message. """ runner = CliRunner() result = runner.invoke(cli, [ 'batch', 'get', '--id', BATCH_IDENTIFIER_PRESENTED_ON_THE_TEST_NODE, '--node-url', node_url_with_protocol, ]) expected_error = { 'errors': { 'node_url': [ f'Pass the following node URL `{node_url_with_protocol}` without protocol (http, https, etc.).', ], }, } assert FAILED_EXIT_FROM_COMMAND_CODE == result.exit_code assert dict_to_pretty_json(expected_error) in result.output
34.606742
118
0.63961
537
6,160
7.013035
0.242086
0.040892
0.027881
0.035316
0.348115
0.294211
0.263675
0.237918
0.237918
0.187201
0
0.148123
0.27776
6,160
177
119
34.80226
0.698359
0.099513
0
0.507813
0
0
0.347171
0.239713
0
0
0
0
0.078125
1
0.039063
false
0.03125
0.046875
0
0.085938
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f4838193c2db95eaa11b6561ddf47a01a31acc59
690
py
Python
pyllusion/movement/movement_circles.py
RebeccaHirst/Pyllusion
9944076e38bced0eabb49c607482b71809150bdb
[ "MIT" ]
null
null
null
pyllusion/movement/movement_circles.py
RebeccaHirst/Pyllusion
9944076e38bced0eabb49c607482b71809150bdb
[ "MIT" ]
null
null
null
pyllusion/movement/movement_circles.py
RebeccaHirst/Pyllusion
9944076e38bced0eabb49c607482b71809150bdb
[ "MIT" ]
null
null
null
import numpy as np

from .movement_matrix import movement_matrix
from ..image import image_circles


def movement_circles(n=50, duration=2, fps=30, width=500, height=500, **kwargs):
    """
    >>> import pyllusion as ill
    >>>
    >>> images = ill.movement_circles(n=50, duration=4, fps=30, color="black", size=0.05)
    >>> #ill.images_to_gif(images, path="mygif.gif", fps=30)
    """
    n_frames = int(duration * fps)
    x, y = movement_matrix(n_frames=n_frames, **kwargs)

    # Generate PIL images
    images = []
    for i in range(n_frames):
        images.append(
            image_circles(width=width, height=height, n=n, x=x[i], y=y[i], **kwargs)
        )
    return images
27.6
89
0.631884
101
690
4.188119
0.445545
0.066194
0.07565
0.085106
0.122931
0
0
0
0
0
0
0.038961
0.218841
690
24
90
28.75
0.745826
0.282609
0
0
0
0
0
0
0
0
0
0
0
1
0.083333
false
0
0.25
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f484180dc11ca61b16fecb37c23ed96a63de8738
6,853
py
Python
sce.py
hzwfl2/Semantic-consistent-Embedding
d3712cc6f27febbf654e1eb8c43c0b48376a9be1
[ "MIT" ]
2
2021-12-22T07:39:30.000Z
2022-01-02T14:45:39.000Z
sce.py
hch-xmu/Semantic-consistent-Embedding
2e408267095079d70daff6b391209aabb3d9acd3
[ "MIT" ]
null
null
null
sce.py
hch-xmu/Semantic-consistent-Embedding
2e408267095079d70daff6b391209aabb3d9acd3
[ "MIT" ]
3
2021-12-16T12:56:10.000Z
2022-01-18T02:03:31.000Z
#%% import matplotlib.pyplot as plt import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import accuracy_score from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC,LinearSVC from torch import device from torch.optim import optimizer from torch.utils.data import DataLoader, Dataset from read_data import create_data #%% class my_dataset(Dataset): def __init__(self,data,attribute_label): super(my_dataset,self).__init__() self.data=data self.attribute_label=attribute_label def __len__(self): return self.data.shape[0] def __getitem__(self, index): batch_data=self.data[index] batch_label=self.attribute_label[index] return batch_data,batch_label #%% device=torch.device('cuda') np.random.seed(904) def pre_model(model, traindata, train_attributelabel, testdata, testlabel, attribute_matrix): model_dict = {'rf': RandomForestClassifier(n_estimators=100),'NB': GaussianNB(),'SVC_linear': SVC(kernel='linear'),'LinearSVC':LinearSVC()} res_list = [] for i in range(train_attributelabel.shape[1]): clf = model_dict[model] if max(train_attributelabel[:, i]) != 0: clf.fit(traindata, train_attributelabel[:, i]) res = clf.predict(testdata) else: res = np.zeros(testdata.shape[0]) res_list.append(res.T) test_pre_attribute = np.mat(np.row_stack(res_list)).T label_lis = [] for i in range(test_pre_attribute.shape[0]): pre_res = test_pre_attribute[i, :] loc = (np.sum(np.square(attribute_matrix - pre_res), axis=1)).argmin() label_lis.append(np.unique(testlabel)[loc]) label_lis = np.mat(np.row_stack(label_lis)) return test_pre_attribute,label_lis, testlabel #%% def off_diagonal(x): n, m = x.shape assert n == m return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten() #%% class Embedding_Net(nn.Module): def __init__(self,dim,lambda_): super(Embedding_Net,self).__init__() self.l11=nn.Linear(6,dim[0]) self.l12=nn.Linear(dim[0],dim[1]) self.l13=nn.Linear(2*dim[1],6) self.l21=nn.Linear(4,dim[0]) self.l22=nn.Linear(dim[0],dim[1]) self.l23=nn.Linear(2*dim[1],4) self.bn1=nn.BatchNorm1d(dim[0]) self.bn2=nn.BatchNorm1d(dim[1]) self.lambda_=lambda_ def compability_loss(self,z1,z2): N,D=z1.shape c=self.bn2(z1).T @ self.bn2(z2)/N on_diag=torch.diagonal(c).add_(-1).pow_(2).sum() off_diag=off_diagonal(c).pow_(2).sum() loss=on_diag+self.lambda_[3]*off_diag return loss def compute_loss(self,z1,z2,x,a,x_,a_): loss_R1=self.lambda_[0]*F.mse_loss(a,a_) loss_R2=self.lambda_[1]*F.mse_loss(x,x_) loss_CM=self.compability_loss(z1,z2) loss_CM=self.lambda_[2]*loss_CM loss=loss_R1+loss_R2+loss_CM return loss_R1,loss_R2,loss_CM,loss def transform(self,x,a): z1=self.l11(x) z1=torch.relu(self.bn1(z1)) z1=self.l12(z1) z2=self.l21(a) z2=torch.relu(self.bn1(z2)) z2=self.l22(z2) return z1,z2 def reconstruction(self,z1,z2): f1=torch.cat([z1,z2],dim=1) f2=torch.cat([z2,z1],dim=1) x_=self.l13(f1) a_=torch.sigmoid(self.l23(f2)) return x_,a_ def forward(self,x,a): z1,z2=self.transform(x,a) x_,a_=self.reconstruction(z1,z2) loss_R1,loss_R2,loss_CM,loss=self.compute_loss(z1,z2,x,a,x_,a_) package={'z1':z1,'z2':z2,'x':x,'x_':x_,'r1':loss_R1, 'r2':loss_R2,'cm':loss_CM,'loss':loss} return package #%% datapath='data/classData.csv' modes=['NB'] #'rf' test_classes={'test_class':[2,3]} for key,value in test_classes.items(): print('========================================{}:[{}:{}]========================================='.format(modes,key,value)) df = pd.read_csv(datapath) df['fault_type'] = 
df['G'].astype('str') + df['C'].astype('str') + df['B'].astype('str') + df['A'].astype('str') traindata,trainlabel,train_attributelabel, train_attributematrix,testdata,testlabel,test_attributelabel,test_attributematrix,attribute_matrix=create_data(df,value) _,y_pre,y_true=pre_model(modes[0], traindata, train_attributelabel, testdata, testlabel, test_attributematrix) original_acc=accuracy_score(y_pre,y_true) traindata=torch.from_numpy(traindata).float().to(device) label=torch.from_numpy(trainlabel.squeeze()).long().to(device) testdata=torch.from_numpy(testdata).float().to(device) batch_size=400 trainset=my_dataset(traindata,torch.from_numpy(train_attributelabel).float().to(device)) train_loader=DataLoader(trainset,batch_size=batch_size,shuffle=True) lambda_=[1,1e-5,1,0.25] dim=[6,12] model=Embedding_Net(dim,lambda_=lambda_) model.to(device) optimizer=optim.RMSprop(model.parameters(),lr=1e-2) L1,L2,L3,L=[],[],[],[] model.train() accs=[] best_acc=0 for epoch in range(200): model.train() for batch,(batch_data,batch_label) in enumerate(train_loader): optimizer.zero_grad() package=model(batch_data,batch_label) loss_R1,loss_R2,loss_CM,loss=package['r1'],package['r2'],package['cm'],package['loss'] loss.backward() optimizer.step() L1.append(loss_R1.item()) L2.append(loss_R2.item()) L3.append(loss_CM.item()) L.append(loss.item()) model.eval() with torch.no_grad(): train_package=model(traindata,torch.from_numpy(train_attributelabel).float().to(device)) f_train=train_package['z1'] f_train=torch.cat([f_train,traindata],dim=1).detach().cpu().numpy() test_package=model(testdata,torch.from_numpy(test_attributelabel).float().to(device)) f_test=test_package['z1'] f_test=torch.cat([f_test,testdata],dim=1).detach().cpu().numpy() test_preattribute,label_lis, testlabel=pre_model(modes[0], f_train, train_attributelabel, f_test, testlabel, test_attributematrix) acc=accuracy_score(label_lis, testlabel) accs.append(acc) if acc>best_acc: best_acc=acc print('epoch:{:d}, best_acc:{:.4f}'.format(epoch,best_acc)) print('finished! FDAT:{:.4f}, SCE:{:.4f}'.format(original_acc,best_acc)) # %%
33.758621
168
0.618707
954
6,853
4.232704
0.220126
0.010896
0.020802
0.011887
0.116147
0.072808
0.053492
0.027241
0.027241
0
0
0.031585
0.223844
6,853
203
169
33.758621
0.72758
0.002773
0
0.013333
0
0
0.040906
0.013736
0
0
0
0
0.006667
1
0.073333
false
0
0.1
0.006667
0.246667
0.02
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f484cdb74eddcab3519034cf17a9751d9384ce4d
1,876
py
Python
graphsage/partition_predict.py
colirain/GraphSAGE
a63145ff18f87cb69340c7b457c34839e9124086
[ "MIT" ]
null
null
null
graphsage/partition_predict.py
colirain/GraphSAGE
a63145ff18f87cb69340c7b457c34839e9124086
[ "MIT" ]
null
null
null
graphsage/partition_predict.py
colirain/GraphSAGE
a63145ff18f87cb69340c7b457c34839e9124086
[ "MIT" ]
null
null
null
import tensorflow as tf
import numpy as np

from graphsage.models import FCPartition
from graphsage.partition_train import construct_placeholders
from graphsage.utils import load_graph_data, load_embedded_data, load_embedded_idmap

flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_integer('dim_1', 128, 'Size of output dim (final is 2x this, if using concat)')

# DIR = 'trained_models'
# MODEL = 'partition'
# with tf.Session() as sess:
#     new_saver = tf.train.import_meta_graph(DIR+'/'+MODEL+'.ckpt.meta')
#     new_saver.restore(sess, tf.train.latest_checkpoint(DIR + '/./'))
#     new_saver.run()
#     print(new_saver)


def predict(train_data, id_map):
    num_classes = 3
    placeholders = construct_placeholders(num_classes)
    placeholders['features'] = train_data
    # feed_dict = dict()
    # train_data = train_data.astype('float32')
    # feed_dict.update({placeholders['features']: train_data})
    dim = []
    # print("f:{}".format(len(train_data[0])))
    dim.append(len(train_data[0]))
    dim.append(FLAGS.dim_1)
    dim.append(num_classes)
    model = FCPartition(placeholders, dim)
    sess = tf.Session()
    model.load(sess)
    results = model.predict()
    results_np = results.eval(session=sess)
    # print(results.eval(session=sess))
    # print(results_np.shape)
    id_map = id_map.astype('int')
    results_np = np.expand_dims(results_np, axis=1)
    results_np = np.insert(results_np, 0, id_map, axis=1)
    results_np = results_np[results_np[:, 0].argsort()]
    print(results_np)
    np.save(FLAGS.outDir + '/predict_predict.npy', results_np)


def main():
    print("load data ...")
    train_data = load_embedded_data(FLAGS.train_prefix)
    id_map = load_embedded_idmap(FLAGS.train_prefix)
    predict(train_data, id_map)


if __name__ == '__main__':
    main()
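A small hedged snippet for inspecting the file the script saves; the path is hypothetical (it depends on the FLAGS.outDir value used at run time), and column 0 holds the node id with the model output in the remaining columns, sorted by id:

import numpy as np

preds = np.load('trained_models/predict_predict.npy')  # hypothetical outDir value
print(preds.shape)
print(preds[:5])  # first five rows: [node id, prediction...]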
30.754098
95
0.678038
254
1,876
4.744094
0.334646
0.082158
0.039834
0.033195
0.121992
0.087137
0
0
0
0
0
0.009908
0.192964
1,876
61
96
30.754098
0.785997
0.299041
0
0
0
0
0.041935
0
0
0
0
0
0
1
0.060606
false
0
0.151515
0
0.212121
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f484e0eafc21497bc2d0dc913be6480e2eceab78
13,307
py
Python
scripts/generate_XML_files/DS1/annotatedsen_to_xml.py
AmmarQaseem/CPI-Pipeline-test
3866883c54d7bd77753ee4b72997949bdcf76359
[ "PostgreSQL", "ISC", "Intel" ]
null
null
null
scripts/generate_XML_files/DS1/annotatedsen_to_xml.py
AmmarQaseem/CPI-Pipeline-test
3866883c54d7bd77753ee4b72997949bdcf76359
[ "PostgreSQL", "ISC", "Intel" ]
null
null
null
scripts/generate_XML_files/DS1/annotatedsen_to_xml.py
AmmarQaseem/CPI-Pipeline-test
3866883c54d7bd77753ee4b72997949bdcf76359
[ "PostgreSQL", "ISC", "Intel" ]
null
null
null
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Copyright (c) 2015, Elham Abbasian <e_abbasian@yahoo.com>, Kersten Doering <kersten.doering@gmail.com> This parser reads annotated sentences (output from get_relations.py) in a tab-separated format to generate a unified XML format (Tikk et al., 2010. A comprehensive benchmark of kernel methods to extract protein-protein interactions from literature. PLoS Comput. Biol). """ # module to make use of regular expressions import re # set the default encoding to utf8 and ignore all decoding/encoding steps. # (ToDo: check whether the encoding command is needed - debug) import sys reload(sys) sys.setdefaultencoding("utf-8") # optparse - Parser for command-line options from optparse import OptionParser # import this function to add quotation arround the input text and ignore the extra quotations inside the sentence text #from xml.sax.saxutils import escape # (ToDo: not needed - debug) from xml.sax.saxutils import quoteattr ### MAIN PART OF THE SCRIPT ### if __name__=="__main__": # configure parsing of command-line arguments parser= OptionParser() parser.add_option("-i", "--input", dest="i", help='name of the input file',default="training_dataset_sorted.csv") parser.add_option("-o", "--output", dest="o", help='name of the output file',default="DS1.xml") (options,args)=parser.parse_args() # save parameters in an extra variable input_file= options.i output_file = options.o # open input file with annotated sentences infile = open(input_file,"r") # open output file outfile = open(output_file,"w") #example for the input format: #18227838-359 The mood stabilizers <compound-id="28486,3028194">lithium</compound-id> and <compound-id="3121">valproate</compound-id> activate the <protein-id="P29323">ERK</protein-id> pathway in prefrontal cortex and hippocampus and potentiate <protein-id="P29323">ERK</protein-id> pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis. lithium__ERK__no_interaction valproate__ERK__interaction #example for the output format """ <?xml version="1.0" encoding="UTF-8"> <corpus source="DS1"> <document id="DS1.d0" origId="18227838"> <sentence id="DS1.d0.s0" origId="18227838-359" text="The mood stabilizers lithium and valproate activate the ERK pathway in prefrontal cortex and hippocampus and potentiate ERK pathway-mediated neurite growth, neuronal survival and hippocampal neurogenesis."/> <entity id="DS1.d0.s0.e0" origId="28486,3028194" charOffset="x1-y1" type="compound" text="lithium"/> <entity id="DS1.d0.s0.e1" origId="3121" charOffset="x2-y2" type="compound" text="valproate"/> <entity id="DS1.d0.s0.e2" origId="P29323" charOffset="x3-y3" type="protein" text="ERK"/> <interaction id="DS1.d0.s0.i0" e1="DS1.do.s0.e0" e2="DS1.do.s0.e2" type="no_interaction" directed="False" /> <interaction id="DS1.d0.s0.i1" e1="DS1.do.s0.e1" e2="DS1.do.s0.e2" type="interaction" directed="False" /> </sentence> [...] </document> [...] 
</corpus> """ # add XML header and define corpus source outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>"+"\n") outfile.write("<corpus source=\"DS1\">"+"\n") # variable to store and compare the last read PubMed ID to notice whether there are multiple sentences with the same PubMed ID or not # the document ID refers to the PubMed ID (origID) pre_pmid="" # doc_num counts the number of created documents doc_num =0 # read lines in CSV file for line in infile : # tab-separated format temp = line.strip().split("\t") # get PubMed ID, sentences ID, and the sentence itself # (ToDo: use a split command instead of this regular expression - debug) curr_pmid = re.match('(\d{8})',temp[0]).group(0) pmid_sent_num = temp[0] sentence_text = temp[1] # find all annotated proteins and compounds by matching their tags pro_positions= [(a.start(), a.end()) for a in list(re.finditer('<protein-id="(.*?)">(.*?)</protein-id>',sentence_text))] cmp_positions = [(a.start(), a.end()) for a in list(re.finditer('<compound-id="(.*?)">(.*?)</compound-id>',sentence_text))] # join the two lists positions = pro_positions + cmp_positions positions.sort() #Initialize the list with the number of identified tags entity_list =[] entity_list=[0]*len(positions) # iterate over all identified positions of the identified tags for i in range(len(positions)): # initialze the second dimension of the list with a length of four (entity_type,entity_id,entity_text,entity_charoffset) entity_list[i]=[0]*4 # store these four elements with grouping in the regular expression obj = re.match('<(protein|compound)-id="(.*?)">(.*?)</(protein-id|compound-id)>',sentence_text[positions[i][0]:positions[i][1]]) entity_list[i][0]=obj.group(1) #entity_type entity_list[i][1]=obj.group(2) #entity_id entity_list[i][2]=obj.group(3) #entity_text entity_list[i][2]=entity_list[i][2].replace("[","(").replace("]",")") # the entity_charoffset will be assign later after having the pure sentence text generated (without any tags) # the sentence without any tags will be generated by deleting all tags via text concatenation # initialize (ToDo: initialization like this not needed - debug) pur_sent_text = sentence_text # enumerate over the list of positions (index, value) for i,e in reversed(list(enumerate(positions))): pur_sent_text = pur_sent_text[0:positions[i][0]]+entity_list[i][2]+pur_sent_text[positions[i][1]:] # get the character offset of all identified synonyms # decode the sentences to UTF8 to prevent the usage of more than one character for special letters, symbols, etc. 
# make use of a list of repeated synonyms and synonym positions repeated_syn_pos =[] rep_syn =[] for i in range(len(entity_list)) : # check whether this is the fist occurrence of the current synonym if not entity_list[i][2] in rep_syn : # get the list of positions of all occurences of the current synonym u_pur_sent_text = pur_sent_text.decode("utf8") charoffset_value = [(a.start(), a.end()) for a in list(re.finditer(re.escape(entity_list[i][2]),u_pur_sent_text))] # check whether it occures only once such that the charoffsetone directly be assigned if len(charoffset_value) == 1 : entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1]) else: # if it occures more than one time, the charoffset has to be assigned according to the first pair of positions entity_list[i][3] = str(charoffset_value[0][0])+"-"+str(charoffset_value[0][1]) # append this synonym to the rep_syn list to store all repeated synonyms in this sentence rep_syn.append(entity_list[i][2]) # delete the fist pair of positions from the list charoffset_value = charoffset_value[1:] # add the rest of positions pairs for the current synonym to another list for j in range(len(charoffset_value)): repeated_syn_pos.append([entity_list[i][2],charoffset_value[j][0],charoffset_value[j][1]]) else: # this case refers to at least the second occurrence of the synonym # for each repeated synonym, assign the first position pair from the repeated_syn_pos list for k in range(len(repeated_syn_pos)): if repeated_syn_pos[k][0] == entity_list[i][2]: break entity_list[i][3] = str(repeated_syn_pos[k][1])+"-"+str(repeated_syn_pos[k][2]) # get pairs and their interaction status (separated by a double underscore) listof_int_noint = temp[2:] interaction_list=[0]*len(listof_int_noint) for i in range(len(listof_int_noint)): interaction_list[i]=listof_int_noint[i].split('__') # interaction/no_interaction corresponds to True/False TF_int_list=[0]*len(interaction_list) for intid in range(len(interaction_list)) : if interaction_list[intid][2]=="interaction" : TF_int_list[intid]="True" else : TF_int_list[intid]="False" # debug: # print TF_int_list # build XML structure # check whether the PubMed ID changed in comparision to the last parsed sentence if curr_pmid == pre_pmid : # if this is the case, only the sentence ID has to be increased sent_num +=1 # add sentence ID using the current document number # (doc_num has to be decreased by one, because this index is automatically increased after each sentence) # all openning and closing squared brackets ([,]) should be replaced with round brackets, because they will make problems in the tokenization step of the (preprocessing) pipeline pur_sent_text = pur_sent_text.replace("[","(").replace("]",")") outfile.write(" <sentence id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n") # build entity tags according to the list identified tags from the CSV file (entity_list) for i in range(0,len(entity_list)) : outfile.write(" <entity id=\"DS1.d"+str(doc_num-1)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n") # insert types of interaction for each pair of entities # get the index of the synonym interactions in entity_list origId = "DS1.d"+str(doc_num-1)+".s"+str(sent_num) for int_id in range(len(interaction_list)) : for ent_id in range(len(entity_list)): if interaction_list[int_id][0] in entity_list[ent_id]: break 
first_entity=ent_id for k in range(len(entity_list)): if interaction_list[int_id][1] in entity_list[k]: break second_entity=k outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n") # close sentence tag outfile.write(" </sentence>\n") # if the current PubMed ID changed in comparison to the last parsed sentences else : if not doc_num == 0 : outfile.write(" </document>\n") sent_num =0 # a new document tag has to be opened and the sentences can be added outfile.write(" <document id=\"DS1.d"+str(doc_num)+"\" origId=\""+str(curr_pmid)+"\">"+"\n") # replace squared brackets ([,]) with round brackets pur_sent_text = pur_sent_text.replace("[","(").replace("]",")") outfile.write(" <sentence id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+"\" origId=\""+str(pmid_sent_num)+"\" text="+quoteattr(pur_sent_text)+">"+"\n") # now have to make entity tags according to entity_list data. for i in range(0,len(entity_list)) : outfile.write(" <entity id=\"DS1.d"+str(doc_num)+".s"+str(sent_num)+".e"+str(i)+"\" origId=\""+entity_list[i][1]+"\" charOffset=\""+entity_list[i][3]+"\" type=\""+entity_list[i][0]+"\" text=\""+entity_list[i][2]+"\"/>"+"\n") # build entity tags origId = "DS1.d"+str(doc_num)+".s"+str(sent_num) for int_id in range(len(interaction_list)) : for ent_id in range(len(entity_list)): if interaction_list[int_id][0] in entity_list[ent_id]: break first_entity=ent_id for k in range(len(entity_list)): if interaction_list[int_id][1] in entity_list[k]: break second_entity=k outfile.write(" <pair e1=\""+origId+".e"+str(first_entity)+"\" e2=\""+origId+".e"+str(second_entity)+"\" id=\""+origId+".i"+str(int_id)+"\" interaction=\""+TF_int_list[int_id]+"\" />"+"\n") # close sentence tag outfile.write(" </sentence>\n") # set new PubMed ID as the last parsed document ID and increase document index pre_pmid = curr_pmid doc_num+=1 # close document tag outfile.write("</document>\n") # close corpus tag outfile.write("</corpus>\n") # close files infile.close() outfile.close()
58.364035
425
0.618622
1,814
13,307
4.409592
0.211687
0.048756
0.031629
0.016502
0.292787
0.24303
0.230404
0.217652
0.204651
0.185523
0
0.022917
0.252348
13,307
227
426
58.621145
0.781084
0.362441
0
0.307018
0
0
0.182658
0.02854
0.017544
0
0
0.008811
0
1
0
false
0
0.035088
0
0.035088
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f484e4f5510b4a5e4e942079f6f30e54e25d0b89
488
py
Python
tests/test_add_contact.py
SergeyDorokhov/python_training
e15e561fe7ad055048643adcfc88b3f2d55530ca
[ "Apache-2.0" ]
null
null
null
tests/test_add_contact.py
SergeyDorokhov/python_training
e15e561fe7ad055048643adcfc88b3f2d55530ca
[ "Apache-2.0" ]
null
null
null
tests/test_add_contact.py
SergeyDorokhov/python_training
e15e561fe7ad055048643adcfc88b3f2d55530ca
[ "Apache-2.0" ]
null
null
null
def test_add_contact(app, db, json_contacts, check_ui):
    contact = json_contacts
    list_before = db.get_contact_list()
    contact.id_contact = app.contact.get_next_id(list_before)
    app.contact.create(contact)
    assert len(list_before) + 1 == len(db.get_contact_list())
    list_after = db.get_contact_list()
    list_before.append(contact)
    assert sorted(list_before) == sorted(list_after)
    if check_ui:
        assert sorted(list_after) == sorted(app.contact.get_list())
44.363636
67
0.727459
72
488
4.597222
0.319444
0.151057
0.108761
0.145015
0.120846
0
0
0
0
0
0
0.002439
0.159836
488
11
67
44.363636
0.804878
0
0
0
0
0
0
0
0
0
0
0
0.272727
1
0.090909
false
0
0
0
0.090909
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f485580fbee3d8993b0b04b4d71777a8883725b7
1,182
py
Python
website/members/urls.py
eamanu/asoc_members
bf2e99e9c63c60a59bdfd10ca1812d78851cbde6
[ "MIT" ]
null
null
null
website/members/urls.py
eamanu/asoc_members
bf2e99e9c63c60a59bdfd10ca1812d78851cbde6
[ "MIT" ]
null
null
null
website/members/urls.py
eamanu/asoc_members
bf2e99e9c63c60a59bdfd10ca1812d78851cbde6
[ "MIT" ]
null
null
null
from django.conf import settings
from django.conf.urls.static import static
from django.urls import path

from members import views

urlpatterns = [
    path('solicitud-alta/', views.signup_initial, name='signup'),
    path('solicitud-alta/persona/', views.signup_form_person, name='signup_person'),
    path('solicitud-alta/organizacion', views.signup_form_organization, name='signup_organization'),
    path('solicitud-alta/gracias', views.signup_thankyou, name='signup_thankyou'),
    path('reportes/', views.reports_main, name='reports_main'),
    path('reportes/deudas', views.report_debts, name='report_debts'),
    path('reportes/completos', views.report_complete, name='report_complete'),
    path('reportes/incompletos', views.report_missing, name='report_missing'),
    path('reportes/ingcuotas', views.report_income_quotas, name='report_income_quotas'),
    path('reportes/ingdinero', views.report_income_money, name='report_income_money'),
    path('reportes/miembros', views.members_list, name="members_list"),
    path('reportes/miembros/<pk>/', views.member_detail, name='member_detail'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
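Named routes like these are normally resolved with Django's reverse() rather than hard-coded paths; a short hedged example (the final prefix depends on how this urls.py is included in the project's root URLconf):

from django.urls import reverse

url = reverse('member_detail', kwargs={'pk': 42})
# e.g. '/reportes/miembros/42/' when included at the site root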
47.28
88
0.755499
147
1,182
5.857143
0.319728
0.111498
0.078978
0
0
0
0
0
0
0
0
0
0.102369
1,182
24
89
49.25
0.811499
0
0
0
0
0
0.334179
0.080372
0
0
0
0
0
1
0
false
0
0.210526
0
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f485c8b7834281c5e46b0be30ec91fef7f0a76cd
2,482
py
Python
Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py
vais-ral/CCPi-ML
ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4
[ "Apache-2.0" ]
null
null
null
Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py
vais-ral/CCPi-ML
ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4
[ "Apache-2.0" ]
null
null
null
Benchmarking/Keras/Tensorflow/TF_dataforcomparisongraphss.py
vais-ral/CCPi-ML
ca9baeb0dd5db3a97ac8ab9e33e03aeae42ebfa4
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 18 14:04:03 2018

@author: zyv57124
"""

import scipy.io as sio
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.python.training import gradient_descent
from time import time


class TimingCallback(keras.callbacks.Callback):
    def __init__(self):
        self.logs = []

    def on_epoch_begin(self, epoch, logs={}):
        self.starttime = time()

    def on_epoch_end(self, epoch, logs={}):
        self.logs.append(time() - self.starttime)


# Load data ------------------------------------------------------
def loadMATData(file1):
    return sio.loadmat(file1)


# Load data -------------------------------------------------------
data = loadMATData('ex3data1.mat')
features = data['X']
labels = data['y']
mask = labels == 10  # renamed from `filter` to avoid shadowing the builtin
labels[mask] = 0

# Shuffle data ---------------------------------------------------
ran = np.arange(features.shape[0])
np.random.shuffle(ran)
features = features[ran]
labels = labels[ran]
training_features = features[:3500]
training_labels = labels[:3500]
test_features = features[3501:]
test_labels = labels[3501:]

for i in np.arange(0, 500, 10):
    # TF neural network builder --------------------------------------
    model = keras.Sequential([
        keras.layers.Dense(400, activation=tf.nn.relu),
        keras.layers.Dense(25, activation=tf.nn.relu),
        keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    model.compile(optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    predictions = model.predict(test_features)
    cb = TimingCallback()
    history = model.fit(training_features, training_labels, batch_size=i+1,
                        epochs=100, verbose=2, callbacks=[cb])
    # Store epoch number and loss values in a .txt file
    loss_data = (history.history['loss'])
    f = open("TF_loss_data_batchnum_" + str(i+1) + ".txt", "w")
    for xx in range(1, len(loss_data) + 1):
        if xx == 1:
            delta_loss = 'Nan'
        else:
            delta_loss = (loss_data[xx-2] - loss_data[xx-1])
        # Columns: epoch, loss, batch size, epoch time, change in loss
        f.write(str(xx) + "," + str(loss_data[xx-1]) + "," + str(i+1) + "," +
                str(cb.logs[xx-1]) + "," + str(delta_loss) + "\n")
    f.close()
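The per-epoch wall-clock times collected by TimingCallback can also be summarized directly after a fit; a short sketch (assumes it runs right after model.fit(...) so cb.logs holds one duration per completed epoch):

avg_epoch_time = sum(cb.logs) / len(cb.logs)
print("mean epoch time: %.3fs over %d epochs" % (avg_epoch_time, len(cb.logs)))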
17.236111
144
0.580983
304
2,482
4.638158
0.440789
0.034043
0.034043
0.024113
0.048227
0.048227
0.048227
0
0
0
0
0.036885
0.213537
2,482
144
145
17.236111
0.685451
0.193392
0
0
0
0
0.046851
0.0267
0
0
0
0
0
1
0.081633
false
0
0.163265
0.020408
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f485da5cf70dcae9f004e6210259cc3b9e4d5254
402
py
Python
Easy/two-numbers-sum/solution-1.py
MCFrank16/python-algo
dd48f6c5b9f4a941a18fc4620164c807c0e1d35e
[ "MIT" ]
null
null
null
Easy/two-numbers-sum/solution-1.py
MCFrank16/python-algo
dd48f6c5b9f4a941a18fc4620164c807c0e1d35e
[ "MIT" ]
null
null
null
Easy/two-numbers-sum/solution-1.py
MCFrank16/python-algo
dd48f6c5b9f4a941a18fc4620164c807c0e1d35e
[ "MIT" ]
null
null
null
# solution 1: Brute Force
# time complexity: O(n^2)
# space complexity: O(1)

def twoNumberSum(arr, n):
    for i in range(len(arr) - 1):
        firstNum = arr[i]
        for j in range(i + 1, len(arr)):
            secondNum = arr[j]
            if firstNum + secondNum == n:
                return [firstNum, secondNum]
    return []

print(twoNumberSum([3,5,-4,8,11,1,-1,6], 10))
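For contrast, a single-pass hash-set variant trades O(n) extra space for linear time. This is a sketch added alongside the brute-force solution above, not part of the original repository.

# solution sketch: single pass with a set
# time complexity: O(n)
# space complexity: O(n)

def twoNumberSumFast(arr, n):
    seen = set()
    for num in arr:
        complement = n - num
        if complement in seen:
            return [complement, num]
        seen.add(num)
    return []

print(twoNumberSumFast([3, 5, -4, 8, 11, 1, -1, 6], 10))  # -> [11, -1]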
23.647059
45
0.524876
57
402
3.701754
0.54386
0.104265
0
0
0
0
0
0
0
0
0
0.059259
0.328358
402
16
46
25.125
0.722222
0.174129
0
0
0
0
0
0
0
0
0
0
0
1
0.111111
false
0
0
0
0.333333
0.111111
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f488b98251360b04f0d4a4065b27efc58a8ffeb9
8,448
py
Python
data_extraction/scripts/bnf_adr_extraction.py
elpidakon/CRESCENDDI
ab9e65621d331689f4aaeeb08902f29d90b7d1b9
[ "MIT" ]
null
null
null
data_extraction/scripts/bnf_adr_extraction.py
elpidakon/CRESCENDDI
ab9e65621d331689f4aaeeb08902f29d90b7d1b9
[ "MIT" ]
null
null
null
data_extraction/scripts/bnf_adr_extraction.py
elpidakon/CRESCENDDI
ab9e65621d331689f4aaeeb08902f29d90b7d1b9
[ "MIT" ]
null
null
null
# Kontsioti, Maskell, Dutta & Pirmohamed, A reference set of clinically relevant # adverse drug-drug interactions (2021) # Code to extract single-drug side effect data from the BNF website from bs4 import BeautifulSoup import urllib import os, csv import numpy as np import pandas as pd import re from tqdm import tqdm URL_BEGINNING = 'https://bnf.nice.org.uk/drug/' print('beginning scrape for individual drugs...') # Fetch the HTML containing the full list of APIs. r = urllib.request.urlopen(URL_BEGINNING).read() soup1 = BeautifulSoup(r, 'lxml') # Extract the full URL list. URL_list = [] for s in soup1.find_all('div', {'class': 'span11'}): for ai in s(href=True): temp = URL_BEGINNING + ai['href'] URL_list.append(temp) print(URL_list) # Create an empty dataframe for storing the extracted data for APIs. scraped_API_count = 0 scraped_API = pd.DataFrame(np.nan, index = range(0,160000), columns = ['API', 'AE', 'Frequency'], dtype = str) row_count = 0 # Empty list to store API mappings to their drug class (if applicable). API_to_drugclass = [] # Scrape individual drug (API) side effects. HIGHEST_API_ID = len(URL_list) for id in tqdm(range(0, HIGHEST_API_ID)): # Try to fetch the HTML for each API. try: l = urllib.request.urlopen(URL_list[id]).read() # If the page returns a 404 error, skip this id. except urllib.error.HTTPError as e: if e.getcode() == 404: continue raise # Add one to the count of succesfully scraped products. scraped_API_count += 1 soup2 = BeautifulSoup(l, 'lxml') API = soup2.find('h1', id= '').span.getText() # Extract the relevant information to a dataframe. # In case the API contains a side effect section. if soup2.find('section', {'id':'sideEffects'}): ae_list = soup2.find_all('span', {'class': 'sideEffect'}) for a in ae_list: adv_event = a.getText() scraped_API.at[row_count, 'API'] = API scraped_API.at[row_count,'AE'] = adv_event freq = a.parent.parent.parent.h4.getText() scraped_API.at[row_count, 'Frequency'] = freq row_count += 1 # Check if the drug belongs to a specific drug class. If yes, extract # the drug class name and the link to the corresponding webpage. if soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*')): temp = [] temp.append(API) drug_class = soup2.find('a', href = re.compile(r'.*/drug-class/.*')).span.getText() temp.append(drug_class) li = soup2.find('section', {'id':'sideEffects'}).find('a', href = re.compile(r'.*/drug-class/.*'))['href'] drug_class_link = 'https://bnf.nice.org.uk' + str(li) temp.append(drug_class_link) API_to_drugclass.append(temp) # In case the API does not contain a side effect section. else: adv_event = 'NO AEs MENTIONED' scraped_API.at[row_count, 'API'] = API scraped_API.at[row_count,'AE'] = adv_event scraped_API.at[row_count,'Frequency'] = '' row_count += 1 # Remove empty rows from the dataframe that contains the extracted data. scraped_API_dropna = scraped_API[~scraped_API.isin(['n']).any(axis=1)] # Remove spaces at the beginning and at the end of the text fields. scraped_API_dropna['API'] = scraped_API_dropna['API'].str.strip() scraped_API_dropna['AE'] = scraped_API_dropna['AE'].str.strip() scraped_API_dropna['Frequency'] = scraped_API_dropna['Frequency'].str.strip() print('BNF individual side effects succesfully scraped.') print('beginning scrape for drug classes...') # Create a dataframe with drug names, drug classes and related URLs (where applicable). 
API_class_df = pd.DataFrame(API_to_drugclass, columns = ['API','Drug_Class','Link']) # Create a list with all the links for the drug class webpages. class_links = API_class_df['Link'].unique().tolist() # Scrape drug class side effects. HIGHEST_DRUG_CLASS_ID = len(class_links) scraped_class_count = 0 # Create an empty dataframe for storing the extracted data for drug classes. scraped_class = pd.DataFrame(np.nan, index = range(0,160000), columns = ['Drug_Class', 'AE', 'Frequency'], dtype = str) row_count_2 = 0 for id in tqdm(range(0, HIGHEST_DRUG_CLASS_ID)): # Try to fetch the HTML for each drug class. try: l = urllib.request.urlopen(class_links[id]).read() # If the page returns a 404 error, skip this id. except urllib.error.HTTPError as e: if e.getcode() == 404: continue raise # Add one to the count of succesfully scraped drug classes. scraped_class_count += 1 soup3 = BeautifulSoup(l, 'lxml') # Extract the drug class name. class_name = soup3.find('h1', id= '').span.getText() # Extract the relevant information to a dataframe. class_ae_list = soup3.find_all('span', {'class': 'sideEffect'}) for a in class_ae_list: adv_event = a.getText() scraped_class.at[row_count_2, 'Drug_Class'] = class_name scraped_class.at[row_count_2,'AE'] = adv_event freq = a.parent.parent.parent.h4.getText() scraped_class.at[row_count_2, 'Frequency'] = freq row_count_2 += 1 # Remove empty rows from the dataframe that contains the extracted data. scraped_class_dropna = scraped_class[~scraped_class.isin(['n']).any(axis=1)] # Remove spaces at the beginning and at the end of the text fields. scraped_class_dropna['Drug_Class'] = scraped_class_dropna['Drug_Class'].str.strip() scraped_class_dropna['AE'] = scraped_class_dropna['AE'].str.strip() scraped_class_dropna['Frequency'] = scraped_class_dropna['Frequency'].str.strip() print('BNF drug class side effects succesfully scraped.') print('combine extracted data...') ## Combine both tables by adding drug class side effects to the individual ## ingredients of each drug class. # Create a dictionary that contains all drug classes as keys and side effects # with associated frequencies as values. AEs_by_class_dict = scraped_class_dropna.groupby('Drug_Class')[['AE', 'Frequency']].apply(lambda g: list(map(tuple, g.values.tolist()))).to_dict() # Remove URL column API_class_df.drop(columns = 'Link', inplace = True) # Create a dataframe with drug class as the index of APIs (if available) # and add their drug class side effects and associated frequencies. API_class_df['Drug_Class'] = API_class_df['Drug_Class'].str.strip() API_class_df.set_index('Drug_Class', inplace = True) API_class_df['AE_freq_tuple'] = API_class_df.index.to_series().map(AEs_by_class_dict) API_class_df.reset_index(inplace=True) # Create a new dataframe to store drug class side effect data for each API. AEs_from_class_df = API_class_df.explode('AE_freq_tuple').reset_index(drop=True) AEs_from_class_df[['AE', 'Frequency']] = pd.DataFrame(AEs_from_class_df['AE_freq_tuple'].tolist(), index = AEs_from_class_df.index) AEs_from_class_df['from_drug_class'] = 'Yes' AEs_from_class_df.drop(columns = ['AE_freq_tuple','Drug_Class'], inplace = True) # Fill NAs in Frequency column if no side effects are mentioned. scraped_API_dropna.loc[scraped_API_dropna.AE == 'NO AEs MENTIONED', 'Frequency'] = 'N/A' # Fill NAs in drug class indicator if no side effects are mentioned. Otherwise, put 'No'. 
scraped_API_dropna['from_drug_class'] = np.where(scraped_API_dropna['AE'] == 'NO AEs MENTIONED', 'N/A', 'No') # Concatenate the two dataframes to get a final one. final_df = pd.concat([scraped_API_dropna, AEs_from_class_df]) # Remove any rows that do not contain side effects. final_df = final_df[final_df.AE != 'NO AEs MENTIONED'] # Convert dataframe to lowercase. final_df = final_df.apply(lambda x: x.astype(str).str.lower()) # Sort alphabetically. final_df = final_df.sort_values(by=['API', 'from_drug_class']) # Remove any duplicates. final_df.drop_duplicates(subset = ['API', 'AE', 'Frequency'], keep = 'first', inplace = True) # Rename columns. final_df.columns = ['Drug_name', 'AE', 'Frequency', 'from_drug_class'] FILE_NAME = 'data_extraction/output/bnf_single_data.csv' print('saving to file...') # Save the dataset to a csv file. final_df.to_csv(FILE_NAME, index=False, encoding = "utf-8")
43.546392
147
0.68608
1,249
8,448
4.456365
0.196157
0.059828
0.034495
0.017607
0.416098
0.318721
0.268954
0.23392
0.208049
0.193676
0
0.009978
0.1933
8,448
193
148
43.772021
0.80675
0.289536
0
0.176991
0
0
0.172396
0.007292
0
0
0
0
0
1
0
false
0
0.061947
0
0.061947
0.061947
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f489d029eb3e215d049f6f2f3cc368f56d30226f
1,080
py
Python
core/forms.py
nicoknoll/howimetcorona
c55198118b2c31ee8b76c023b5a9fc4454cc1e08
[ "Apache-2.0" ]
1
2020-03-21T09:47:17.000Z
2020-03-21T09:47:17.000Z
core/forms.py
nicoknoll/howimetcorona
c55198118b2c31ee8b76c023b5a9fc4454cc1e08
[ "Apache-2.0" ]
5
2020-03-20T20:12:16.000Z
2021-09-22T18:46:48.000Z
core/forms.py
nicoknoll/howimetcorona
c55198118b2c31ee8b76c023b5a9fc4454cc1e08
[ "Apache-2.0" ]
null
null
null
from django import forms


class BaseFileForm(forms.Form):
    # we try to minify the file to only submit the data
    points_file = forms.FileField(
        required=False,
        widget=forms.FileInput(attrs={'required': 'required'}),
        label="Location History File (.json)"
    )
    points_data = forms.CharField(widget=forms.HiddenInput(), required=False)

    def clean(self):
        points_file = self.cleaned_data.get('points_file')
        points_data = self.cleaned_data.get('points_data')

        if not points_file and not points_data:
            raise forms.ValidationError({'points_file': 'File is required.'})

        return self.cleaned_data


class ReportForm(BaseFileForm):
    symptoms_at = forms.DateField(widget=forms.TextInput(attrs={
        'placeholder': 'YYYY-MM-DD',
        'pattern': '[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}',
        'title': 'YYYY-MM-DD'
    }))
    is_verified = forms.BooleanField(required=False)


class CheckForm(BaseFileForm):
    pass


class DeleteForm(forms.Form):
    delete_token = forms.CharField(label="Delete token")
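A minimal sketch of how ReportForm might be bound and validated in a view, so that BaseFileForm.clean() can enforce that either points_file or points_data is present. The view name, template name, and persistence step are illustrative assumptions, not part of the original project.

from django.shortcuts import render

def report_view(request):
    # Bind POST data and uploaded files so clean() can check that at least
    # one of points_file / points_data was provided.
    form = ReportForm(request.POST or None, request.FILES or None)
    if request.method == 'POST' and form.is_valid():
        symptoms_at = form.cleaned_data['symptoms_at']
        ...  # hand the cleaned data to whatever persists a report (assumption)
    return render(request, 'report.html', {'form': form})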
28.421053
77
0.662963
136
1,080
5.154412
0.470588
0.071327
0.064194
0.051355
0.068474
0
0
0
0
0
0
0.012806
0.20463
1,080
37
78
29.189189
0.80326
0.04537
0
0
0
0.04
0.174927
0.029155
0
0
0
0
0
1
0.04
false
0.04
0.04
0
0.48
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48bfbdf82f8ea69c9578103bcb880d230cfe368
718
py
Python
papers/wdmerger_I/plots/sponge.py
AMReX-Astro/wdmerger
9f575efacc8d373b6d2961f731e30bf59ee15ffd
[ "MIT" ]
2
2019-01-23T21:12:02.000Z
2021-12-14T07:34:38.000Z
papers/wdmerger_I/plots/sponge.py
AMReX-Astro/wdmerger
9f575efacc8d373b6d2961f731e30bf59ee15ffd
[ "MIT" ]
1
2017-08-05T06:25:41.000Z
2017-08-05T06:25:41.000Z
papers/wdmerger_I/plots/sponge.py
AMReX-Astro/wdmerger
9f575efacc8d373b6d2961f731e30bf59ee15ffd
[ "MIT" ]
2
2018-12-25T01:05:59.000Z
2020-12-28T10:01:59.000Z
# This Python program is used to create a plot displaying the sponge
# function we use in the CASTRO hydrodynamics for the wdmerger problem.

import numpy as np
import matplotlib.pyplot as plt

def sponge(r):
    # Unused placeholder in the original source; the sponge profile is
    # computed on the module-level grid below.
    pass

rs = 0.75
rt = 0.85

r = np.linspace(0.0, 1.0, 1000)
f = np.zeros(len(r))

idx = np.where(r < rs)
f[idx] = 0.0

idx = np.where(r < rt)
idx = np.where(r[idx] >= rs)
f[idx] = 0.5 * (1.0 - np.cos(np.pi * (r[idx] - rs) / (rt - rs)))

idx = np.where(r >= rt)
f[idx] = 1.0

plt.plot(r, 1.0 - f, linewidth=4.0)
plt.xlabel('Radius', fontsize=20)
plt.ylabel(r'$1 - f_S$', fontsize=20)
plt.xlim([0.0, 1.0])
plt.ylim([-0.05, 1.05])
plt.tick_params(labelsize=16)
plt.tight_layout()
plt.savefig('sponge.eps')
18.894737
71
0.635097
145
718
3.124138
0.455172
0.022075
0.0883
0.09713
0.057395
0
0
0
0
0
0
0.072635
0.175487
718
37
72
19.405405
0.692568
0.190808
0
0
0
0
0.043253
0
0
0
0
0
0
1
0.043478
false
0
0.086957
0
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48c4c17d15169f83e1e0f82eed8e69642feb9a8
753
py
Python
Python/110-1/Midterm Additional HW/005.py
JenFuChen/NKUST
bd80a449eddfdaf75709379d2e904ff70d409666
[ "MIT" ]
3
2021-11-07T17:33:54.000Z
2021-12-28T08:31:20.000Z
Python/110-1/Midterm Additional HW/005.py
JenFuChen/NKUST
bd80a449eddfdaf75709379d2e904ff70d409666
[ "MIT" ]
null
null
null
Python/110-1/Midterm Additional HW/005.py
JenFuChen/NKUST
bd80a449eddfdaf75709379d2e904ff70d409666
[ "MIT" ]
null
null
null
# 005 Print a diamond shape
while(1):
    level = int(input())
    if(level <= 0):
        break
    L = 2*level-1
    mid = int((L - 1) / 2)
    inspa = mid * 2 - 1
    for i in range(L):
        spa = level - i - 1
        if spa >= 0:
            print(" " * spa, end='')
            print('*', end='')
        if spa < 0:
            spa = -spa
            print(" " * spa, end='')
            print('*', end='')
        if(i > 0 and i <= mid):
            for j in range(i*2-1):
                print(" ", end='')
            print('*', end='')
        if(i > 0 and i > mid and i != L-1):
            inspa = inspa - 2
            for j in range(inspa):
                print(" ", end='')
            print('*', end='')
        print()
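A worked input/output example for the loop above (added here for illustration, not in the original file): entering 3 and then 0 prints a hollow diamond five rows tall; the trailing 0 only terminates the input loop.

Input:
3
0

Output:
  *
 * *
*   *
 * *
  *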
25.965517
44
0.332005
91
753
2.747253
0.252747
0.192
0.176
0.156
0.292
0.292
0.176
0.176
0.176
0
0
0.051813
0.487384
753
28
45
26.892857
0.595855
0.010624
0
0.307692
0
0
0.011189
0
0
0
0
0
0
1
0
false
0
0
0
0
0.346154
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48c7224abe2e2f0a451d9341ea395ac8a419de0
1,978
py
Python
dynamo/plot/pseudotime.py
davisidarta/dynamo-release
0dbd769f52ea07f3cdaa8fb31022ceb89938c382
[ "BSD-3-Clause" ]
null
null
null
dynamo/plot/pseudotime.py
davisidarta/dynamo-release
0dbd769f52ea07f3cdaa8fb31022ceb89938c382
[ "BSD-3-Clause" ]
null
null
null
dynamo/plot/pseudotime.py
davisidarta/dynamo-release
0dbd769f52ea07f3cdaa8fb31022ceb89938c382
[ "BSD-3-Clause" ]
null
null
null
import numpy as np

from ..tools.utils import update_dict
from .utils import save_fig


def plot_direct_graph(adata,
                      layout=None,
                      figsize=[6, 4],
                      save_show_or_return='show',
                      save_kwargs={},
                      ):
    df_mat = adata.uns["df_mat"]

    import matplotlib.pyplot as plt
    import networkx as nx

    edge_color = "gray"

    G = nx.from_pandas_edgelist(
        df_mat,
        source="source",
        target="target",
        edge_attr="weight",
        create_using=nx.DiGraph(),
    )
    G.nodes()
    W = []
    for n, nbrs in G.adj.items():
        for nbr, eattr in nbrs.items():
            W.append(eattr["weight"])

    options = {
        "width": 300,
        "arrowstyle": "-|>",
        "arrowsize": 1000,
    }
    plt.figure(figsize=figsize)
    if layout is None:
        # pos : dictionary, optional
        #     A dictionary with nodes as keys and positions as values.
        #     If not specified a spring layout positioning will be computed.
        #     See :py:mod:`networkx.drawing.layout` for functions that
        #     compute node positions.
        g = nx.draw(
            G,
            with_labels=True,
            node_color="skyblue",
            node_size=100,
            edge_color=edge_color,
            width=W / np.max(W) * 5,
            edge_cmap=plt.cm.Blues,
            options=options,
        )
    else:
        raise Exception("layout", layout, " is not supported.")

    if save_show_or_return == "save":
        s_kwargs = {"path": None, "prefix": 'plot_direct_graph', "dpi": None,
                    "ext": 'pdf', "transparent": True, "close": True, "verbose": True}
        s_kwargs = update_dict(s_kwargs, save_kwargs)

        save_fig(**s_kwargs)
    elif save_show_or_return == "show":
        plt.tight_layout()
        plt.show()
    elif save_show_or_return == "return":
        return g
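A minimal call sketch for the function above. The only attribute it reads from `adata` is `uns["df_mat"]`, so a tiny stand-in object with a source/target/weight edge list is enough to exercise it; the stand-in class and the edge values are illustrative assumptions, not part of the dynamo package.

import pandas as pd

class _FakeAData:
    # Stand-in for an AnnData object: only .uns["df_mat"] is read by plot_direct_graph.
    uns = {"df_mat": pd.DataFrame({
        "source": ["progenitor", "progenitor"],
        "target": ["lineage_A", "lineage_B"],
        "weight": [0.7, 0.3],
    })}

plot_direct_graph(_FakeAData(), save_show_or_return="show")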
28.257143
86
0.532356
230
1,978
4.4
0.504348
0.031621
0.039526
0.063241
0.063241
0
0
0
0
0
0
0.010196
0.35541
1,978
69
87
28.666667
0.783529
0.128918
0
0
0
0
0.098657
0
0
0
0
0
0
1
0.018519
false
0
0.092593
0
0.12963
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48d18e383286d35c87dd89bd5701bc78cbbbad7
4,327
py
Python
ocean_lib/web3_internal/utils.py
joshualyguessennd/ocean.py
23274698df4aae078d53b12d768c721af16f6e80
[ "Apache-2.0" ]
null
null
null
ocean_lib/web3_internal/utils.py
joshualyguessennd/ocean.py
23274698df4aae078d53b12d768c721af16f6e80
[ "Apache-2.0" ]
1
2021-02-16T18:31:53.000Z
2021-02-16T18:31:53.000Z
ocean_lib/web3_internal/utils.py
joshualyguessennd/ocean.py
23274698df4aae078d53b12d768c721af16f6e80
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Ocean Protocol Foundation # SPDX-License-Identifier: Apache-2.0 import json import logging import os from collections import namedtuple import eth_account import eth_keys import eth_utils from eth_keys import KeyAPI from eth_utils import big_endian_to_int from ocean_lib.web3_internal.web3_provider import Web3Provider from web3 import Web3 Signature = namedtuple("Signature", ("v", "r", "s")) logger = logging.getLogger(__name__) def generate_multi_value_hash(types, values): """ Return the hash of the given list of values. This is equivalent to packing and hashing values in a solidity smart contract hence the use of `soliditySha3`. :param types: list of solidity types expressed as strings :param values: list of values matching the `types` list :return: bytes """ assert len(types) == len(values) return Web3.soliditySha3(types, values) def prepare_prefixed_hash(msg_hash): """ :param msg_hash: :return: """ return generate_multi_value_hash( ["string", "bytes32"], ["\x19Ethereum Signed Message:\n32", msg_hash] ) def add_ethereum_prefix_and_hash_msg(text): """ This method of adding the ethereum prefix seems to be used in web3.personal.sign/ecRecover. :param text: str any str to be signed / used in recovering address from a signature :return: hash of prefixed text according to the recommended ethereum prefix """ prefixed_msg = f"\x19Ethereum Signed Message:\n{len(text)}{text}" return Web3.sha3(text=prefixed_msg) def get_public_key_from_address(web3, account): """ :param web3: :param account: :return: """ _hash = web3.sha3(text="verify signature.") signature = web3.personal.sign(_hash, account.address, account.password) signature = split_signature(web3, web3.toBytes(hexstr=signature)) signature_vrs = Signature( signature.v % 27, big_endian_to_int(signature.r), big_endian_to_int(signature.s) ) prefixed_hash = prepare_prefixed_hash(_hash) pub_key = KeyAPI.PublicKey.recover_from_msg_hash( prefixed_hash, KeyAPI.Signature(vrs=signature_vrs) ) assert ( pub_key.to_checksum_address() == account.address ), "recovered address does not match signing address." 
return pub_key def to_32byte_hex(web3, val): """ :param web3: :param val: :return: """ return web3.toBytes(val).rjust(32, b"\0") def split_signature(web3, signature): """ :param web3: :param signature: signed message hash, hex str :return: """ assert len(signature) == 65, ( f"invalid signature, " f"expecting bytes of length 65, got {len(signature)}" ) v = web3.toInt(signature[-1]) r = to_32byte_hex(web3, int.from_bytes(signature[:32], "big")) s = to_32byte_hex(web3, int.from_bytes(signature[32:64], "big")) if v != 27 and v != 28: v = 27 + v % 2 return Signature(v, r, s) def get_wallet(index): name = "PARITY_ADDRESS" if not index else f"PARITY_ADDRESS{index}" pswrd_name = "PARITY_PASSWORD" if not index else f"PARITY_PASSWORD{index}" key_name = "PARITY_KEY" if not index else f"PARITY_KEY{index}" encrypted_key_name = ( "PARITY_ENCRYPTED_KEY" if not index else f"PARITY_ENCRYPTED_KEY{index}" ) keyfile_name = "PARITY_KEYFILE" if not index else f"PARITY_KEYFILE{index}" address = os.getenv(name) if not address: return None pswrd = os.getenv(pswrd_name) key = os.getenv(key_name) encr_key = os.getenv(encrypted_key_name) key_file = os.getenv(keyfile_name) if key_file and not encr_key: with open(key_file) as _file: encr_key = json.loads(_file.read()) from ocean_lib.web3_internal.wallet import Wallet return Wallet( Web3Provider.get_web3(), private_key=key, encrypted_key=encr_key, address=Web3.toChecksumAddress(address), password=pswrd, ) def privateKeyToAddress(private_key: str) -> str: return eth_account.Account().privateKeyToAccount(private_key).address def privateKeyToPublicKey(private_key: str): private_key_bytes = eth_utils.decode_hex(private_key) private_key_object = eth_keys.keys.PrivateKey(private_key_bytes) return private_key_object.public_key
29.040268
95
0.697712
592
4,327
4.890203
0.271959
0.031088
0.017271
0.02418
0.097064
0.064594
0.042832
0.026252
0.026252
0
0
0.021178
0.203374
4,327
148
96
29.236486
0.818683
0.181419
0
0
0
0
0.126254
0.034808
0
0
0
0
0.036585
1
0.109756
false
0.036585
0.146341
0.012195
0.378049
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48e86cd3da483fb8b0fe253866faf1ceee934c8
8,444
py
Python
src/main.py
ketsonroberto/PBDO
cdc1c5275bc17753be5c06a216f92391b6f1f1ab
[ "MIT" ]
null
null
null
src/main.py
ketsonroberto/PBDO
cdc1c5275bc17753be5c06a216f92391b6f1f1ab
[ "MIT" ]
null
null
null
src/main.py
ketsonroberto/PBDO
cdc1c5275bc17753be5c06a216f92391b6f1f1ab
[ "MIT" ]
null
null
null
# THIS IS A FILE TO TEST THE CODE. DO NOT USE IT AS PART OF THE CODE. import matplotlib.pyplot as plt import numpy as np from StochasticMechanics import Stochastic from scipy.optimize import minimize from Performance import PerformanceOpt from Hazards import Stationary from Building import * from BuildingProperties import * from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from scipy import optimize freq = np.linspace(0.00001, 20, 500) gamma = np.ones((ndof)) * [0.5] nu = np.ones((ndof)) * [0.5] alpha = np.ones((ndof)) * [1] m = np.ones((ndof)) * [1] c = np.ones((ndof)) * [1] k = np.ones((ndof)) * [200] a = np.ones((ndof)) * [0.8] #0.01 ksi = np.ones((ndof)) * [0.05] # ksi = [0.05, 0.05] im_max = 30 B_max = 1 # S1 = np.ones(ndof) # Ps = Stationary(power_spectrum_object='white_noise', ndof=ndof) # power_spectrum = Ps.power_spectrum_excitation(freq=freq, S0=S1) # Von Karman Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof) power_spectrum, U = Ps.power_spectrum_excitation(u10=6.2371, freq=freq, z=z) # plt.semilogy(freq/(2*np.pi), power_spectrum[:,0]) # plt.show() # columns["area"] = 0.001 # columns.update({"area": 0.001}) ks = [] ms = [] msf = [] #cost = [] nlc = 100 lc = np.linspace(0.05, 2, nlc) # fig, (ax1, ax2, ax3) = plt.subplots(1, 3) # fig.suptitle('Mass and Stiffness') # ax1.plot(lc,ms) # ax1.plot(lc,msf) # ax2.plot(lc,ks) # ax3.plot(ks,cost) # plt.show() columns = update_columns(columns=columns, lx=0.4, ly=0.4) Building = Structure(building, columns, slabs, core, concrete, steel) k_story = Building.stiffness_story() m_story = Building.mass_storey(top_story=False) m_story_f = Building.mass_storey(top_story=True) k = np.ones(ndof) * [k_story] m = np.ones(ndof) * [m_story] m[-1] = m_story_f length = 0.3 size_col = np.ones(ndof) * [length] Sto = Stochastic(power_spectrum=power_spectrum, model='bouc_wen', ndof=ndof, freq=freq) #Opt = PerformanceOpt(power_spectrum=power_spectrum, model='bouc_wen', freq=freq, tol=1e-5, maxiter=100, # design_life=1) # design_life = 50 # total_cost = Opt.objective_function(size_col=size_col, ksi=ksi, im_max=im_max, B_max=B_max, gamma=gamma, nu=nu, # alpha=alpha, a=a) #CostFailure = Costs(building=building, columns=columns, slabs=slabs, core=core, concrete=concrete, # steel=steel, cost=cost) #size_col = np.ones(ndof) * [0.5] #size_col = np.array([1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]) #size_col = np.array([0.1, 0.2, 0.3]) args=[ksi, im_max, B_max, gamma, nu, alpha, a] sizea = 0.1 sizeb = 1 wa = 0.1 wb=100 npar = 10 nw = 10 X = np.zeros((npar * nw, 3 * ndof + 1)) y = np.zeros((npar * nw, 2 * ndof)) ct=0 ct1=0 for kk in range(npar): size_col = sizea+(sizeb-sizea)*np.random.rand(ndof) M, C, K, m, c, k = Sto.get_MCK(size_col=size_col, args=args, columns=columns) for i in range(nw): im = wa + (wb - wa) * np.random.rand(1)[0] idd = 0 for j in np.arange(0, 3 * ndof, 3): X[ct, j] = m[idd] X[ct, j + 1] = c[idd] X[ct, j + 2] = k[idd] idd = idd + 1 X[ct, -1] = im ct = ct + 1 Ps = Stationary(power_spectrum_object='windpsd', ndof=ndof) power_spectrum, ub = Ps.power_spectrum_excitation(u10=im, freq=freq, z=z) Var, Vard = Sto.statistical_linearization(M=M, C=C, K=K, power_sp=power_spectrum, tol=0.01, maxiter=100, gamma=gamma, nu=nu, alpha=alpha, a=a) idd = 0 for j in np.arange(0, 2 * ndof, 2): y[ct1, j] = Var[idd][0] y[ct1, j + 1] = Vard[idd][0] idd = idd + 1 ct1 = ct1 + 1 print(np.shape(y)) from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import (RBF, Matern, RationalQuadratic, 
ExpSineSquared, DotProduct, ConstantKernel) kernels_U = [None, ConstantKernel(1.0, (1e-4, 1e4)) * RBF(1, (1e-4, 1e4)), 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1), 1.0 * ExpSineSquared(length_scale=1.0, periodicity=1, length_scale_bounds=(1.0e-5, 100.0), periodicity_bounds=(1.0, 10.0)), ConstantKernel(0.1, (0.01, 10.0)) * (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.0, 10.0)) ** 2), 1.0 * Matern(length_scale=1.0, nu=1.5)] gp = GaussianProcessRegressor(kernel=kernels_U[0], n_restarts_optimizer=10, normalize_y=False) gp.fit(X, y) r2 = gp.score(X, y) print(r2) yp = gp.predict(np.array(X[2].reshape(1, -1))) val = X[2] val[-1]=100.0 print(val) yp = gp.predict(val.reshape(1, -1)) print(yp) #print(np.shape(X)) #print(np.shape(y)) #nn_architecture = [ # {"input_dim": 10, "output_dim": 25, "activation": "relu"}, # {"input_dim": 25, "output_dim": 50, "activation": "relu"}, # {"input_dim": 50, "output_dim": 50, "activation": "relu"}, # {"input_dim": 50, "output_dim": 25, "activation": "relu"}, # {"input_dim": 25, "output_dim": 6, "activation": "relu"}, #] #from neural import NeuralNets #from sklearn.model_selection import train_test_split #NN = NeuralNets(nn_architecture) #TEST_SIZE = 0.1 #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=132) ##print(X_train) #params_values, cost_history = NN.train(X=np.transpose(X_train), Y=np.transpose(y_train), epochs=1000, # learning_rate=1, verbose=True) """ b0 = np.linspace(0.1, 0.5, 20) cost_f = [] cost_i = [] cost_t = [] mm = [] pp = [] args=[ksi, im_max, B_max, gamma, nu, alpha, a] for i in range(len(b0)): Cf = CostFailure.cost_damage(b=b0[i], col_size=size_col[0], L=columns["height"], ncolumns=columns["quantity"], dry_wall_area=dry_wall_area) Ci = CostFailure.initial_cost_stiffness(col_size=b0[i], par0=25.55133, par1=0.33127) scol = np.array([b0[i], b0[i]]) Ct = Opt.objective_function(size_col=scol, args=args) #mom, phi = Building.compression(col_size=b0[i], L=columns["height"]) cost_f.append(Cf) cost_i.append(Ci) cost_t.append(Ct) fig = plt.figure() plt.plot(b0, cost_t,'-o') plt.show() #fig = plt.figure() #plt.plot(phi, mom,'-o') #plt.show() """ """ b0 = np.linspace(0.05,0.5,5) b1 = np.linspace(0.05,0.5,5) B0, B1 = np.meshgrid(b0, b1) args=[ksi, im_max, B_max, gamma, nu, alpha, a] tc = np.zeros((5, 5)) for i in range(len(b0)): print(i) for j in range(len(b1)): size_col = np.array([b0[i], b1[j]]) resp = Opt.objective_function(size_col=size_col, args=args) tc[i,j] = resp Z = tc.reshape(B0.shape) Z = np.array(Z) nd = np.unravel_index(np.argmin(Z, axis=None), Z.shape) print([B0[nd], B1[nd]]) fig = plt.figure() ax = fig.add_subplot(111, projection='3d') surf = ax.plot_surface(B0, B1, np.log(Z), cmap=plt.cm.get_cmap('plasma'),linewidth=0, antialiased=False) ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') fig.colorbar(surf, shrink=0.5, aspect=5) plt.show() """ #size_col = np.ones(ndof) * [0.2] #args=[ksi, im_max, B_max, gamma, nu, alpha, a] ##args = {"ksi": ksi, "im_max": im_max, "B_max": B_max, "gamma": gamma, "nu": nu, "alpha": alpha, "a": a} #bnds = [] #for i in range(ndof): # bnds.append((0.1, 1)) #bnds=tuple(bnds) ###from scipy import optimize ###res = optimize.fmin(Opt.objective_function, x0=size_col) #res = minimize(Opt.objective_function, x0=size_col, args=args, bounds=bnds) ###from scipy.optimize import basinhopping ###minimizer_kwargs = {"method": "BFGS", "args": args} ###ret = basinhopping(Opt.objective_function, x0=size_col, minimizer_kwargs=minimizer_kwargs, niter=200) 
#print(res) ### Global methods. ###from scipy.optimize import rosen, shgo ###from scipy.optimize import dual_annealing ###ret = dual_annealing(Opt.objective_function, bounds=bnds) ###print((ret.x, ret.fun)) #c = Opt.linear_damping(m=m, k=k, ksi=ksi) #M, C, K = Opt.create_mck(m=m, c=c, k=k, gamma=gamma, nu=nu, alpha=alpha, a=a) #financial_loss_rate = Opt.stochastic_financial_loss(M=M, C=C, K=K, stiff=k, im_max=im_max, # B_max=B_max, size_col=size_col, Nim=1, NB=1, gamma=gamma, nu=nu, # alpha=alpha, a=a)
30.157143
114
0.620441
1,376
8,444
3.679506
0.227471
0.027652
0.027652
0.012443
0.245309
0.196721
0.156627
0.121865
0.095398
0.095398
0
0.052365
0.206182
8,444
279
115
30.265233
0.702969
0.363809
0
0.061224
0
0
0.005922
0
0
0
0
0
0
1
0
false
0
0.132653
0
0.132653
0.040816
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48e9c0665ea9a8d85811305b04f10d8aba4b991
777
py
Python
categorical_embedder/embedders/core/aux/custom_object_handler.py
erelcan/categorical-embedder
376b8779500af2aa459c879f8e525f2ef25d6b31
[ "Apache-2.0" ]
3
2020-12-19T10:52:58.000Z
2021-06-08T09:06:44.000Z
categorical_embedder/embedders/core/aux/custom_object_handler.py
erelcan/categorical-embedder
376b8779500af2aa459c879f8e525f2ef25d6b31
[ "Apache-2.0" ]
null
null
null
categorical_embedder/embedders/core/aux/custom_object_handler.py
erelcan/categorical-embedder
376b8779500af2aa459c879f8e525f2ef25d6b31
[ "Apache-2.0" ]
null
null
null
from categorical_embedder.embedders.core.aux.custom_layers import get_custom_layer_class
from categorical_embedder.embedders.core.aux.loss_factory import get_loss_function


def prepare_custom_objects(custom_object_info):
    custom_objects = {}
    custom_objects.update(_prepare_custom_layers(custom_object_info["layer_info"]))
    if not custom_object_info["has_implicit_loss"]:
        custom_objects.update(_prepare_custom_loss(custom_object_info["loss_info"]))
    return custom_objects


def _prepare_custom_layers(layer_info):
    custom_layers = {}
    for layer_name in layer_info:
        custom_layers[layer_name] = get_custom_layer_class(layer_name)
    return custom_layers


def _prepare_custom_loss(loss_info):
    return {"loss": get_loss_function(loss_info)}
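A sketch of the typical consumer of prepare_custom_objects: passing the resulting dict to Keras model deserialization. The dictionary keys follow what the function above reads; the layer name, loss spec, and model path are illustrative assumptions, not values from the repository.

from tensorflow.keras.models import load_model

custom_object_info = {
    "layer_info": ["DropConnectDense"],                    # hypothetical custom layer name
    "has_implicit_loss": False,
    "loss_info": {"type": "triplet", "parameters": {}},    # hypothetical loss spec
}

model = load_model(
    "embedder.h5",                                         # hypothetical saved-model path
    custom_objects=prepare_custom_objects(custom_object_info),
)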
35.318182
88
0.804376
107
777
5.336449
0.271028
0.126095
0.112084
0.112084
0.248687
0.136602
0
0
0
0
0
0
0.119691
777
21
89
37
0.834795
0
0
0
0
0
0.05148
0
0
0
0
0
0
1
0.2
false
0
0.133333
0.066667
0.533333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48f23b7a5506d60b9ac1a5607df61a337660101
10,406
py
Python
osprofiler/cmd/shell.py
charliebr30/osprofiler
cffca4e29e373e3f09f2ffdd458761183a851569
[ "Apache-2.0" ]
null
null
null
osprofiler/cmd/shell.py
charliebr30/osprofiler
cffca4e29e373e3f09f2ffdd458761183a851569
[ "Apache-2.0" ]
1
2017-04-15T22:16:06.000Z
2017-04-15T22:16:06.000Z
osprofiler/cmd/shell.py
shwsun/osprofiler
46d29fc5ab8a4068217e399883f39cdd443a7500
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Command-line interface to the OpenStack Profiler. """ import argparse import inspect import sys from oslo_config import cfg import osprofiler from osprofiler.cmd import cliutils from osprofiler.cmd import commands from osprofiler import exc from osprofiler import opts class OSProfilerShell(object): def __init__(self, argv): args = self._get_base_parser().parse_args(argv) opts.set_defaults(cfg.CONF) if not (args.os_auth_token and args.ceilometer_url): if not args.os_username: raise exc.CommandError( "You must provide a username via either --os-username or " "via env[OS_USERNAME]") if not args.os_password: raise exc.CommandError( "You must provide a password via either --os-password or " "via env[OS_PASSWORD]") if self._no_project_and_domain_set(args): # steer users towards Keystone V3 API raise exc.CommandError( "You must provide a project_id via either --os-project-id " "or via env[OS_PROJECT_ID] and a domain_name via either " "--os-user-domain-name or via env[OS_USER_DOMAIN_NAME] or " "a domain_id via either --os-user-domain-id or via " "env[OS_USER_DOMAIN_ID]") if not args.os_auth_url: raise exc.CommandError( "You must provide an auth url via either --os-auth-url or " "via env[OS_AUTH_URL]") args.func(args) def _get_base_parser(self): parser = argparse.ArgumentParser( prog="osprofiler", description=__doc__.strip(), add_help=True ) parser.add_argument("-v", "--version", action="version", version=osprofiler.__version__) self._append_ceilometer_args(parser) self._append_identity_args(parser) self._append_subcommands(parser) return parser def _append_ceilometer_args(self, parent_parser): parser = parent_parser.add_argument_group("ceilometer") parser.add_argument( "--ceilometer-url", default=cliutils.env("CEILOMETER_URL"), help="Defaults to env[CEILOMETER_URL].") parser.add_argument( "--ceilometer-api-version", default=cliutils.env("CEILOMETER_API_VERSION", default="2"), help="Defaults to env[CEILOMETER_API_VERSION] or 2.") def _append_identity_args(self, parent_parser): # FIXME(fabgia): identity related parameters should be passed by the # Keystone client itself to avoid constant update in all the services # clients. When this fix is merged this method can be made obsolete. # Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337 parser = parent_parser.add_argument_group("identity") parser.add_argument("-k", "--insecure", default=False, action="store_true", help="Explicitly allow osprofiler to " "perform \"insecure\" SSL (https) requests. " "The server's certificate will " "not be verified against any certificate " "authorities. 
This option should be used with " "caution.") # User related options parser.add_argument("--os-username", default=cliutils.env("OS_USERNAME"), help="Defaults to env[OS_USERNAME].") parser.add_argument("--os-user-id", default=cliutils.env("OS_USER_ID"), help="Defaults to env[OS_USER_ID].") parser.add_argument("--os-password", default=cliutils.env("OS_PASSWORD"), help="Defaults to env[OS_PASSWORD].") # Domain related options parser.add_argument("--os-user-domain-id", default=cliutils.env("OS_USER_DOMAIN_ID"), help="Defaults to env[OS_USER_DOMAIN_ID].") parser.add_argument("--os-user-domain-name", default=cliutils.env("OS_USER_DOMAIN_NAME"), help="Defaults to env[OS_USER_DOMAIN_NAME].") parser.add_argument("--os-project-domain-id", default=cliutils.env("OS_PROJECT_DOMAIN_ID"), help="Defaults to env[OS_PROJECT_DOMAIN_ID].") parser.add_argument("--os-project-domain-name", default=cliutils.env("OS_PROJECT_DOMAIN_NAME"), help="Defaults to env[OS_PROJECT_DOMAIN_NAME].") # Project V3 or Tenant V2 related options parser.add_argument("--os-project-id", default=cliutils.env("OS_PROJECT_ID"), help="Another way to specify tenant ID. " "This option is mutually exclusive with " " --os-tenant-id. " "Defaults to env[OS_PROJECT_ID].") parser.add_argument("--os-project-name", default=cliutils.env("OS_PROJECT_NAME"), help="Another way to specify tenant name. " "This option is mutually exclusive with " " --os-tenant-name. " "Defaults to env[OS_PROJECT_NAME].") parser.add_argument("--os-tenant-id", default=cliutils.env("OS_TENANT_ID"), help="This option is mutually exclusive with " " --os-project-id. " "Defaults to env[OS_PROJECT_ID].") parser.add_argument("--os-tenant-name", default=cliutils.env("OS_TENANT_NAME"), help="Defaults to env[OS_TENANT_NAME].") # Auth related options parser.add_argument("--os-auth-url", default=cliutils.env("OS_AUTH_URL"), help="Defaults to env[OS_AUTH_URL].") parser.add_argument("--os-auth-token", default=cliutils.env("OS_AUTH_TOKEN"), help="Defaults to env[OS_AUTH_TOKEN].") parser.add_argument("--os-cacert", metavar="<ca-certificate-file>", dest="os_cacert", default=cliutils.env("OS_CACERT"), help="Path of CA TLS certificate(s) used to verify" " the remote server\"s certificate. Without this " "option ceilometer looks for the default system CA" " certificates.") parser.add_argument("--os-cert", help="Path of certificate file to use in SSL " "connection. This file can optionally be " "prepended with the private key.") parser.add_argument("--os-key", help="Path of client key to use in SSL " "connection. 
This option is not necessary " "if your key is prepended to your cert file.") # Service Catalog related options parser.add_argument("--os-service-type", default=cliutils.env("OS_SERVICE_TYPE"), help="Defaults to env[OS_SERVICE_TYPE].") parser.add_argument("--os-endpoint-type", default=cliutils.env("OS_ENDPOINT_TYPE"), help="Defaults to env[OS_ENDPOINT_TYPE].") parser.add_argument("--os-region-name", default=cliutils.env("OS_REGION_NAME"), help="Defaults to env[OS_REGION_NAME].") def _append_subcommands(self, parent_parser): subcommands = parent_parser.add_subparsers(help="<subcommands>") for group_cls in commands.BaseCommand.__subclasses__(): group_parser = subcommands.add_parser(group_cls.group_name) subcommand_parser = group_parser.add_subparsers() for name, callback in inspect.getmembers( group_cls(), predicate=inspect.ismethod): command = name.replace("_", "-") desc = callback.__doc__ or "" help_message = desc.strip().split("\n")[0] arguments = getattr(callback, "arguments", []) command_parser = subcommand_parser.add_parser( command, help=help_message, description=desc) for (args, kwargs) in arguments: command_parser.add_argument(*args, **kwargs) command_parser.set_defaults(func=callback) def _no_project_and_domain_set(self, args): if not (args.os_project_id or (args.os_project_name and (args.os_user_domain_name or args.os_user_domain_id)) or (args.os_tenant_id or args.os_tenant_name)): return True else: return False def main(args=None): if args is None: args = sys.argv[1:] try: OSProfilerShell(args) except exc.CommandError as e: print(e.message) return 1 if __name__ == "__main__": main()
42.129555
79
0.548818
1,126
10,406
4.867673
0.221137
0.035577
0.080642
0.065864
0.369823
0.24813
0.090312
0.031381
0.01642
0.01642
0
0.003449
0.35912
10,406
246
80
42.300813
0.818414
0.104939
0
0.05814
0
0
0.279328
0.044813
0
0
0
0.004065
0
1
0.040698
false
0.034884
0.052326
0
0.122093
0.005814
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
f48f3252e9a2f94d57cf6c129396083ea3b2d577
3,695
py
Python
bmt/util.py
patrickkwang/bmt-lite
bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b
[ "MIT" ]
null
null
null
bmt/util.py
patrickkwang/bmt-lite
bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b
[ "MIT" ]
null
null
null
bmt/util.py
patrickkwang/bmt-lite
bf97f6155702a8eb38daf5a45df34b0ce1cb1a4b
[ "MIT" ]
null
null
null
"""Utilities.""" from functools import wraps import re from typing import Callable, List, Optional, TypeVar, Union from .data import ( all_classes, all_slots, ) def pascal_to_snake(s: str, sep: str = "_") -> str: """Convert Pascal case to snake case. Assumes that a) all words are either all-lowercase or all-uppercase b) all 1-letter words are lowercase c) there are no adjacent 1-letter words d) there are no adjacent uppercase words Examples: PhenotypicFeature -> phenotypic_feature RNAProduct -> RNA_product FeedACamel -> feed_a_camel Optionally specify `sep` (default "_"). """ # add an underscore before each capital letter underscored = re.sub( r"(?<!^)(?=[A-Z])", sep, s, ) # collapse any adjacent one-letter words collapsed = re.sub( r"(?<![a-zA-Z])[A-Z](?:_[A-Z](?=$|_))+", lambda match: match.group(0).replace("_", ""), underscored, ) # lower-case any words containing only one uppercase letter lowercased = re.sub( r"(?<![A-Z])[A-Z](?![A-Z])", lambda match: match.group(0).lower(), collapsed, ) return lowercased def snake_to_pascal(s: str, sep: str = "_") -> str: """Convert snake case to Pascal case. This is the inverse of pascal_to_snake() when its assumptions are true. Optionally specify `sep` (default "_"). """ return re.sub( fr"(?:^|{sep})([a-zA-Z])", lambda match: match.group(1).upper(), s ) def guess_casing(s: str) -> str: """Guess snake case or Pascal case.""" if "_" in s: return "snake" if any(c.isupper() for c in s): return "pascal" return "snake" def normalize(s: str) -> str: """Normalize string input.""" if s.startswith("biolink:"): s = s[8:] if "_" in s: # it's snake case return s.replace("_", " ") if " " in s: return s return pascal_to_snake(s, " ") T = TypeVar("T") def listify(func: Callable) -> Callable: """Expand function to take list of arguments.""" @wraps(func) def wrapper(arg: Union[T, List[T]], **kwargs) -> Union[T, List[T]]: """Apply function to each element in list.""" if isinstance(arg, list): return [ func(el, **kwargs) for el in arg ] else: return func(arg, **kwargs) return wrapper @listify def format(s: str, case: Optional[str] = None, **kwargs) -> str: """Format space-case string as biolink CURIE.""" if isinstance(case, str) and case.lower() == "pascal": return "biolink:" + snake_to_pascal(s, " ") elif isinstance(case, str) and case.lower() == "snake": return "biolink:" + s.replace(" ", "_") else: return "biolink:" + s def with_formatting(): """Add format conversions to method.""" def decorator(func: Callable) -> Callable: """Generate decorator.""" @wraps(func) def wrapper(self, s: str, *args, formatted=False, **kwargs): """Wrap in format conversions.""" case = guess_casing(s) normalized = normalize(s) output: Union[str, List[str]] = func(self, normalized, *args, **kwargs) if formatted: if normalized in all_classes: output = format(output, case="pascal") elif normalized in all_slots: output = format(output, case="snake") else: output = format(output, case=case) return output return wrapper return decorator
27.781955
83
0.558863
448
3,695
4.537946
0.310268
0.011805
0.005903
0.01033
0.092966
0.074766
0.026562
0.026562
0.026562
0.026562
0
0.002324
0.301218
3,695
132
84
27.992424
0.78505
0.256834
0
0.1375
0
0
0.068399
0.030951
0
0
0
0
0
1
0.125
false
0
0.05
0
0.3875
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
be2e1617c4a15afe6886703b261c4b500fdae5e3
7,960
py
Python
sktime/utils/time_series.py
brettkoonce/sktime
6336247bad0dac8692aa4b911c267f401dea4163
[ "BSD-3-Clause" ]
1
2020-09-11T06:26:08.000Z
2020-09-11T06:26:08.000Z
sktime/utils/time_series.py
brettkoonce/sktime
6336247bad0dac8692aa4b911c267f401dea4163
[ "BSD-3-Clause" ]
2
2020-04-20T12:26:42.000Z
2020-04-22T17:09:14.000Z
sktime/utils/time_series.py
brettkoonce/sktime
6336247bad0dac8692aa4b911c267f401dea4163
[ "BSD-3-Clause" ]
1
2022-02-14T18:19:01.000Z
2022-02-14T18:19:01.000Z
__author__ = ["Markus Löning"] __all__ = [ "compute_relative_to_n_timepoints", "time_series_slope", "fit_trend", "remove_trend", "add_trend" ] import numpy as np from sklearn.utils import check_array from sktime.utils.validation.forecasting import check_time_index def compute_relative_to_n_timepoints(n_timepoints, n="sqrt"): """ Get number of intervals from number of time points for various allowed input arguments. Helpful to compute number of intervals relative to time series length, e.g. using floats or functions. Parameters ---------- n_timepoints : int n : {int, float, str, callable} Returns ------- n_intervals_ : int Computed number of intervals """ # check input: n_timepoints if not np.issubdtype(type(n_timepoints), np.dtype(int).type): raise ValueError( f"`n_timepoints` must be an integer, but found: " f"{type(n_timepoints)}") if not n_timepoints >= 1: raise ValueError( f"`n_timepoints` must be >= 1, but found: {n_timepoints}") # compute number of splits allowed_strings = ["sqrt", "log"] # integer if np.issubdtype(type(n), np.dtype(int).type): if not n <= n_timepoints: raise ValueError( f"If `n_intervals` is an integer, it must be smaller " f"than `n_timepoints`, but found: `n_intervals`={n} " f"and `n_timepoints`={n_timepoints}") if n < 1: raise ValueError(f"If `n_intervals` is an integer, " f"`n_intervals` must be >= 1, but found: {n}") n_intervals_ = n # function elif callable(n): n_intervals_ = n(n_timepoints) # string elif isinstance(n, str): if n not in allowed_strings: raise ValueError( f"If `n_intervals` is a string, `n_intervals` must be " f"in {allowed_strings}, but found: {n}") str_func_map = { "sqrt": np.sqrt, "log": np.log } func = str_func_map[n] n_intervals_ = func(n_timepoints) # float elif isinstance(n, float): if not (0 < n <= 1): raise ValueError( f"If `n_intervals` is a float, `n_intervals` must be > 0 " f"and <= 1, but found: {n}") n_intervals_ = n * n_timepoints else: raise ValueError( f"`n_intervals` must be either one of the allowed string options " f"in " f"{allowed_strings}, an integer or a float number.") # make sure n_intervals is an integer and there is at least one interval n_intervals_ = np.maximum(1, np.int(n_intervals_)) return n_intervals_ def time_series_slope(y): """ Compute slope of time series (y) using ordinary least squares. Parameters ---------- y : array_like Time-series. axis : int Axis along which the time-series slope is computed. Returns ------- slope : float Slope of time-series. """ y = np.asarray(y).ravel() len_series = len(y) if len_series < 2: return 0 else: x = np.arange(len_series) # time index x_mean = (len_series - 1) / 2 # faster than x.mean() return (np.mean(x * y) - x_mean * np.mean(y)) / ( np.mean(x ** 2) - x_mean ** 2) def fit_trend(x, order=0): """Fit linear regression with polynomial terms of given order x : array_like, shape=[n_samples, n_obs] Time series data, each sample is fitted separately order : int The polynomial order of the trend, zero is constant (mean), one is linear trend, two is quadratic trend, and so on. 
Returns ------- coefs : ndarray, shape=[n_samples, order + 1] Fitted coefficients of polynomial order for each sample, one column means order zero, two columns mean order 1 (linear), three columns mean order 2 (quadratic), etc See Also ------- add_trend remove_trend """ x = check_array(x) if order == 0: coefs = np.mean(x, axis=1).reshape(-1, 1) else: n_obs = x.shape[1] index = np.arange(n_obs) poly_terms = np.vander(index, N=order + 1) # linear least squares fitting using numpy's optimised routine, # assuming samples in columns # coefs = np.linalg.pinv(poly_terms).dot(x.T).T coefs, _, _, _ = np.linalg.lstsq(poly_terms, x.T, rcond=None) # returning fitted coefficients in expected format with samples in rows coefs = coefs.T return coefs def remove_trend(x, coefs, time_index=None): """Remove trend from an array with a trend of given order along axis 0 or 1 Parameters ---------- x : array_like, shape=[n_samples, n_obs] Time series data, each sample is de-trended separately coefs : ndarray, shape=[n_samples, order + 1] Fitted coefficients for each sample, single column means order zero, two columns mean order 1 (linear), three columns mean order 2 (quadratic), etc time_index : array-like, shape=[n_obs], optional (default=None) Time series index for which to add the trend components Returns ------- xt : ndarray The de-trended series is the residual of the linear regression of the data on the trend of given order. See Also -------- fit_trend add_trend References ---------- Adapted from statsmodels (0.9.0), see https://www.statsmodels.org/dev/_modules/statsmodels/tsa/tsatools.html #detrend """ x = check_array(x) # infer order from shape of given coefficients order = coefs.shape[1] - 1 # special case, remove mean if order == 0: xt = x - coefs return xt else: if time_index is None: # if no time index is given, create range index n_obs = x.shape[1] time_index = np.arange(n_obs) else: # validate given time index time_index = check_time_index(time_index) if not len(time_index) == x.shape[1]: raise ValueError( 'Length of passed index does not match length of passed x') poly_terms = np.vander(time_index, N=order + 1) xt = x - np.dot(poly_terms, coefs.T).T return xt def add_trend(x, coefs, time_index=None): """Add trend to array for given fitted coefficients along axis 0 or 1, inverse function to `remove_trend()` Parameters ---------- x : array_like, shape=[n_samples, n_obs] Time series data, each sample is treated separately coefs : array-like, shape=[n_samples, order + 1] fitted coefficients of polynomial order for each sample, one column means order zero, two columns mean order 1 (linear), three columns mean order 2 (quadratic), etc time_index : array-like, shape=[n_obs], optional (default=None) Time series index for which to add the trend components Returns ------- xt : ndarray The series with added trend. See Also ------- fit_trend remove_trend """ x = check_array(x) # infer order from shape of given coefficients order = coefs.shape[1] - 1 # special case, add mean if order == 0: xt = x + coefs else: if time_index is None: n_obs = x.shape[1] time_index = np.arange(n_obs) else: # validate given time index time_index = check_time_index(time_index) if not len(time_index) == x.shape[1]: raise ValueError( 'Length of passed index does not match length of passed x') poly_terms = np.vander(time_index, N=order + 1) xt = x + np.dot(poly_terms, coefs.T).T return xt
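A short round-trip sketch using the trend utilities defined above (fit_trend, remove_trend, add_trend). The synthetic two-sample array and the import path derived from sktime/utils/time_series.py are illustrative assumptions.

import numpy as np
from sktime.utils.time_series import fit_trend, remove_trend, add_trend

# Two samples, 100 observations each, with a linear trend plus noise.
rng = np.random.RandomState(0)
t = np.arange(100)
x = np.vstack([1.5 * t + rng.randn(100), -0.5 * t + 3 + rng.randn(100)])

coefs = fit_trend(x, order=1)           # shape (2, 2): one row of coefficients per sample
detrended = remove_trend(x, coefs)      # residuals after removing each fitted line
restored = add_trend(detrended, coefs)  # inverse operation; recovers x
print(np.allclose(restored, x))         # True up to numerical precision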
29.157509
79
0.593593
1,083
7,960
4.226223
0.184672
0.045226
0.02447
0.019664
0.471488
0.426917
0.396329
0.354381
0.347389
0.309591
0
0.009079
0.308166
7,960
272
80
29.264706
0.822045
0.415578
0
0.366071
0
0
0.195489
0.014333
0
0
0
0
0
1
0.044643
false
0.017857
0.026786
0
0.133929
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0