Dataset columns:
id — string, 3 to 8 characters
content — string, 100 to 981k characters
106172
import os
import re
import time
import random
import unicodedata
from collections import defaultdict

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from transformers import (AlbertTokenizer, AutoTokenizer, LongformerTokenizer,
                          RobertaTokenizer, XLNetTokenizer)

from helpers import *  # provides get_pretrained_model_name


class DatasetWebQSP(Dataset):
    def __init__(self, data, entities, entity2idx, transformer_name, kg_model):
        self.data = data
        self.entities = entities
        self.entity2idx = entity2idx
        self.pos_dict = defaultdict(list)
        self.neg_dict = defaultdict(list)
        self.index_array = list(self.entities.keys())
        self.transformer_name = transformer_name
        self.pre_trained_model_name = get_pretrained_model_name(transformer_name)
        self.tokenizer = None
        self.set_tokenizer()
        self.max_length = 64
        self.kg_model = kg_model

    def set_tokenizer(self):
        if self.transformer_name == 'RoBERTa':
            self.tokenizer = RobertaTokenizer.from_pretrained(self.pre_trained_model_name)
        elif self.transformer_name == 'XLNet':
            self.tokenizer = XLNetTokenizer.from_pretrained(self.pre_trained_model_name)
        elif self.transformer_name == 'ALBERT':
            self.tokenizer = AlbertTokenizer.from_pretrained(self.pre_trained_model_name)
        elif self.transformer_name == 'SentenceTransformer':
            self.tokenizer = AutoTokenizer.from_pretrained(self.pre_trained_model_name)
        elif self.transformer_name == 'Longformer':
            self.tokenizer = LongformerTokenizer.from_pretrained(self.pre_trained_model_name)
        else:
            print('Incorrect transformer specified:', self.transformer_name)
            exit(0)

    def __len__(self):
        return len(self.data)

    def pad_sequence(self, arr, max_len=128):
        num_to_add = max_len - len(arr)
        for _ in range(num_to_add):
            arr.append('<pad>')
        return arr

    def toOneHot(self, indices):
        indices = torch.LongTensor(indices)
        vec_len = len(self.entity2idx)
        one_hot = torch.FloatTensor(vec_len)
        one_hot.zero_()
        one_hot.scatter_(0, indices, 1)
        return one_hot

    def __getitem__(self, index):
        data_point = self.data[index]
        question_text = data_point[1]
        question_tokenized, attention_mask = self.tokenize_question(question_text)
        head_id = self.entity2idx[data_point[0].strip()]
        tail_ids = []
        for tail_name in data_point[2]:
            tail_name = tail_name.strip()
            # TODO: verify this is the right way to handle tails missing from the vocabulary
            if tail_name in self.entity2idx:
                tail_ids.append(self.entity2idx[tail_name])
        tail_onehot = self.toOneHot(tail_ids)
        return question_tokenized, attention_mask, head_id, tail_onehot

    def tokenize_question(self, question):
        if self.transformer_name != "SentenceTransformer":
            question = f"<s>{question}</s>"
            question_tokenized = self.tokenizer.tokenize(question)
            question_tokenized = self.pad_sequence(question_tokenized, self.max_length)
            question_tokenized = torch.tensor(self.tokenizer.encode(
                question_tokenized,         # question to encode
                add_special_tokens=False    # <s> and </s> were already added manually above
            ))
            attention_mask = []
            for q in question_tokenized:
                # token id 1 is the padding token
                if q == 1:
                    attention_mask.append(0)
                else:
                    attention_mask.append(1)
            return question_tokenized, torch.tensor(attention_mask, dtype=torch.long)
        else:
            encoded_que = self.tokenizer.encode_plus(question, padding='max_length',
                                                     max_length=self.max_length,
                                                     return_tensors='pt')
            return encoded_que['input_ids'][0], encoded_que['attention_mask'][0]


# A custom _collate_fn was sketched here but never wired up;
# DataLoaderWebQSP keeps the default collate behaviour.
class DataLoaderWebQSP(DataLoader):
    def __init__(self, *args, **kwargs):
        super(DataLoaderWebQSP, self).__init__(*args, **kwargs)
106210
from django.utils.translation import ugettext_lazy as _

MEETINGS_CONTRIBUTION_TYPES = [
    ('talk', _('Talk')),
    ('poster', _('Poster')),
]

MEETINGS_PAYMENT_CHOICES = (
    ('cash', _('cash')),
    ('wire', _('wire transfer')),
)

MEETINGS_PARTICIPANT_DETAIL_KEYS = []

MEETINGS_ABSTRACT_MAX_LENGTH = 2000
106215
from typing import List


class Solution:
    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        if not intervals:
            return 0
        result, curr = 0, 0
        # Sweep line: +1 at every meeting start, -1 at every end; the running
        # sum is the number of rooms in use, and its maximum is the answer.
        for i, val in sorted(
            x for interval in intervals for x in [(interval[0], 1), (interval[1], -1)]
        ):
            curr += val
            result = max(curr, result)
        return result


# Alternative min-heap solution:
# import heapq
#
# class Solution:
#     def minMeetingRooms(self, intervals: List[List[int]]) -> int:
#         if not intervals:
#             return 0
#         q = []
#         for interval in sorted(intervals, key=lambda x: x[0]):
#             if not q:
#                 heapq.heappush(q, interval[1])
#             else:
#                 if interval[0] >= q[0]:
#                     heapq.heappop(q)
#                 heapq.heappush(q, interval[1])
#         return len(q)
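A quick sanity check of the sweep-line version (the interval data below is made up for illustration):

intervals = [[0, 30], [5, 10], [15, 20]]
print(Solution().minMeetingRooms(intervals))  # 2 -- [0, 30] overlaps each of the other meetings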
106239
import sys

settings_file_path = "../quisk_settings.json"

#hamlib_port = 4575  # Standard port for Quisk control. Set the port in Hamlib to 4575 too.
hamlib_port = 4532   # Default port for rig 2. Use this if you can not set the Hamlib port.

if sys.platform == "win32":
    pass
elif 0:
    digital_input_name = 'pulse'
    digital_output_name = ''
else:
    digital_input_name = 'hw:Loopback,0'
    digital_output_name = digital_input_name
106263
from keras import backend as K
from overrides import overrides

from ..masked_layer import MaskedLayer


class CollapseToBatch(MaskedLayer):
    """
    Reshapes a higher order tensor, taking the first ``num_to_collapse`` dimensions after the
    batch dimension and folding them into the batch dimension.  For example, a tensor of shape
    (2, 4, 5, 3), collapsed with ``num_to_collapse = 2``, would become a tensor of shape
    (40, 3).  We perform identical computation on the input mask, if there is one.

    This is essentially what Keras' ``TimeDistributed`` layer does (and then undoes) to apply a
    layer to a higher-order tensor, and that's the intended use for this layer.  However,
    ``TimeDistributed`` cannot handle distributing across dimensions with unknown lengths at
    graph compilation time.  This layer works even in that case.  So, if your actual tensor
    shape at graph compilation time looks like (None, None, None, 3), or (None, 4, None, 3),
    you can still use this layer (and
    :class:`~deep_qa.layers.backend.expand_from_batch.ExpandFromBatch`) to get the same result
    as ``TimeDistributed``.  If your shapes are fully known at graph compilation time, just use
    ``TimeDistributed``, as it's a nicer API for the same functionality.

    Inputs:
        - tensor with ``ndim >= 3``

    Output:
        - tensor with ``ndim = input_ndim - num_to_collapse``, with the removed dimensions
          folded into the first (batch-size) dimension

    Parameters
    ----------
    num_to_collapse: int
        The number of dimensions to fold into the batch size.
    """
    def __init__(self, num_to_collapse: int, **kwargs):
        self.num_to_collapse = num_to_collapse
        super(CollapseToBatch, self).__init__(**kwargs)

    @overrides
    def call(self, inputs, mask=None):
        return self.__collapse_tensor(inputs)

    @overrides
    def compute_mask(self, inputs, mask=None):  # pylint: disable=unused-argument
        if mask is None:
            return None
        return self.__collapse_tensor(mask)

    @overrides
    def compute_output_shape(self, input_shape):
        return (None,) + input_shape[1 + self.num_to_collapse:]

    @overrides
    def get_config(self):
        base_config = super(CollapseToBatch, self).get_config()
        config = {'num_to_collapse': self.num_to_collapse}
        config.update(base_config)
        return config

    def __collapse_tensor(self, tensor):
        # If we were to call K.int_shape(inputs), we would get something back that has None in
        # it (other than the batch dimension), because the shape is not fully known at graph
        # compilation time.  We can't do a reshape with more than one unknown dimension, which
        # is why we're doing this whole layer in the first place instead of just using
        # TimeDistributed.  tf.reshape will let us pass in a tensor that has the shape, instead
        # of just some ints.  So we can use tf.shape(tensor) to get the actual runtime shape of
        # the tensor _as a tensor_, which we then pass to tf.reshape().
        new_shape = K.concatenate([[-1], K.shape(tensor)[1 + self.num_to_collapse:]], 0)
        return K.reshape(tensor, new_shape)
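Since the layer ultimately performs a single reshape, the docstring's (2, 4, 5, 3) -> (40, 3) example can be sanity-checked with plain numpy (the zeros array is just an illustrative stand-in):

import numpy as np

x = np.zeros((2, 4, 5, 3))
num_to_collapse = 2
collapsed = x.reshape((-1,) + x.shape[1 + num_to_collapse:])
print(collapsed.shape)  # (40, 3)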
106273
import pytest

pytestmark = pytest.mark.asyncio


async def test_clean_query_prefix(client):
    response = await client.get("/api/clean_query/192.0.2/24")
    assert response.status_code == 200
    assert response.json() == {"cleanedValue": "192.0.2.0/24", "category": "prefix"}


async def test_clean_query_prefix_misaligned(client):
    response = await client.get("/api/clean_query/192.0.2.3/24")
    assert response.status_code == 200
    assert response.json() == {"cleanedValue": "192.0.2.0/24", "category": "prefix"}


async def test_clean_query_asn_bare(client):
    response = await client.get("/api/clean_query/192")
    assert response.status_code == 200
    assert response.json() == {"cleanedValue": "AS192", "category": "asn"}


async def test_clean_query_asn(client):
    response = await client.get("/api/clean_query/AS64500")
    assert response.status_code == 200
    assert response.json() == {"cleanedValue": "AS64500", "category": "asn"}


async def test_clean_query_as_set(client):
    response = await client.get("/api/clean_query/foobar")
    assert response.status_code == 200
    assert response.json() == {"cleanedValue": "FOOBAR", "category": "as-set"}


async def test_clean_query_invalid(client):
    response = await client.get("/api/clean_query/--invalid-💩")
    assert response.status_code == 400
    assert "valid prefix" in response.text


async def test_clean_query_prefix_too_large(client):
    response = await client.get("/api/clean_query/1172.16.58.3/8")
    assert response.status_code == 400
    assert "the minimum prefix length" in response.text

    response = await client.get("/api/clean_query/2001::/16")
    assert response.status_code == 400
    assert "the minimum prefix length" in response.text
106327
import logging
import time

import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from spinn_utilities.progress_bar import ProgressBar

from stereovis.framed.algorithms import StereoMRF

logger = logging.getLogger(__file__)


class FramebasedStereoMatching(object):
    def __init__(self, resolution, max_disparity, algorithm='mrf', inputs=None):
        if algorithm == 'mrf':
            # reverse the resolution order since x-dimension corresponds to n_cols and y to
            # n_rows and the shape initialisation of numpy is (n_rows, n_cols) which is (y, x)
            x, y = resolution
            self.algorithm = StereoMRF(dim=(y, x), n_levels=max_disparity)
            if inputs is not None:
                # this means that the operational mode is offline and hence one can initialise
                # the frame iterator
                self.frames_left = np.asarray(inputs['left'])
                self.frames_right = np.asarray(inputs['right'])
                self.frames_timestamps = np.asarray(inputs['ts'])
            # initialise the placeholder for the depth-resolved inputs
            self.depth_frames = []
        else:
            raise NotImplementedError("Only MRF is supported.")

    def get_timestamps(self):
        return self.frames_timestamps

    def get_output(self):
        self.depth_frames = np.asarray(self.depth_frames)
        return self.depth_frames

    def run_one_frame(self, image_left, image_right, prior=None, **kwargs):
        """
        Run one single frame of the frame-based stereo matching. Should be used when
        running online.

        Args:
            image_left: a numpy array representing the left image
            image_right: a numpy array representing the right image
            prior: optional, a numpy array with disparity values

        Keyword Args:
            prior_trust_factor: float, value between 0 and 1 for the prior influence
            prior_influence_mode: str, can be 'const' or 'adaptive' for the prior
                incorporation strategy
            n_iter: int, number of iterations to run the algorithm

        Returns:
            A numpy array representing the depth map resolved by the algorithm.
        """
        depth_map = self.algorithm.lbp(image_left, image_right, prior, **kwargs)
        self.depth_frames.append(depth_map)
        return depth_map

    def run(self, prior_info=None):
        """
        Run the frame-based stereo matching on all frames and priors.

        Args:
            prior_info: optional, a list of priors a subset of which is used to
                initialise the algorithm.
        """
        n_frames = len(self.frames_timestamps)
        if prior_info is not None:
            if len(prior_info['ts']) > n_frames:
                # pick the n closest ones (where n is the number of frames)
                prior_indices = [np.searchsorted(prior_info['ts'], t_frame, side="left")
                                 for t_frame in self.frames_timestamps]
                priors = prior_info['priors'][prior_indices]
            else:
                priors = prior_info['priors']
            assert len(priors) == len(self.frames_left) == len(self.frames_right)
            pb = ProgressBar(n_frames, "Starting offline frame-based stereo matching "
                                       "with prior initialisation.")
            start_timer = time.time()
            for i, (left, right, prior) in enumerate(zip(self.frames_left,
                                                         self.frames_right, priors)):
                self.run_one_frame(left, right, prior,
                                   prior_trust_factor=1.0,
                                   prior_influence_mode='adaptive',
                                   n_iter=10)
                pb.update()
            end_timer = time.time()
            pb.end()
        else:
            pb = ProgressBar(n_frames, "Starting offline frame-based stereo matching "
                                       "without prior initialisation.")
            start_timer = time.time()
            for i, (left, right) in enumerate(zip(self.frames_left, self.frames_right)):
                self.run_one_frame(left, right)
                plt.imsave('output/checkerboard_downsampled/left_{}.png'.format(i), left)
                plt.imsave('output/checkerboard_downsampled/right_{}.png'.format(i), right)
                plt.imsave('output/checkerboard_downsampled/result_{}.png'.format(i),
                           self.depth_frames[i])
                pb.update()
            end_timer = time.time()
            pb.end()
        logger.info("Frame-based stereo matching took {}s per image pair on average."
                    .format((end_timer - start_timer) / n_frames))
106330
from docker.errors import DockerException, ImageNotFound
from pytest import raises

from yellowbox import build_image


def test_valid_image_build(docker_client):
    with build_image(docker_client, "yellowbox", path=".",
                     dockerfile="tests/resources/valid_dockerfile/Dockerfile") as image:
        container = docker_client.containers.create(image)
        container.start()
        container.wait()  # wait for the container to end and close
        container.remove()
    # out of contextmanager, image should be deleted
    with raises(ImageNotFound):
        docker_client.containers.create('yellowbox:test')


def test_invalid_parse_image_build(docker_client):
    with raises(DockerException):
        with build_image(docker_client, "yellowbox", path=".",
                         dockerfile="tests/resources/invalid_parse_dockerfile/Dockerfile"):
            pass


def test_invalid_run_image_build(docker_client):
    with raises(DockerException):
        with build_image(docker_client, "yellowbox", path=".",
                         dockerfile="tests/resources/invalid_run_dockerfile/Dockerfile"):
            pass
106335
import attr


@attr.s
class Ellipsoid:
    """Ellipsoid used for mesh calculations

    Args:
        a (float): semi-major axis
        b (float): semi-minor axis
    """
    a: float = attr.ib()
    b: float = attr.ib()
    e2: float = attr.ib(init=False)

    def __attrs_post_init__(self):
        self.e2 = 1 - (self.b ** 2 / self.a ** 2)
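For example, plugging in the WGS84 semi-axes gives the familiar first eccentricity squared (axis values quoted from the WGS84 definition):

wgs84 = Ellipsoid(a=6378137.0, b=6356752.314245)
print(wgs84.e2)  # ~0.00669438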
106361
from string import ascii_lowercase

LETTERS = {letter: str(index) for index, letter in enumerate(ascii_lowercase, start=1)}


def alphabet_position(text):
    text = text.lower()
    numbers = [LETTERS[character] for character in text if character in LETTERS]
    return ' '.join(numbers)


def cifrario(one, two):
    n = int(one) + int(two)
    r = n / 26
    if r >= 1 and r < 2:
        r = 1
    elif r <= 0.9 and r > 0:
        r = 0
    fx = n - (26 * r)
    return fx  # note: fx is returned as a number


def letterToNumber(fx):
    a = int(fx) + 96  # chr() requires an int; fx can drift to float if r is left untouched above
    print(chr(a).upper())


z = input("Insert first letter: ")
v = input("Insert second letter: ")
c = cifrario(alphabet_position(z), alphabet_position(v))
letterToNumber(c)
106367
from bitmovin_api_sdk.encoding.encodings.muxings.ts.ts_api import TsApi
from bitmovin_api_sdk.encoding.encodings.muxings.ts.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.encodings.muxings.ts.drm.drm_api import DrmApi
from bitmovin_api_sdk.encoding.encodings.muxings.ts.ts_muxing_list_query_params import TsMuxingListQueryParams
106368
import pandas as pd
import numpy as np

from misc import data_io

DATA_DIR = 'data/ut-interaction/'

""" Folder structure
<'set1' or 'set2'>/keypoints
    <video_name>/
        <video_name>_<frame_num>_keypoints.json
        ...
Ex: DATA_DIR + 'set1/keypoints/0_1_4/0_1_4_000000000042_keypoints.json'
"""

VIDEOS = [
    ['0_1_4','1_1_2','2_1_1','3_1_3','4_1_0','5_1_5','6_2_4','7_2_5','8_2_0',
     '9_2_2','10_2_1','11_2_3','12_3_4','13_3_2','14_3_1','15_3_3','16_3_5',
     '17_3_0','18_4_4','19_4_1','20_4_2','21_4_0','22_4_3','23_4_5','24_5_0',
     '25_5_4','26_5_2','27_5_1','28_5_3','29_5_5','30_6_2','31_6_5','32_6_1',
     '33_6_3','34_6_0','35_7_0','36_7_5','37_7_4','38_7_2','39_7_3','40_7_1',
     '41_8_0','42_8_2','43_8_4','44_8_4','45_8_5','46_8_3','47_8_1','48_9_3',
     '49_9_5','50_9_2','51_9_4','52_9_0','53_9_1','54_10_0','55_10_4','56_10_5',
     '57_10_3','58_10_1','59_10_2'],  # set1
    ['0_11_4','1_11_2','2_11_5','3_11_0','4_11_3','5_11_1','6_12_0','7_12_3',
     '8_12_5','9_12_1','10_12_4','11_12_2','12_13_4','13_13_2','14_13_1',
     '15_13_3','16_13_5','17_13_0','18_14_0','19_14_1','20_14_5','21_14_3',
     '22_14_4','23_14_2','24_15_1','25_15_0','26_15_4','27_15_2','28_15_3',
     '29_15_5','30_16_3','31_16_0','32_16_1','33_16_4','34_16_2','35_16_5',
     '36_17_1','37_17_0','38_17_3','39_17_5','40_17_4','41_17_2','42_18_2',
     '43_18_4','44_18_1','45_18_3','46_18_5','47_18_0','48_19_0','49_19_1',
     '50_19_4','51_19_3','52_19_5','53_19_2','54_20_1','55_20_0','56_20_5',
     '57_20_3','58_20_4','59_20_2']  # set2
]

ACTIONS = ['Hand Shaking','Hugging','Kicking','Pointing','Punching','Pushing']


def get_ground_truth(data_dir=DATA_DIR):
    video_lst, setid_lst, seq_lst, path_lst, action_lst = [], [], [], [], []
    for set_id, set_videos in enumerate(VIDEOS):
        video_lst = video_lst + set_videos
        setid_lst = setid_lst + len(set_videos)*[set_id+1]
        for video in set_videos:
            num, seq, action = video.split('_')
            seq_lst.append(int(seq))
            action_lst.append(int(action))
            path = '{}set{}/keypoints/{}/'.format(data_dir, set_id+1, video)
            path_lst.append(path)

    dataframe_dict = {'video_id': video_lst,
                      'setid': setid_lst,
                      'seq': seq_lst,
                      'path': path_lst,
                      'action': action_lst}
    ground_truth = pd.DataFrame(dataframe_dict).set_index('video_id')
    return ground_truth


def get_folds(setid):
    if setid == 1:
        folds = np.arange(10)
    elif setid == 2:
        folds = np.arange(10, 20)
    else:
        raise ValueError("setid must be 1 or 2, value entered: "+str(setid))
    return folds


def get_train_gt(fold_num):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))
    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences != fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences != fold_num] + 1

    ground_truth = get_ground_truth()
    gt_split = ground_truth[ground_truth.setid == setid]
    gt_split = gt_split[gt_split.seq.isin(fold_sequences)]
    return gt_split


def get_val_gt(fold_num):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))
    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences == fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences == fold_num] + 1

    ground_truth = get_ground_truth()
    gt_split = ground_truth[ground_truth.setid == setid]
    gt_split = gt_split[gt_split.seq.isin(fold_sequences)]
    return gt_split


def get_train(fold_num, **kwargs):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))
    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences != fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences != fold_num] + 1
    return get_seqs(setid, fold_sequences, **kwargs)


def get_val(fold_num, **kwargs):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))
    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences == fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences == fold_num] + 1
    return get_seqs(setid, fold_sequences, **kwargs)


def get_seqs(setid, selected_sequences, **kwargs):
    if setid < 1 or setid > 2:
        raise ValueError("setid must be 1 or 2, value entered: "+str(setid))

    ground_truth = get_ground_truth()
    gt_split = ground_truth[ground_truth.setid == setid]
    gt_split = gt_split[gt_split.seq.isin(selected_sequences)]

    X, Y = data_io.get_data(gt_split, pose_style='OpenPose', **kwargs)
    return X, Y
106387
pkgname = "libmodplug" pkgver = "0.8.9.0" pkgrel = 0 build_style = "gnu_configure" configure_args = ["--enable-static"] hostmakedepends = ["pkgconf"] pkgdesc = "MOD playing library" maintainer = "q66 <<EMAIL>>" license = "custom:none" url = "http://modplug-xmms.sourceforge.net" source = f"$(SOURCEFORGE_SITE)/modplug-xmms/{pkgname}-{pkgver}.tar.gz" sha256 = "457ca5a6c179656d66c01505c0d95fafaead4329b9dbaa0f997d00a3508ad9de" @subpackage("libmodplug-devel") def _devel(self): return self.default_devel()
106401
import numpy as np
from sklearn.metrics import average_precision_score


def load_data(data_path):
    """load array data from data_path"""
    data = np.load(data_path)
    return data['X_train'], data['y_train'], data['X_test'], data['y_test']


def calculate_average_precision(label, index, similarity, num_search_sample):
    """calculate average precision of similar search result.

    The average precision is calculated over num_search_sample
    """
    label_idx = np.array([label[idx] for idx in index])
    label_idx_true = np.array([np.where(row == row[0], 1, 0) for row in label_idx])
    label_idx_true = label_idx_true[:, 1:]
    ap = []
    for i in range(num_search_sample):
        ap.append(average_precision_score(label_idx_true[i], similarity[i]))
    return ap
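A toy run of calculate_average_precision (the labels, retrieval indices, and similarities below are all made up; each index row lists the query itself first, followed by its retrieved items):

import numpy as np

label = np.array([0, 0, 1, 1, 0])
index = np.array([[0, 1, 2, 4],    # query 0 retrieves items 1, 2, 4
                  [2, 3, 0, 1]])   # query 2 retrieves items 3, 0, 1
similarity = np.array([[0.9, 0.2, 0.8],
                       [0.7, 0.4, 0.1]])
print(calculate_average_precision(label, index, similarity, num_search_sample=2))  # [1.0, 1.0]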
106422
import pytest
import attr

import xsimlab as xs
from xsimlab.tests.fixture_process import SomeProcess, AnotherProcess, ExampleProcess
from xsimlab.variable import _as_dim_tuple, _as_group_tuple


@pytest.mark.parametrize(
    "dims,expected",
    [
        ((), ((),)),
        ([], ((),)),
        ("", ((),)),
        (("x"), (("x",),)),
        (["x"], (("x",),)),
        ("x", (("x",),)),
        (("x", "y"), (("x", "y"),)),
        ([(), "x", ("x", "y")], ((), ("x",), ("x", "y"))),
    ],
)
def test_as_dim_tuple(dims, expected):
    assert _as_dim_tuple(dims) == expected


def test_as_dim_tuple_invalid():
    invalid_dims = ["x", "y", ("x", "y"), ("y", "x")]
    with pytest.raises(ValueError) as excinfo:
        _as_dim_tuple(invalid_dims)
    assert "following combinations" in str(excinfo.value)
    assert "('x',), ('y',) and ('x', 'y'), ('y', 'x')" in str(excinfo.value)


@pytest.mark.parametrize(
    "groups,group,expected",
    [
        (None, None, ()),
        ("group1", None, ("group1",)),
        (["group1", "group2"], None, ("group1", "group2")),
        ("group1", "group2", ("group1", "group2")),
        ("group1", "group1", ("group1",)),
    ],
)
def test_as_group_tuple(groups, group, expected):
    if group is not None:
        with pytest.warns(FutureWarning):
            actual = _as_group_tuple(groups, group)
    else:
        actual = _as_group_tuple(groups, group)
    assert actual == expected


def test_variable():
    # test constructor
    @attr.attrs
    class Foo:
        some_var = xs.variable()
        another_var = xs.variable(intent="out")

    assert Foo(some_var=2).some_var == 2

    with pytest.raises(TypeError):
        # intent='out' not in constructor
        Foo(another_var=2)


def test_index():
    with pytest.raises(ValueError, match=r".*not accept scalar values.*"):
        xs.index(())

    # test constructor
    @attr.attrs
    class Foo:
        var = xs.index(dims="x")

    with pytest.raises(TypeError):
        # index variable not in constructor (intent='out')
        Foo(var=2)


def test_on_demand():
    # test constructor
    @attr.attrs
    class Foo:
        var = xs.on_demand()

    with pytest.raises(TypeError):
        # on_demand variable not in constructor (intent='out')
        Foo(var=2)


def test_any_object():
    # test constructor
    @attr.attrs
    class Foo:
        var = xs.any_object()

    with pytest.raises(TypeError):
        # any_object variable not in constructor (intent='out')
        Foo(var=2)


def test_foreign():
    with pytest.raises(ValueError, match="intent='inout' is not supported.*"):
        xs.foreign(ExampleProcess, "some_var", intent="inout")

    var = attr.fields(ExampleProcess).out_foreign_var
    ref_var = attr.fields(AnotherProcess).another_var

    for k in ("description", "attrs"):
        assert var.metadata[k] == ref_var.metadata[k]

    # test constructor
    @attr.attrs
    class Foo:
        some_var = xs.foreign(SomeProcess, "some_var")
        another_var = xs.foreign(AnotherProcess, "another_var", intent="out")

    assert Foo(some_var=2).some_var == 2

    with pytest.raises(TypeError):
        # intent='out' not in constructor
        Foo(another_var=2)


def test_global_ref():
    with pytest.raises(ValueError, match="intent='inout' is not supported.*"):
        xs.global_ref("some_var", intent="inout")

    # test constructor
    @attr.attrs
    class Foo:
        some_var = xs.global_ref("some_var")
        another_var = xs.global_ref("another_var", intent="out")

    assert Foo(some_var=2).some_var == 2

    with pytest.raises(TypeError):
        # intent='out' not in constructor
        Foo(another_var=2)


def test_group():
    @attr.attrs
    class Foo:
        bar = xs.group("g")

    # test init with default tuple value
    foo = Foo()
    assert foo.bar == tuple()


def test_group_dict():
    @attr.attrs
    class Foo:
        bar = xs.group_dict("g")

    # test init with default dict value
    foo = Foo()
    assert foo.bar == dict()
106466
import datetime

timenow = datetime.datetime.now()


class user:
    def __init__(self, name, information):
        self.name = name
        self.information = information

    def get_username(self) -> str:
        return self.name

    def get_user_information(self) -> str:
        return self.information


class laundryinfo:
    def __init__(self):
        self.time = datetime.datetime.now()

    def calculate(self, laundry_weight, type_of_laundry) -> int:
        cuci_komplit = 6000
        cuci_kering = 4000
        setrika = 4000
        bed_cover = 8000
        # selimut = 7000
        # gorden = 7000
        express = 9000
        if type_of_laundry in ["selimut", "gorden"]:
            type_of_laundry = 7000
        elif type_of_laundry == "cuci komplit":
            type_of_laundry = cuci_komplit
        elif type_of_laundry == "cuci kering":
            type_of_laundry = cuci_kering
        elif type_of_laundry == "setrika":
            type_of_laundry = setrika
        elif type_of_laundry == "bed cover":
            type_of_laundry = bed_cover
        elif type_of_laundry == "express":  # was mistakenly compared against the int price
            type_of_laundry = express
        total = type_of_laundry * int(laundry_weight)
        return int(total)

    def get_time(self) -> str:
        timeNow = self.time.strftime("%H:%M")
        return f"waktu : {timeNow}"


def save_data():
    filename = f"laundry_{timenow.strftime('%d-%B-%Y')}.txt"
    text_information = f"desc :{user.get_user_information()}\nnama : {user.get_username()}\nharga : price\nwaktu: time\n\n"  # lgtm [py/call/wrong-arguments]
    save_data_log = open(filename, "a")
    save_data_log.write(text_information)
    save_data_log.close()
    print("saved")
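A quick check of the price table, assuming laundry_weight is in kilograms and prices in rupiah:

print(laundryinfo().calculate(3, "cuci komplit"))  # 18000
print(laundryinfo().calculate(2, "bed cover"))     # 16000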
106503
import gdb  # provided by GDB's embedded Python interpreter
import rospy
from rospy.msg import AnyMsg


class GdbPublisher(object):
    def __init__(self, topic, msgtype, queue_size=1):
        self.topic = topic
        self.msgtype = msgtype
        # Construct publisher lazily, since we don't know whether to latch or not yet.
        self.publisher = None
        self.queue_size = queue_size

    def publish(self, serialized_message, latch):
        message = AnyMsg()
        message.deserialize(serialized_message)
        if self.publisher == None:
            self.publisher = rospy.Publisher(self.topic, self.msgtype,
                                             queue_size=self.queue_size, latch=latch)
        self.publisher.publish(message)


class GdbPublisherDictionary(dict):
    def __init__(self, publishers=None):
        super(GdbPublisherDictionary, self).__init__()
        if publishers != None:
            self.insert(publishers)

    def insert(self, publishers):
        try:
            iterator = iter(publishers)
            for publisher in publishers:
                self[publisher.topic] = publisher
        except TypeError:
            # add the single publisher that we received
            self[publishers.topic] = publishers

    def publish(self, topic, serialized_message, latch):
        publisher = self.get(topic, None)
        if publisher != None:
            publisher.publish(serialized_message, latch)
            return True
        return False


class GdbMessageBreakpoint(gdb.Breakpoint):
    # the default context extractors
    default_serialized_message_extractor = lambda self: gdb.selected_frame().read_var('m')
    default_topic_extractor = lambda self: str(gdb.selected_frame().read_var('this').dereference()
                                               ['name_']['_M_dataplus']['_M_p'].string())
    default_latched_extractor = lambda self: str(gdb.selected_frame().read_var('this').dereference()
                                                 ['latch_'])

    def __init__(self, location, context_extractor=lambda: {}):
        super(GdbMessageBreakpoint, self).__init__(location, internal=True)
        self.context_extractor = context_extractor
        self.silent = True

    def stop(self):
        if not hasattr(self, 'enclosing_node'):
            raise Exception('Missing node reference')
        context = self.context_extractor()
        if 'serialized_variable' not in context:
            context['serialized_variable'] = self.default_serialized_message_extractor()
        if 'topic' not in context:
            context['topic'] = self.default_topic_extractor()
        if 'latch' not in context:
            context['latch'] = self.default_latched_extractor()
        self.enclosing_node.handle_message(**context)
        return False


class GdbPublisherNode(object):
    def __init__(self, publisher_dictionary, breakpoints,
                 log_publisher=False, log_publisher_state=False):
        self.ros_handle = rospy.init_node('gdb_publisher')
        self.publisher_dictionary = publisher_dictionary
        self.breakpoints = []
        self.add_breakpoints(breakpoints)
        self.log_publisher = log_publisher
        self.log_publisher_state = log_publisher_state
        self.enabled_states_saved = False
        print "Initialized ROS publisher."

    def add_breakpoint(self, breakpoint):
        breakpoint.enclosing_node = self
        self.breakpoints.append(breakpoint)

    def add_breakpoints(self, breakpoints):
        try:
            iterator = iter(breakpoints)
            for breakpoint in breakpoints:
                self.add_breakpoint(breakpoint)
        except TypeError:
            # a single breakpoint was passed in rather than an iterable
            self.add_breakpoint(breakpoints)

    def handle_message(self, serialized_variable, topic, latch):
        if topic[0] == '/':
            topic = topic[1:]
        if topic not in self.publisher_dictionary:
            if self.log_publisher:
                print "didn't publish message in {}".format(topic)
            # don't bother with executing the rest since we are not publishing this message
            return
        message_start = serialized_variable['message_start']
        # in ros/serialization.h, serializeMessage(), num_bytes = len + 4
        serialized_length = serialized_variable['num_bytes'] - 4
        serialized_message = self.convert_serialized_to_python(message_start, serialized_length)
        self.publisher_dictionary.publish(topic, serialized_message, latch)
        if self.log_publisher:
            print 'published {}, {}'.format(topic, serialized_length)

    def enable_breakpoints(self, print_notice=True):
        for b in self.breakpoints:
            b.enabled = True
        if print_notice:
            print "Enabled ROS publisher."

    def disable_breakpoints(self, print_notice=True):
        for b in self.breakpoints:
            b.enabled = False
        if print_notice:
            print "Disabled ROS publisher."

    def save_state_and_disable_ros_publisher(self):
        if self.enabled_states_saved == True:
            raise Exception('This should not have happened, ROS publisher state has already been saved')
        self.breakpoint_enabled_states = [b.enabled for b in self.breakpoints]
        self.enabled_states_saved = True
        self.disable_breakpoints(False)
        if self.breakpoint_enabled_states[0] and self.log_publisher_state:
            print "Temporarily disabled ROS publisher."

    def restore_ros_publisher_state(self):
        if self.enabled_states_saved == True:
            for b in zip(self.breakpoints, self.breakpoint_enabled_states):
                b[0].enabled = b[1]
            if self.breakpoint_enabled_states[0] and self.log_publisher_state:
                print "Re-enabled ROS publisher."
            self.enabled_states_saved = False

    @staticmethod
    def convert_serialized_to_python(c_array, length):
        return ''.join([chr(c_array[char]) for char in range(length)])
106580
from __init__ import *
import sys
import subprocess
import numpy as np
from fractions import Fraction
import math

sys.path.insert(0, ROOT)

from compiler import *
from constructs import *


def maxfilter(pipe_data):
    # Pipeline Variables
    x = Variable(Int, "x")
    y = Variable(Int, "y")
    c = Variable(Int, "c")
    t = Variable(Int, "t")

    # Pipeline Parameters
    R = Parameter(Int, "R")  # image rows
    C = Parameter(Int, "C")  # image cols

    # Register in the dictionary
    pipe_data['R'] = R
    pipe_data['C'] = C

    # Input Image
    img = Image(Float, "img", [3, R, C])

    radius = 26
    slices = int(math.ceil(math.log(radius, 2))) + 1

    rows = Interval(Int, 0, R-1)
    cols = Interval(Int, 0, C-1)
    c_int = Interval(Int, 0, 2)  # channels
    t_int = Interval(Int, 0, slices)

    def clamped(i, j, k):
        xminR = Min(i, R-1)
        xclamp = Max(0, xminR)
        yminC = Min(j, C-1)
        yclamp = Max(0, yminC)
        return img(k, xclamp, yclamp)

    def clamp(e, mn, mx):
        minm = Min(e, mx)
        maxm = Max(minm, mn)
        return maxm

    rx = Variable(Int, "rx")
    ry = Variable(Int, "ry")

    rx_int = Interval(Int, -radius, R + radius)  # TODO: re-check this
    ry_int = Interval(Int, 1, slices - 1)
    y_ry_int = Interval(Int, radius, C - R - radius*3 - 1)

    vert_log = Reduction(([x, y, c, t], [rows, cols, c_int, t_int]),
                         ([x, rx, c, ry], [rows, y_ry_int, c_int, ry_int]),
                         Int, "vert_log")
    vert_log.defn = [
        Reduce(vert_log(x, y, c, t),
               Max(clamped(x, rx, c),
                   #clamped(x, rx + clamp(((ry-1)), 0, radius*2), c)),          # TODO: fix this
                   #clamped(x, rx + clamp(1 << (ry-1), 0, radius*2), c)),       # TODO: to this
                   clamped(x, rx + clamp(Cast(Int, Pow(2, ry-1)), 0, radius*2), c)),  # TODO: last resort
               Op.Max)
        ]

    slice_for_radius = Function(([t], [t_int]), Int, "slice_for_radius")
    slice_for_radius.defn = [Cast(Int, Log(2*t + 1) / float(0.693147))]

    y_vert_int = Interval(Int, 0, C - slices - 1 - radius*2)
    vert = Function(([x, y, c, t], [rows, cols, c_int, t_int]), Int, "vert")
    eslice = clamp(slice_for_radius(t), 0, slices)
    first_sample = vert_log(x, y-t, c, eslice)
    #second_sample = vert_log(x, y + t + 1 - clamp(eslice, 0, 2*radius), c, eslice)        # TODO: fix this
    #second_sample = vert_log(x, y + t + 1 - clamp(1 << eslice, 0, 2*radius), c, eslice)   # TODO: to this
    second_sample = vert_log(x, y + t + 1 - clamp(Cast(Int, Pow(2, eslice)), 0, 2*radius), c, eslice)  # TODO: last resort
    vert.defn = [Max(first_sample, second_sample)]

    dx = Variable(Int, "dx")  # for final
    dx_int = Interval(Int, -radius, 2*radius+1)
    dy = Variable(Int, "dy")
    dy_int = Interval(Int, 0, radius + 1)
    x_radius_int = Interval(Int, 0, radius)

    lhs = x*x + dy*dy
    rhs = ((radius + 0.25) * (radius + 0.25))
    cond_t = Condition(lhs, "<", rhs)
    cond_f = Condition(lhs, ">=", rhs)

    filter_height = Reduction(([x], [dx_int]),
                              ([x, dy], [x_radius_int, dy_int]),
                              Int, "filter_height")
    #filter_height.defn = [Reduce(filter_height(x), Select(cond_t, 1, 0), Op.Sum)]
    filter_height.defn = [Case(cond_t, Reduce(filter_height(x), 1, Op.Sum)),
                          Case(cond_f, Reduce(filter_height(x), 0, Op.Sum))]
    filter_height.default = 0

    x_dx_int = Interval(Int, radius, R - 2*radius - 2)
    final = Reduction(([x, y, c], [rows, cols, c_int]),
                      ([x, y, c, dx], [x_dx_int, cols, c_int, dx_int]),
                      Int, "final")
    final.defn = [Reduce(final(x, y, c),
                         vert(x + dx, y, c, clamp(filter_height(dx), 0, radius + 1)),
                         Op.Max)]

    return final
106591
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
import tpactions

###################################################

def GetConditionName():
    return "Swashbuckler"

print "Registering " + GetConditionName()

classEnum = stat_level_swashbuckler
classSpecModule = __import__('class049_swashbuckler')

###################################################

#### standard callbacks - BAB and Save values
def OnGetToHitBonusBase(attachee, args, evt_obj):
    classLvl = attachee.stat_level_get(classEnum)
    babvalue = game.get_bab_for_class(classEnum, classLvl)
    evt_obj.bonus_list.add(babvalue, 0, 137)  # untyped, description: "Class"
    return 0

def OnGetSaveThrowFort(attachee, args, evt_obj):
    value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Fortitude)
    evt_obj.bonus_list.add(value, 0, 137)
    return 0

def OnGetSaveThrowReflex(attachee, args, evt_obj):
    value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Reflex)
    evt_obj.bonus_list.add(value, 0, 137)
    return 0

def OnGetSaveThrowWill(attachee, args, evt_obj):
    value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Will)
    evt_obj.bonus_list.add(value, 0, 137)
    return 0

classSpecObj = PythonModifier(GetConditionName(), 0)
classSpecObj.AddHook(ET_OnToHitBonusBase, EK_NONE, OnGetToHitBonusBase, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_FORTITUDE, OnGetSaveThrowFort, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, OnGetSaveThrowReflex, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_WILL, OnGetSaveThrowWill, ())

#Checks for a load greater than light or armor greater than light (to enable various abilities)
def SwashbucklerEncumberedCheck(obj):
    #Light armor or no armor
    armor = obj.item_worn_at(5)
    if armor != OBJ_HANDLE_NULL:
        armorFlags = armor.obj_get_int(obj_f_armor_flags)
        if (armorFlags != ARMOR_TYPE_LIGHT) and (armorFlags != ARMOR_TYPE_NONE):
            return 1
    #No heavy or medium load
    HeavyLoad = obj.d20_query(Q_Critter_Is_Encumbered_Heavy)
    if HeavyLoad:
        return 1
    MediumLoad = obj.d20_query(Q_Critter_Is_Encumbered_Medium)
    if MediumLoad:
        return 1
    return 0

#Check if the weapon is usable with finesse
def IsFinesseWeapon(creature, weapon):
    #Unarmed works
    if weapon == OBJ_HANDLE_NULL:
        return 1
    #Ranged weapons don't work
    weapFlags = weapon.obj_get_int(obj_f_weapon_flags)
    if weapFlags & OWF_RANGED_WEAPON:
        return 0
    #Light weapon works
    wieldType = creature.get_wield_type(weapon)
    if wieldType == 0:
        return 1
    #Whip, rapier, spiked chain works
    WeaponType = weapon.get_weapon_type()
    if (WeaponType == wt_whip) or (WeaponType == wt_spike_chain) or (WeaponType == wt_rapier):
        return 1
    return 0

#Swashbuckler Abilities

# Swashbuckler Grace
def SwashbucklerGraceReflexBonus(attachee, args, evt_obj):
    #Must not be encumbered
    if SwashbucklerEncumberedCheck(attachee):
        return 0
    classLvl = attachee.stat_level_get(classEnum)
    classBonusLvls = attachee.d20_query("Swashbuckler Grace Level Bonus")
    classLvl = classLvl + classBonusLvls
    if classLvl < 11:
        bonval = 1
    elif classLvl < 20:
        bonval = 2
    else:
        bonval = 3
    evt_obj.bonus_list.add(bonval, 0, "Swashbuckler Grace")  #Competence Bonus
    return 0

swashbucklerGrace = PythonModifier("Swashbuckler Grace", 2)  #Spare, Spare
swashbucklerGrace.MapToFeat("Swashbuckler Grace")
swashbucklerGrace.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, SwashbucklerGraceReflexBonus, ())

# Swashbuckler Insightful Strike
def SwashbucklerInsightfulStrikeDamageBonus(attachee, args, evt_obj):
    #Must not be encumbered
    if SwashbucklerEncumberedCheck(attachee):
        return 0
    #Must be usable with weapon finesse
    weaponUsed = evt_obj.attack_packet.get_weapon_used()
    if not IsFinesseWeapon(attachee, weaponUsed):
        return 0
    #Enemy must be sneak attackable
    target = evt_obj.attack_packet.target
    if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
        return 0
    intStat = attachee.stat_level_get(stat_intelligence)  # renamed to avoid shadowing the int builtin
    intMod = (intStat - 10) / 2
    evt_obj.damage_packet.bonus_list.add_from_feat(intMod, 0, 137, "Insightful Strike")
    return 0

swashbucklerInsightfulStrike = PythonModifier("Swashbuckler Insightful Strike", 2)  #Spare, Spare
swashbucklerInsightfulStrike.MapToFeat("Swashbuckler Insightful Strike")
swashbucklerInsightfulStrike.AddHook(ET_OnDealingDamage, EK_NONE, SwashbucklerInsightfulStrikeDamageBonus, ())

# Swashbuckler Dodge
def SwashbucklerDodgeACBonus(attachee, args, evt_obj):
    #Must not be encumbered
    if SwashbucklerEncumberedCheck(attachee):
        return 0
    attacker = evt_obj.attack_packet.attacker
    if attacker == OBJ_HANDLE_NULL or attacker == attachee:
        return 0
    #Test if the ability is used
    prevAttacker = args.get_obj_from_args(0)
    #Works for each attack from the first attacker like dodge (SRD lets you choose the opponent)
    if prevAttacker != OBJ_HANDLE_NULL:
        if attacker != prevAttacker:
            return 0
    classLvl = attachee.stat_level_get(classEnum)
    classBonusLvls = attachee.d20_query("Swashbuckler Dodge Level Bonus")
    classLvl = classLvl + classBonusLvls
    bonval = classLvl / 5
    evt_obj.bonus_list.add(bonval, 8, 137)  #Dodge bonus
    args.set_args_from_obj(0, attacker)
    return 0

def SwashbucklerDodgeBeginRound(attachee, args, evt_obj):
    #Reset to a null attacker at the beginning of the round
    args.set_args_from_obj(0, OBJ_HANDLE_NULL)
    return 0

swashbucklerDodge = PythonModifier("Swashbuckler Dodge", 4)  #Used this round flag, Attacker Upper Handle, Attacker Lower Handle, Spare
swashbucklerDodge.MapToFeat("Swashbuckler Dodge")
swashbucklerDodge.AddHook(ET_OnGetAC, EK_NONE, SwashbucklerDodgeACBonus, ())
swashbucklerDodge.AddHook(ET_OnBeginRound, EK_NONE, SwashbucklerDodgeBeginRound, ())
swashbucklerDodge.AddHook(ET_OnConditionAdd, EK_NONE, SwashbucklerDodgeBeginRound, ())

# Swashbuckler Acrobatic Charge
swashbucklerAcrobaticCharge = PythonModifier("Swashbuckler Acrobatic Charge", 2)  #Used this round flag, Spare
swashbucklerAcrobaticCharge.MapToFeat("Swashbuckler Acrobatic Charge")

#Swashbuckler Improved Flanking
def SwashbucklerImprovedFlankingAttack(attachee, args, evt_obj):
    if evt_obj.attack_packet.get_flags() & D20CAF_FLANKED:
        evt_obj.bonus_list.add(2, 0, "Swashbuckler Improved Flanking")
    return 0

swashbucklerImprovedFlanking = PythonModifier("Swashbuckler Improved Flanking", 2)  #Spare, Spare
swashbucklerImprovedFlanking.MapToFeat("Swashbuckler Improved Flanking")
swashbucklerImprovedFlanking.AddHook(ET_OnToHitBonus2, EK_NONE, SwashbucklerImprovedFlankingAttack, ())

# Swashbuckler Lucky
def SwashbucklerLuckyRerollSavingThrow(attachee, args, evt_obj):
    if args.get_arg(0) and args.get_arg(1):
        if not evt_obj.return_val:
            evt_obj.return_val = 1
            args.set_arg(0, 0)
    return 0

def SwashbucklerLuckyRerollAttack(attachee, args, evt_obj):
    if args.get_arg(0) and args.get_arg(2):
        if not evt_obj.return_val:
            evt_obj.return_val = 1
            args.set_arg(0, 0)
    return 0

def SwashbucklerLuckyRadial(attachee, args, evt_obj):
    #Add a checkbox to use the reroll if a charge is available
    if args.get_arg(0):
        radial_parent = tpdp.RadialMenuEntryParent("Lucky")
        LuckyID = radial_parent.add_child_to_standard(attachee, tpdp.RadialMenuStandardNode.Class)
        checkboxSavingThrow = tpdp.RadialMenuEntryToggle("Reroll Next Missed Saving Throw", "TAG_INTERFACE_HELP")
        checkboxSavingThrow.link_to_args(args, 1)
        checkboxSavingThrow.add_as_child(attachee, LuckyID)
        checkboxAttack = tpdp.RadialMenuEntryToggle("Reroll Next Missed Attack", "TAG_INTERFACE_HELP")
        checkboxAttack.link_to_args(args, 2)
        checkboxAttack.add_as_child(attachee, LuckyID)
    return 0

def SwashbucklerLuckyNewDay(attachee, args, evt_obj):
    args.set_arg(0, 1)
    return 0

swashbucklerLucky = PythonModifier("Swashbuckler Lucky", 5)  #Used, Reroll Saving Throw, Reroll Attack, Spare, Spare
swashbucklerLucky.MapToFeat("Swashbuckler Lucky")
swashbucklerLucky.AddHook(ET_OnBuildRadialMenuEntry, EK_NONE, SwashbucklerLuckyRadial, ())
swashbucklerLucky.AddHook(ET_OnD20Query, EK_Q_RerollSavingThrow, SwashbucklerLuckyRerollSavingThrow, ())
swashbucklerLucky.AddHook(ET_OnD20Query, EK_Q_RerollAttack, SwashbucklerLuckyRerollAttack, ())
swashbucklerLucky.AddHook(ET_OnConditionAdd, EK_NONE, SwashbucklerLuckyNewDay, ())
swashbucklerLucky.AddHook(ET_OnNewDay, EK_NEWDAY_REST, SwashbucklerLuckyNewDay, ())

# Swashbuckler Acrobatic Skill Mastery
swashbucklerAcrobaticSkillMastery = PythonModifier("Swashbuckler Acrobatic Skill Mastery", 2)  #Spare, Spare
swashbucklerAcrobaticSkillMastery.MapToFeat("Swashbuckler Acrobatic Skill Mastery")

# Swashbuckler Weakening Critical
def SwashbucklerWeakeningCriticalOnDamage(attachee, args, evt_obj):
    #Enemy must not be immune to criticals
    target = evt_obj.attack_packet.target
    if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
        return 0
    attackFlags = evt_obj.attack_packet.get_flags()
    #Must be a critical
    criticalHit = attackFlags & D20CAF_CRITICAL
    if not criticalHit:
        return 0
    target.condition_add_with_args("Damage_Ability_Loss", 0, 2)
    game.create_history_freeform(target.description + " takes 2 points of strength damage from weakening critical.\n\n")
    target.float_text_line("Strength damage!")
    return 0

swashbucklerWeakeningCritical = PythonModifier("Swashbuckler Weakening Critical", 2)  #Spare, Spare
swashbucklerWeakeningCritical.MapToFeat("Swashbuckler Weakening Critical")
swashbucklerWeakeningCritical.AddHook(ET_OnDealingDamage2, EK_NONE, SwashbucklerWeakeningCriticalOnDamage, ())

# Swashbuckler Wounding Critical
def SwashbucklerWoundingCriticalOnDamage(attachee, args, evt_obj):
    #Enemy must not be immune to criticals
    target = evt_obj.attack_packet.target
    if target.d20_query(Q_Critter_Is_Immune_Critical_Hits):
        return 0
    attackFlags = evt_obj.attack_packet.get_flags()
    #Must be a critical
    criticalHit = attackFlags & D20CAF_CRITICAL
    if not criticalHit:
        return 0
    target.condition_add_with_args("Damage_Ability_Loss", 2, 2)
    game.create_history_freeform(target.description + " takes 2 points of constitution damage from wounding critical.\n\n")
    target.float_text_line("Constitution damage!")
    return 0

swashbucklerWoundingCritical = PythonModifier("Swashbuckler Wounding Critical", 2)  #Spare, Spare
swashbucklerWoundingCritical.MapToFeat("Swashbuckler Wounding Critical")
#The hook below was registered on swashbucklerWeakeningCritical by mistake
swashbucklerWoundingCritical.AddHook(ET_OnDealingDamage2, EK_NONE, SwashbucklerWoundingCriticalOnDamage, ())
106613
import argparse

from baseline.utils import read_config_stream
from mead.utils import hash_config, convert_path


def main():
    parser = argparse.ArgumentParser(description="Get the mead hash of a config.")
    parser.add_argument('config',
                        help='JSON/YML Configuration for an experiment: local file or remote URL',
                        type=convert_path, default="$MEAD_CONFIG")
    args = parser.parse_args()
    config = read_config_stream(args.config)
    print(hash_config(config))


if __name__ == "__main__":
    main()
106706
from pyramid.security import unauthenticated_userid

from .models import User


def get_user(request):
    user_id = unauthenticated_userid(request)
    if user_id is not None:
        return User.fetch_by_id(user_id)


def group_finder(user_id, request):
    user = request.user
    if user:
        return ['admin'] if user.is_admin else ['student']
106714
import orca
import pandas as pd

from urbansim_templates import modelmanager, shared, utils, __version__
from urbansim_templates.shared import CoreTemplateSettings, OutputColumnSettings


class ExpressionSettings():
    """
    Stores custom parameters used by the
    :mod:`~urbansim_templates.data.ColumnFromExpression` template. Parameters can be
    passed to the constructor or set as attributes.

    Parameters
    ----------
    table : str, optional
        Name of Orca table the expression will be evaluated on. Required before
        running the template.

    expression : str, optional
        String describing operations on existing columns of the table, for example
        "a/log(b+c)". Required before running. Supports arithmetic and math functions
        including sqrt, abs, log, log1p, exp, and expm1 -- see Pandas ``df.eval()``
        documentation for further details.

    """
    def __init__(self, table=None, expression=None):
        self.table = table
        self.expression = expression

    @classmethod
    def from_dict(cls, d):
        return cls(table=d['table'], expression=d['expression'])

    def to_dict(self):
        return {'table': self.table, 'expression': self.expression}


@modelmanager.template
class ColumnFromExpression():
    """
    Template to register a column of derived data with Orca, based on an expression.
    Parameters may be passed to the constructor, but they are easier to set as
    attributes. The expression can refer to any columns in the same table, and will
    be evaluated using ``df.eval()``. Values will be calculated lazily, only when the
    column is needed for a specific operation.

    Parameters
    ----------
    meta : :mod:`~urbansim_templates.shared.CoreTemplateSettings`, optional
        Standard parameters. This template sets the default value of ``meta.autorun``
        to True.

    data : :mod:`~urbansim_templates.data.ExpressionSettings`, optional
        Special parameters for this template.

    output : :mod:`~urbansim_templates.shared.OutputColumnSettings`, optional
        Parameters for the column that will be generated. This template uses
        ``data.table`` as the default value for ``output.table``.

    """
    def __init__(self, meta=None, data=None, output=None):
        self.meta = CoreTemplateSettings(autorun=True) if meta is None else meta
        self.meta.template = self.__class__.__name__
        self.meta.template_version = __version__

        self.data = ExpressionSettings() if data is None else data
        self.output = OutputColumnSettings() if output is None else output

    @classmethod
    def from_dict(cls, d):
        """
        Create a class instance from a saved dictionary.

        """
        if 'meta' not in d:
            return cls.from_dict_0_2_dev5(d)

        return cls(
            meta=CoreTemplateSettings.from_dict(d['meta']),
            data=ExpressionSettings.from_dict(d['data']),
            output=OutputColumnSettings.from_dict(d['output']))

    @classmethod
    def from_dict_0_2_dev5(cls, d):
        """
        Converter to read saved data from 0.2.dev5 or earlier. Automatically invoked
        by ``from_dict()`` as needed.

        """
        return cls(
            meta=CoreTemplateSettings(
                name=d['name'],
                tags=d['tags'],
                autorun=d['autorun']),
            data=ExpressionSettings(
                table=d['table'],
                expression=d['expression']),
            output=OutputColumnSettings(
                column_name=d['column_name'],
                data_type=d['data_type'],
                missing_values=d['missing_values'],
                cache=d['cache'],
                cache_scope=d['cache_scope']))

    def to_dict(self):
        """
        Create a dictionary representation of the object.

        """
        return {
            'meta': self.meta.to_dict(),
            'data': self.data.to_dict(),
            'output': self.output.to_dict()}

    def run(self):
        """
        Run the template, registering a column of derived data with Orca. Requires
        values to be set for ``data.table``, ``data.expression``, and
        ``output.column_name``.

        """
        if self.data.table is None:
            raise ValueError("Please provide a table")
        if self.data.expression is None:
            raise ValueError("Please provide an expression")
        if self.output.column_name is None:
            raise ValueError("Please provide a column name")

        settings = self.output
        if settings.table is None:
            settings.table = self.data.table

        cols = utils.cols_in_expression(self.data.expression)

        def build_column():
            df = utils.get_df(self.data.table, columns=cols)
            series = df.eval(self.data.expression)
            return series

        shared.register_column(build_column, settings)
106748
import machine

tcounter = 0

p1 = machine.Pin(18)
p1.init(p1.OUT)
p1.value(1)


def tcb(timer):
    global tcounter
    # toggle the pin on every tick
    if tcounter & 1:
        p1.value(0)
    else:
        p1.value(1)
    tcounter += 1
    if (tcounter % 100) == 0:
        print("[tcb] timer: {} counter: {}".format(timer.timernum(), tcounter))


# t1.deinit()  # if it is used
t1 = machine.Timer(2)
t1.init(period=20, mode=t1.PERIODIC, callback=tcb)
106754
from openbiolink.graph_creation.metadata_db_file.edge.dbMetadataEdge import DbMetadataEdge
from openbiolink.graph_creation.types.dbType import DbType


class DbMetaEdgeDisGeNet(DbMetadataEdge):
    NAME = "Edge - DisGeNet - Gene Disease"
    # URL = "http://www.disgenet.org/ds/DisGeNET/results/curated_gene_disease_associations.tsv.gz"
    URL = "http://www.disgenet.org/static/disgenet_ap1/files/downloads/curated_gene_disease_associations.tsv.gz"
    OFILE_NAME = "DisGeNet_gene_disease.tsv.gz"
    COLS = [
        "geneID", "geneSym", "DSI", "DPI", "umlsID", "disName", "diseaseType",
        "diseaseClass", "diseaseSemanticType", "score", "EI", "YearInitial",
        "YearFinal", "NofPmids", "NofSnps", "source",
    ]
    FILTER_COLS = ["geneID", "umlsID", "score"]
    HEADER = 1
    DB_TYPE = DbType.DB_EDGE_DISGENET

    def __init__(self):
        super().__init__(
            url=DbMetaEdgeDisGeNet.URL,
            ofile_name=DbMetaEdgeDisGeNet.OFILE_NAME,
            dbType=DbMetaEdgeDisGeNet.DB_TYPE,
        )
106762
load("//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external") def dependencies(): import_external( name = "com_fasterxml_jackson_module_jackson_module_paranamer", artifact = "com.fasterxml.jackson.module:jackson-module-paranamer:2.9.6", artifact_sha256 = "dfd66598c0094d9a7ef0b6e6bb3140031fc833f6cf2e415da27bc9357cdfe63b", srcjar_sha256 = "375052d977a4647b49a8512a2e269f3296c455544f080a94bc8855dbfd24ad75", deps = [ "@com_fasterxml_jackson_core_jackson_databind", "@com_thoughtworks_paranamer_paranamer" ], ) import_external( name = "com_fasterxml_jackson_module_jackson_module_scala_2_12", artifact = "com.fasterxml.jackson.module:jackson-module-scala_2.12:2.9.6", artifact_sha256 = "c775854c1da6fc4602d5850b65513d18cb9d955b3c0f64551dd58ccb24a85aba", srcjar_sha256 = "5446419113a48ceb4fa802cd785edfc06531ab32763d2a2f7906293d1e445957", deps = [ "@com_fasterxml_jackson_core_jackson_annotations", "@com_fasterxml_jackson_core_jackson_core", "@com_fasterxml_jackson_core_jackson_databind", "@com_fasterxml_jackson_module_jackson_module_paranamer", "@org_scala_lang_scala_library", "@org_scala_lang_scala_reflect" ], )
106790
def draw_model(lik, mean, covariance):
    if lik == "normal":
        var = "𝐲"
    else:
        var = "𝐳"
    msg = f"{var} ~ 𝓝({mean}, {covariance})"
    msg += _lik_formulae(lik)
    return msg


def draw_alt_hyp_table(hyp_num, stats, effsizes):
    from limix._display import Table

    cols = ["lml", "cov. effsizes", "cand. effsizes"]
    table = Table(cols, index=_describe_index())
    table.add_column(_describe(stats, f"lml{hyp_num}"))
    df = effsizes[f"h{hyp_num}"]
    table.add_column(_describe(df[df["effect_type"] == "covariate"], "effsize"))
    table.add_column(_describe(df[df["effect_type"] == "candidate"], "effsize"))
    return "\n" + table.draw() + "\n"


def draw_lrt_table(test_titles, pv_names, stats):
    from limix._display import Table

    table = Table(test_titles, index=_describe_index())
    for name in pv_names:
        pv = stats[name].describe().iloc[1:]
        table.add_column(pv)
    return table.draw()


def _lik_formulae(lik):
    if lik == "bernoulli":
        return " for yᵢ ~ Bern(μᵢ=g(zᵢ)) and g(x)=1/(1+e⁻ˣ)\n"
    elif lik == "probit":
        return " for yᵢ ~ Bern(μᵢ=g(zᵢ)) and g(x)=Φ(x)\n"
    elif lik == "binomial":
        return " for yᵢ ~ Binom(μᵢ=g(zᵢ), nᵢ) and g(x)=1/(1+e⁻ˣ)\n"
    elif lik == "poisson":
        return " for yᵢ ~ Poisson(λᵢ=g(zᵢ)) and g(x)=eˣ\n"
    return "\n"


def _describe(df, field):
    return df[field].describe().iloc[1:]


def _describe_index():
    return ["mean", "std", "min", "25%", "50%", "75%", "max"]
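For instance, the formatter produces one-line model descriptions like the following (the mean and covariance labels are invented):

print(draw_model("bernoulli", "𝜶", "K"))
# 𝐳 ~ 𝓝(𝜶, K) for yᵢ ~ Bern(μᵢ=g(zᵢ)) and g(x)=1/(1+e⁻ˣ)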
106791
from .map_aiter import map_aiter
from .join_aiters import join_aiters


def message_stream_to_event_stream(event_template, message_stream):
    """
    This tweaks each message from message_stream by wrapping it with a dictionary
    populated with the given template, putting the message at the top level under
    "message".
    """
    template = dict(event_template)

    def adaptor(message):
        event = dict(template)
        event.update(message=message)
        return event

    return map_aiter(adaptor, message_stream)


def rws_to_event_aiter(rws_aiter, reader_to_message_stream):
    def rws_to_reader_event_template_adaptor(rws):
        return rws, rws["reader"]

    def reader_event_template_to_event_stream_adaptor(rws_reader):
        rws, reader = rws_reader
        return message_stream_to_event_stream(rws, reader_to_message_stream(reader))

    def adaptor(rws):
        return reader_event_template_to_event_stream_adaptor(
            rws_to_reader_event_template_adaptor(rws))

    return join_aiters(map_aiter(adaptor, rws_aiter))
106796
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

import sys
import time

import numpy as np
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable

sys.path.append('./net')
from fun import *
from protnet_att import *
from loader import *
from data_gentor import *


class Tester:
    def run(self):
        self.Tester()
        return self.Show_pred()

    def Show_embed(self):
        self.model.eval()
        pred = []
        for i in xrange(1):
            total = 0
            tar_DS = self.te_DS[i]
            n, m = self.evl_nm[i]
            out = np.zeros((400, 32*4*3))
            out1 = np.zeros((400, 6))
            for batch_idx, (X, s_idx) in enumerate(tar_DS):
                s_idx = s_idx.numpy()
                out1[total:total+2, :6] = s_idx
                X, Y = Variable(X.cuda()), Variable(self.Y.cuda())
                X = X.view(-1, 1, 128, 160)
                pred = self.model(X, self.Xavg, self.Xstd, n, m)
                #_, pred = torch.max(pred.data, 1)
                out[total:total+2, :] = pred.data
                total += 2
        return out, out1

    def Show_pred(self):
        self.model.eval()
        pred = []
        for i in xrange(1):
            total = 0
            tar_DS = self.te_DS[i]
            n, m = self.evl_nm[i]
            out = np.zeros((400, 7))
            for batch_idx, (X, s_idx) in enumerate(tar_DS):
                s_idx = s_idx.numpy()
                out[total:total+2, :6] = s_idx
                X, Y = Variable(X.cuda()), Variable(self.Y.cuda())
                X = X.view(-1, 1, 128, 160)
                pred = self.model(X, self.Xavg, self.Xstd, n, m)
                _, pred = torch.max(pred.data, 1)
                out[total:total+2, -1] = pred.data
                total += 2
        return out

    def __init__(self, args):
        self.args = args

        # load data tidx=[tr_idx, tr_nidx, te_idx, te_nidx]
        ESC_X, ESC_Y, trvate = load_data(args.dn)
        tridx, vaidx, teidx = trvate

        self.Y = torch.LongTensor([self.args.way - 1]).repeat(args.bs)
        self.model = nn.DataParallel(net().cuda())

        # data builder
        # default: n-ways m-shots
        self.tr_DS = B_DS(ESC_X, ESC_Y, tridx, self.args)
        self.va_DS = B_DS(ESC_X, ESC_Y, vaidx, self.args, mode='Test')
        te_DS_n5m5 = B_DS(ESC_X, ESC_Y, teidx, self.args, n=5, m=5, mode='Test')
        te_DS_n5m1 = B_DS(ESC_X, ESC_Y, teidx, self.args, n=5, m=1, mode='Test')
        te_DS_n10m5 = B_DS(ESC_X, ESC_Y, teidx, self.args, n=10, m=5, mode='Test')
        te_DS_n10m1 = B_DS(ESC_X, ESC_Y, teidx, self.args, n=10, m=1, mode='Test')
        self.te_DS = [te_DS_n5m1, te_DS_n5m5, te_DS_n10m1, te_DS_n10m5]
        self.evl_nm = [[5, 1], [5, 5], [10, 1], [10, 5]]

        # load avg and std for Z-score
        Xavg = torch.tensor(ESC_X[tridx].mean(keepdims=1).astype('float32'))
        Xstd = torch.tensor(ESC_X[tridx].std(keepdims=1).astype('float32'))
        self.Xavg, self.Xstd = Variable(Xavg.cuda()), Variable(Xstd.cuda())

        self.show_dataset_model_params()
        self.load_pretrained_model(self.model)

    def Tester(self, vate='Test'):
        print '\n'
        st = time.time()
        self.model.eval()
        te_print = []
        for i in xrange(4):
            total = 0
            correct = 0
            tar_DS = self.te_DS[i]
            n, m = self.evl_nm[i]
            for batch_idx, (X, _) in enumerate(tar_DS):
                total += X.size(0)
                X, Y = Variable(X.cuda()), Variable(self.Y.cuda())
                X = X.view(-1, 1, 128, 160)
                pred = self.model(X, self.Xavg, self.Xstd, n, m)
                # Max
                _, pred = torch.max(pred.data, 1)
                correct += (pred == n-1).sum().item()
            oprint = '%s %d-way %d-shot acc:%f Time:%1f' % (vate, n, m, correct/float(total), time.time() - st)
            print oprint
            te_print.append(oprint)
            if vate != 'Test':
                return oprint, correct/float(total)
        return te_print

    def load_pretrained_model(self, model):
        # pre-training
        if os.path.exists(self.args.pmp):
            pretrained_model = torch.load(self.args.pmp)
            model_param = model.state_dict()
            for k in pretrained_model['state_dict'].keys():
                try:
                    model_param[k].copy_(pretrained_model['state_dict'][k])
                    print k
                except:
                    print '[ERROR] Load pre-trained model %s' % (k)
                    #self.model.apply(model_init)
                    #break
            print 'Load Pre_trained Model : ' + self.args.pmp
        else:
            print 'Learning from scratch'
            #self.model.apply(model_init)

    def show_dataset_model_params(self):
        # show model structure
        print self.model
        # show params
        print show_model_params(self.model)
106810
from typing import List


class Solution:
    def getRow(self, rowIndex: int) -> List[int]:
        if rowIndex == 0:
            return [1]
        s = [1]
        for i in range(1, rowIndex + 1):
            # next row: pairwise sums of the previous row padded with zeros
            s = [sum(x) for x in zip([0] + s, s + [0])]
        return s
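# Quick sanity check of getRow (hypothetical driver, not part of the original
# solution): row k of Pascal's triangle has k + 1 entries.
if __name__ == '__main__':
    assert Solution().getRow(0) == [1]
    assert Solution().getRow(3) == [1, 3, 3, 1]
    assert Solution().getRow(4) == [1, 4, 6, 4, 1]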
106823
from pipeline.pipeline import *
from pipeline.entitylinker import *
from pipeline.triplealigner import *
from pipeline.datareader import DBpediaAbstractsDataReader
from pipeline.writer import *
from pipeline.coreference import *
from pipeline.placeholdertagger import *
from utils.triplereader import *
from utils.triplereaderitems import *
from utils.triplereadertriples import *
from utils.labelreader import *
from pipeline.filter import *

# Reading the DBpedia Abstracts Dataset
reader = DBpediaAbstractsDataReader('./datasets/wikipedia-abstracts/csv/sample-dbpedia-abstracts-es.csv')
trip_read = TripleReader('./datasets/wikidata/sample-wikidata-triples.csv')
label_read = LabelReader('./datasets/wikidata/sample-wikidata-labels.csv', 'es')
trip_read_items = TripleReaderItems('./datasets/wikidata/sample-wikidata-triples.csv')
trip_read_trip = TripleReaderTriples('./datasets/wikidata/sample-wikidata-triples.csv')

keyword_ent_linker = KeywordMatchingEntityLinker(trip_read_items, label_read)
salign = SimpleAligner(trip_read)
# prop = WikidataPropertyLinker('./datasets/wikidata/wikidata-properties.csv')
date = DateLinker()
# SPOalign = SPOAligner(trip_read)
nsalign = NoSubjectAlign(trip_read)
noalign = NoAligner(trip_read_trip)

filter_entities = ['http://www.wikidata.org/entity/Q4167410', 'http://www.wikidata.org/entity/Q13406463']
ent_filt = EntityTypeFilter(trip_read_trip, filter_entities)
sen_lim = SentenceLimiter()
main_ent_lim = MainEntityLimiter()
writer = JsonWriter('./out-test', "", 1)
prop_tag = PropertyPlaceholderTagger()
writer_triples = CustomeWriterTriples('./out-test', "", 1)
writer_entities = CustomeWriterEntities('./out-test', "", 1)

for d in reader.read_documents():
    # print(d.title)
    # print(label_read.get(d.docid))
    try:
        print("Processing Document Title: %s ..." % d.title)
        if not ent_filt.run(d):
            continue
        d = keyword_ent_linker.run(d)
        d = date.run(d)
        # d = link.run(d)
        # d = nsalign.run(d)
        # d = coref.run(d)
        d = salign.run(d)
        # d = prop.run(d)
        # d = SPOalign.run(d)
        d = sen_lim.run(d, 0)
        if not main_ent_lim.run(d):
            continue
        d = noalign.run(d)
        d = prop_tag.run(d)
        writer_triples.run(d)
        writer_entities.run(d)
        # writer.run(d)
        print("Document Title: %s \t Number of Annotated Entities %s \t Number of Annotated Triples %s"
              % (d.title, len(d.entities), len(d.triples)))
    except Exception as e:
        print("error Processing document %s" % d.title)
106828
def countWords(s):
    # count words by splitting on whitespace; unlike the naive
    # space-counting approach, this handles empty strings and
    # runs of consecutive spaces correctly
    return len(s.split())


print(countWords("Hello World This is Rituraj"))
106832
import sys, os
from basic.common import checkToSkip, ROOT_PATH, makedirsforfile
from basic.annotationtable import readConcepts, readAnnotationsFrom, writeAnnotationsTo, writeConceptsTo
from basic.data import readImageSet

if __name__ == '__main__':
    args = sys.argv[1:]
    rootpath = '/var/scratch2/xirong/VisualSearch'
    srcCollection = args[0]
    annotationName = args[1]
    dstCollection = args[2]
    overwrite = 0

    concepts = readConcepts(srcCollection, annotationName, rootpath)
    todo = []
    for concept in concepts:
        resfile = os.path.join(rootpath, dstCollection, 'Annotations', 'Image', annotationName, '%s.txt' % concept)
        if checkToSkip(resfile, overwrite):
            continue
        todo.append(concept)

    if not todo:
        print('nothing to do')
        sys.exit(0)

    imset = set(readImageSet(dstCollection, dstCollection, rootpath))

    for concept in todo:
        names, labels = readAnnotationsFrom(srcCollection, annotationName, concept, rootpath=rootpath)
        selected = [x for x in zip(names, labels) if x[0] in imset]
        print(concept, len(selected))
        writeAnnotationsTo([x[0] for x in selected], [x[1] for x in selected],
                           dstCollection, annotationName, concept, rootpath=rootpath)

    writeConceptsTo(concepts, dstCollection, annotationName, rootpath)
106835
import sys
sys.path.append('./')
import numpy as np
import torch
import glob
import cv2
from skimage import img_as_float32 as img_as_float
from skimage import img_as_ubyte
import time
import os
from codes.models.modules.VDN import VDN as DN
from codes.data.util import imresize_np


def denoise(noisy_path, pretrained_path, save_path, scale=4, LR_path=None):
    use_gpu = True
    C = 3
    dep_U = 4

    # load the pretrained model
    print('Loading the Model')
    checkpoint = torch.load(pretrained_path)
    net = DN(C, dep_U=dep_U, wf=64)
    if use_gpu:
        net = torch.nn.DataParallel(net).cuda()
        net.load_state_dict(checkpoint)
    else:
        load_state_dict_cpu(net, checkpoint)
    net.eval()

    files = glob.glob(os.path.join(noisy_path, '*.png'))
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    for i in range(len(files)):
        # read as BGR and convert to RGB once (the original converted twice,
        # which undid itself)
        im_noisy = img_as_float(cv2.cvtColor(cv2.imread(files[i]), cv2.COLOR_BGR2RGB))
        im_noisy = torch.from_numpy(im_noisy.transpose((2, 0, 1))[np.newaxis, ])
        _, C, H, W = im_noisy.shape
        # crop so spatial dims are divisible by 2**dep_U (U-Net depth)
        if H % 2 ** dep_U != 0:
            H -= H % 2 ** dep_U
        if W % 2 ** dep_U != 0:
            W -= W % 2 ** dep_U
        im_noisy = im_noisy[:, :, :H, :W]  # crop the spatial axes, not batch/channel

        if use_gpu:
            im_noisy = im_noisy.cuda()
            print('Begin Testing on GPU')
        else:
            print('Begin Testing on CPU')
        with torch.autograd.set_grad_enabled(False):
            tic = time.time()
            phi_Z = net(im_noisy, 'test')
            toc = time.time() - tic
            err = phi_Z.cpu().numpy()
        print('Time: %.5f' % toc)

        if use_gpu:
            im_noisy = im_noisy.cpu().numpy()
        else:
            im_noisy = im_noisy.numpy()
        im_denoise = im_noisy - err[:, :C, ]
        im_denoise = np.transpose(im_denoise.squeeze(), (1, 2, 0))
        im_denoise = img_as_ubyte(im_denoise.clip(0, 1))
        # im_denoise is RGB; convert back to BGR before handing it to cv2.imwrite
        im_denoise = cv2.cvtColor(im_denoise, cv2.COLOR_RGB2BGR)
        file_name = files[i].split('/')[-1]
        cv2.imwrite(os.path.join(save_path, file_name), im_denoise)

        if LR_path is not None:
            if not os.path.exists(LR_path):
                os.mkdir(LR_path)
            LR_denoise = imresize_np(im_denoise, 1 / scale, True)
            cv2.imwrite(os.path.join(LR_path, file_name), LR_denoise)


def load_state_dict_cpu(net, state_dict0):
    state_dict1 = net.state_dict()
    for name, value in state_dict1.items():
        assert 'module.' + name in state_dict0
        state_dict1[name] = state_dict0['module.' + name]
    net.load_state_dict(state_dict1)


def main():
    # Validation paths are left empty here, as in the original;
    # fill them in before running.
    noisy_path = ''
    save_path = ''
    LR_path = ''
    pretrained_path = ''  # was undefined in the original script
    denoise(noisy_path, pretrained_path, save_path, 4, LR_path)


if __name__ == '__main__':
    main()
106847
from awxkit.api.resources import resources

from . import base
from . import page


class Dashboard(base.Base):
    pass


page.register_page(resources.dashboard, Dashboard)
106942
import pytest

pytestmark = [
    pytest.mark.requires_salt_states("echo.text"),
]


def test_echoed(salt_call_cli):
    echo_str = "Echoed!"
    ret = salt_call_cli.run("state.single", "echo.echoed", echo_str)
    assert ret.exitcode == 0
    assert ret.json
    assert ret.json == echo_str


def test_reversed(salt_call_cli):
    echo_str = "Echoed!"
    expected = echo_str[::-1]
    ret = salt_call_cli.run("state.single", "echo.reversed", echo_str)
    assert ret.exitcode == 0
    assert ret.json
    assert ret.json == expected
106983
import json


def convert(path: str):
    with open(path, 'r') as f:
        data = json.load(f)
    output_path = path.replace('convert', 'triplets').replace('json', 'txt')
    with open(output_path, 'w') as f:
        for ins in data:
            temp_ins = []
            for a, o in zip(ins['aspects'], ins['opinions']):
                temp_ins.append(([x for x in range(a['from'], a['to'])],
                                 [x for x in range(o['from'], o['to'])],
                                 a['polarity']))
            f.write(ins['raw_words'] + '####' + str(temp_ins) + '\n')
    return


if __name__ == '__main__':
    convert('14lap/train_convert.json')
    convert('14lap/test_convert.json')
    convert('14res/train_convert.json')
    convert('14res/test_convert.json')
    convert('15res/train_convert.json')
    convert('15res/test_convert.json')
    convert('16res/train_convert.json')
    convert('16res/test_convert.json')
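# Illustrative sketch of the record layout convert() assumes (field names are
# taken from the code above; the concrete values are made up):
#
#   input JSON instance:
#     {"raw_words": "the screen is great",
#      "aspects":  [{"from": 1, "to": 2, "polarity": "POS"}],
#      "opinions": [{"from": 3, "to": 4}]}
#
#   output line:
#     the screen is great####[([1], [3], 'POS')]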
107028
import opendbpy as odb
import os

current_dir = os.path.dirname(os.path.realpath(__file__))
tests_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
opendb_dir = os.path.abspath(os.path.join(tests_dir, os.pardir))
data_dir = os.path.join(tests_dir, "data")

db = odb.dbDatabase.create()
odb.read_lef(db, os.path.join(data_dir, "Nangate45", "NangateOpenCellLibrary.mod.lef"))
odb.read_lef(db, os.path.join(data_dir, "ICEWall", "dummy_pads.lef"))
odb.read_def(db, os.path.join(data_dir, "ICEWall", "octilinear.def"))

chip = db.getChip()
if chip is None:
    exit("Read DEF Failed")

result = odb.write_def(chip.getBlock(), os.path.join(opendb_dir, "build", "generated_octilinear.def"))
assert result == 1, "DEF not written"

db_file = os.path.join(opendb_dir, "build", "export_oct.db")
export_result = odb.write_db(db, db_file)
if export_result != 1:
    exit("Export DB Failed")

new_db = odb.dbDatabase.create()
new_db = odb.read_db(new_db, db_file)

if odb.db_diff(db, new_db):
    exit("Error: Difference found between exported and imported DB")
107065
from ..core.tooling.htstrings import HeadTailString


class WalletURL(HeadTailString):
    __head__ = "https://edge.qiwi.com/"


class urls:
    me = WalletURL("person-profile/v1/profile/current")
    identification = WalletURL("identification/v1/persons/{}/identification")
    history = WalletURL("payment-history/v2/persons/{}/payments")
    stats = WalletURL("payment-history/v2/persons/{}/payments/total")
    cheque = WalletURL("payment-history/v1/transactions/{}/cheque/file")
    request_cheque = WalletURL("payment-history/v1/transactions/{}/cheque/send")
    payment_info = WalletURL("payment-history/v2/transactions/{}")

    class web_hooks:
        register = WalletURL("payment-notifier/v1/hooks")
        active = WalletURL("/active", head=register)
        test = WalletURL("/test", head=register)
        delete = WalletURL("/{}", head=register)

    class balance:
        base = WalletURL("funding-sources/v2/persons/")
        balance = WalletURL("{}/accounts", head=base)
        available_aliases = WalletURL("/offer", head=balance)
        set_new_balance = WalletURL("/{}", head=balance)

    class payments:
        base = WalletURL("sinap/api/v2/terms/{}/payments")
        providers = WalletURL("", head="https://qiwi.com/mobile/detect.action")
        commission = WalletURL("sinap/providers/{}/onlineCommission")
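# HeadTailString itself lives in ..core.tooling.htstrings and is not shown
# here. A minimal sketch of the behaviour the urls classes above appear to
# rely on (illustration only, not the library's actual implementation): a str
# subclass whose constructor prepends a head (the class-level __head__ by
# default) to the given tail, so nested endpoints can chain off an
# already-built URL via the head= keyword.
class _HeadTailStringSketch(str):
    __head__ = ""

    def __new__(cls, tail, head=None):
        head = cls.__head__ if head is None else head
        return super().__new__(cls, f"{head}{tail}")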
107087
import os
import subprocess
import sys

config_dir_path = None
history_file_path = None
log_file_path = None


def open(path):
    # note: intentionally shadows the builtin within this module
    if sys.platform == 'darwin':
        subprocess.Popen(['open', path])
    elif sys.platform == 'win32':
        # 'start' is a cmd builtin rather than an executable, so
        # Popen(['start', '', path]) fails; os.startfile is the
        # direct equivalent on Windows
        os.startfile(path)
    else:
        subprocess.Popen(['xdg-open', path])


def open_config_dir():
    open(config_dir_path)


def open_history_file():
    open(history_file_path)


def open_log_file():
    open(log_file_path)
107128
from packaging import version

from .code_cell import Cell
from .nbresult import NbResult
from .nbglobals import push_globals
from .cell_conductor import *

import os
import sys
import errno
import warnings

import nbformat

SUPPORTED_LANGUAGES = [
    "python"
]

PYTHON_MIN_REQ = "3.5"
KNOWN_NBFORMAT = 4


class Notebook:
    def __init__(self, filename):
        self.__filename = filename
        self.__fp = self.__validate(filename)
        self.__NotebookNode = self.__nbconvert(self.__fp)
        self.cells = list()
        self.state = {}
        self.__extract_source()

    def __validate(self, filename):
        if not os.path.isfile(filename):
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
        try:
            f = open(filename)
            return f
        except IOError:
            print("File " + str(filename) + " could not be opened for processing.")
            raise

    def __nbconvert(self, fp):
        res = nbformat.read(fp, KNOWN_NBFORMAT)
        if res['metadata']['kernelspec']['language'] not in SUPPORTED_LANGUAGES:
            raise ImportError("Languages other than Python are not currently supported.")
        if version.parse(res['metadata']['language_info']['version']) < version.parse(PYTHON_MIN_REQ):
            raise ImportError("Conveyor currently only supports Python " + PYTHON_MIN_REQ + " and above.")
        if int(res['metadata']['language_info']['version'][0]) != sys.version_info[0]:
            warnings.warn("You are trying to run code that is written in Python "
                          + str(res['metadata']['language_info']['version'][0])
                          + " in Python " + str(sys.version_info[0]) + ". Errors may occur.")
        return res

    def __extract_source(self):
        if not self.__NotebookNode:
            print("No notebook to extract. See nbconvert details.")
            return False
        code_cell_idx = 0
        all_cells = self.__NotebookNode['cells']
        for cell in all_cells:
            if cell['cell_type'] == 'code':
                self.cells.append(Cell(code_cell_idx, cell['source']))
                code_cell_idx += 1

    # TODO: See how this handles compile/runtime errors
    def run(self, start_cell_idx=None, select_cells=None, until_variable=None,
            from_state=None, import_globals=False):
        """
        Executes notebook code cells.

        :param start_cell_idx: (optional) Index of code cell to begin execution at.
            Useful for intercepting variables in notebooks for pipelines.
        :param select_cells: (optional) List of indices in order of select code cells to run.
            By default, all code cells will be run in order.
        :param until_variable: (optional) Name of variable to halt execution once acquired.
            If variable does not exist, will run all cells.
        :param from_state: (optional) Initialized values before code execution. Used in pipelines.
        :param import_globals: (optional) Set globals from notebook to globals in current
            workspace. False by default.
        :return: A list of dictionaries containing cell indexes, state information, and outputs.
        """
        custom_aggregate = NbResult()
        if from_state:
            self.state = from_state
        if until_variable:
            cell_idx = 0
            if start_cell_idx:
                cell_idx = start_cell_idx
            while until_variable not in self.state and cell_idx < len(self.cells):
                cell_output = run_cell(self, cell_idx)
                custom_aggregate.append(cell_output)
                cell_idx += 1
            if import_globals:
                push_globals(custom_aggregate, result_type=NbResult)
            return custom_aggregate
        elif start_cell_idx:
            cell_idx = start_cell_idx
            while cell_idx < len(self.cells):
                cell_output = run_cell(self, cell_idx)
                custom_aggregate.append(cell_output)
                cell_idx += 1
            if import_globals:
                push_globals(custom_aggregate, result_type=NbResult)
            return custom_aggregate
        elif select_cells:
            for cell_idx in select_cells:
                cell_output = run_cell(self, cell_idx)
                custom_aggregate.append(cell_output)
            if import_globals:
                push_globals(custom_aggregate, result_type=NbResult)
            return custom_aggregate
        aggregate = NbResult(run_all(self, state=from_state))
        if import_globals:
            push_globals(aggregate, result_type=NbResult)
        return aggregate
107142
# warp destinations: Victoria Island, Orbis
# (renamed from `map` to avoid shadowing the builtin)
maps = [200090710, 200090610]
sm.sendSay("Where would you like to go? \r\n#L0#Victoria Island#l\r\n#L1#Orbis#l")
# `answer` (the selected option index) is presumably supplied by the
# quest-script runtime that executes this file
sm.warp(maps[answer], 0)
107204
import importlib
import site
from abc import abstractmethod
from typing import (
    TYPE_CHECKING,
    Any,
    Iterable,
    Iterator,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    Union,
    cast,
)

import django.apps
import django.core.checks
from rest_framework.serializers import ModelSerializer, Serializer

from ..ast.protocols import DisableCommentProtocol
from ..ast.source_provider import SourceProvider
from ..check_id import DRF_META_CHECKS_NAMES, CheckId
from ..forms import AttrsForm
from ..registry import ChecksConfig, registry
from .base_checks import BaseCheck

if TYPE_CHECKING:
    cached_property = property
else:
    from django.utils.functional import cached_property


class DisableCommentProvider(DisableCommentProtocol):
    def __init__(self, serializer_class: Type[Serializer]):
        self.serializer_class = serializer_class

    @cached_property
    def _source_provider(self) -> SourceProvider:
        return SourceProvider(self.serializer_class)

    def is_disabled_by_comment(self, check_id: str) -> bool:
        check = CheckId.find_check(check_id)
        if check in DRF_META_CHECKS_NAMES:
            lines = (self._source_provider.source or "").splitlines()
        # find line starting with `class Meta` and lowest indent
            try:
                lineno, _ = sorted(
                    (
                        (i, line)
                        for i, line in enumerate(lines, 1)
                        if line.strip().startswith(("class Meta(", "class Meta:"))
                    ),
                    key=lambda a: a[1].find("class Meta"),
                )[0]
            except IndexError:  # indexing an empty list raises IndexError, not StopIteration
                return False
            return check in self._source_provider.get_disabled_checks_for_line(lineno)
        return check in self._source_provider.get_disabled_checks_for_line(1)


def _collect_serializers(
    serializers: Iterable[Type[Serializer]],
    visited: Optional[Set[Type[Serializer]]] = None,
) -> Iterator[Type[Serializer]]:
    visited = visited or set()
    for serializer in serializers:
        if serializer not in visited:
            visited.add(serializer)
            yield from _collect_serializers(serializer.__subclasses__(), visited)
            yield serializer


def _filter_app_serializers(
    serializers: Iterable[Type[Serializer]],
    include_apps: Optional[Iterable[str]] = None,
) -> Iterator[Type[Serializer]]:
    site_prefixes = set(site.PREFIXES)
    if include_apps is not None:
        app_paths = {
            a.path
            for a in django.apps.apps.get_app_configs()
            if a.name in include_apps
        }
        for s in serializers:
            module = importlib.import_module(s.__module__)
            if any(module.__file__.startswith(path) for path in app_paths):
                yield s
        return
    for s in serializers:
        module = importlib.import_module(s.__module__)
        if not any(module.__file__.startswith(path) for path in site_prefixes):
            yield s


def _get_serializers_to_check(
    include_apps: Optional[Iterable[str]] = None,
) -> Tuple[Iterator[Type[Serializer]], Iterator[Type[ModelSerializer]]]:
    serializer_classes = _filter_app_serializers(
        _collect_serializers(
            s for s in Serializer.__subclasses__() if s is not ModelSerializer  # type: ignore
        ),
        include_apps,
    )
    model_serializer_classes = _filter_app_serializers(
        _collect_serializers(ModelSerializer.__subclasses__()), include_apps
    )
    return (
        serializer_classes,
        cast(Iterator[Type[ModelSerializer]], model_serializer_classes),
    )


@registry.add_handler("extra_checks_drf_serializer")
def check_drf_serializers(
    checks: Iterable[Union["CheckDRFSerializer", "CheckDRFModelSerializer"]],
    config: ChecksConfig,
    app_configs: Optional[List[Any]] = None,
    **kwargs: Any,
) -> Iterator[Any]:
    model_serializer_checks = []
    serializer_checks = []
    for check in checks:
        if isinstance(check, CheckDRFModelSerializer):
            model_serializer_checks.append(check)
        else:
            serializer_checks.append(check)
    s_classes, m_classes = _get_serializers_to_check(config.include_apps)
    for s in s_classes:
        for check in serializer_checks:
            yield from check(s, DisableCommentProvider(s))
    for s in m_classes:
        for check in model_serializer_checks:
            yield from check(s, DisableCommentProvider(s))


class CheckDRFSerializer(BaseCheck):
    @abstractmethod
    def apply(
        self, serializer: Serializer, **kwargs: Any
    ) -> Iterator[django.core.checks.CheckMessage]:
        raise NotImplementedError()


class CheckDRFModelSerializer(BaseCheck):
    @abstractmethod
    def apply(
        self, serializer: ModelSerializer, **kwargs: Any
    ) -> Iterator[django.core.checks.CheckMessage]:
        raise NotImplementedError()


@registry.register("extra_checks_drf_serializer")
class CheckDRFSerializerExtraKwargs(CheckDRFModelSerializer):
    Id = CheckId.X301
    level = django.core.checks.ERROR

    def apply(
        self, serializer: ModelSerializer, **kwargs: Any
    ) -> Iterator[django.core.checks.CheckMessage]:
        if not hasattr(serializer, "Meta") or not hasattr(serializer.Meta, "extra_kwargs"):
            return
        invalid = serializer.Meta.extra_kwargs.keys() & serializer._declared_fields
        if invalid:
            yield self.message(
                "extra_kwargs mustn't include fields that are declared on the serializer.",
                hint=f"Remove extra_kwargs for fields: {', '.join(invalid)}",
                obj=serializer,
            )


@registry.register("extra_checks_drf_serializer")
class CheckDRFSerializerMetaAttribute(CheckDRFModelSerializer):
    Id = CheckId.X302
    settings_form_class = AttrsForm

    def __init__(self, attrs: List[str], **kwargs: Any) -> None:
        self.attrs = attrs
        super().__init__(**kwargs)

    def apply(
        self, serializer: ModelSerializer, **kwargs: Any
    ) -> Iterator[django.core.checks.CheckMessage]:
        meta = getattr(serializer, "Meta", None)
        for attr in self.attrs:
            if not hasattr(meta, attr):
                yield self.message(
                    f"ModelSerializer must define `{attr}` in Meta.",
                    hint=f"Add `{attr}` to serializer's Meta.",
                    obj=serializer,
                )
107254
from collections import defaultdict
import django
import csv
import sys
import os
import json

os.environ['DJANGO_SETTINGS_MODULE'] = 'carebackend.settings'
sys.path.append(os.path.dirname(__file__) + '/..')
django.setup()

from places.models import EmailSubscription

outfl = sys.argv[1]

by_place = defaultdict(list)
for sub in EmailSubscription.objects.filter(processed=False, place__email_contact__isnull=True,
                                            place__gift_card_url__isnull=True):
    by_place[sub.place.place_id].append(sub)

by_place_items = sorted(by_place.items(), key=lambda x: len(x[1]), reverse=True)

with open(outfl, 'w') as fl:
    writer = csv.DictWriter(fl, fieldnames=['place_id', 'Place', 'Place Email', 'Website',
                                            'Count', 'Emails', 'Gift Card URL'])
    writer.writeheader()
    for place_id, items in by_place_items:
        place = items[0].place
        writer.writerow({
            'place_id': place.place_id,
            'Place': place.name,
            'Place Email': place.email_contact,
            'Website': place.place_url,
            'Count': len(items),
            'Emails': '; '.join([x.email for x in items]),
            'Gift Card URL': place.gift_card_url
        })
107272
from model.network import LeNet5
from saliency.vanilla_gradient import save_vanilla_gradient
from model.data import mnist_train_test_sets
import numpy as np

# Get MNIST dataset, preprocessed
train_images, train_labels, test_images, test_labels = mnist_train_test_sets()

# Load net and 98% acc weights
net = LeNet5(weights_path="15epoch_weights.pkl")

# Uncomment if you want to train or test
# net.train(training_data=train_images, training_labels=train_labels,
#           batch_size=32, epochs=3, weights_path='weights.pkl')
# net.test(test_images, test_labels)

# Uncomment if you want to filter by class
# target_image_class = 7
# target_image_indexes = [i for i in range(len(test_labels))
#                         if np.argmax(test_labels[i]) == target_image_class]
# target_images = [test_images[index] for index in target_image_indexes]
# target_labels = [test_labels[index] for index in target_image_indexes]

# Generate saliency maps for the first 10 images
target_images = train_images[:10]
target_labels = train_labels[:10]
save_vanilla_gradient(network=net, data=target_images, labels=target_labels)
107277
from statefun_tasks.messages_pb2 import Pipeline
import unittest
from statefun_tasks import in_parallel
from tests.utils import TestHarness, tasks, TaskErrorException

join_results_called = False
join_results2_called = False
join_results3_called = False
say_goodbye_called = False


@tasks.bind()
def _say_hello(first_name, last_name):
    return f'Hello {first_name} {last_name}'


@tasks.bind()
def _say_goodbye(greeting, goodbye_message):
    global say_goodbye_called
    say_goodbye_called = True
    return f'{greeting}. So now I will say {goodbye_message}'


@tasks.bind()
def _fail(*args):
    raise Exception('I am supposed to fail')


@tasks.bind()
def _join_results(results):
    global join_results_called
    join_results_called = True
    return '; '.join(results)


@tasks.bind()
def _join_results2(results):
    global join_results2_called
    join_results2_called = True
    return '; '.join(results)


@tasks.bind()
def _join_results3(results):
    global join_results3_called
    join_results3_called = True
    return '; '.join(results)


@tasks.bind()
def _print_results(results):
    return str(results)


@tasks.bind(with_state=True)
def _say_hello_with_state(initial_state, first_name, last_name):
    state = len(first_name) + len(last_name)
    return state, f'Hello {first_name} {last_name}'


@tasks.bind()
def _say_goodbye_with_state(greeting, goodbye_message):
    return f'{greeting}. So now I will say {goodbye_message}'


@tasks.bind(with_state=True)
def _join_results_with_state(state, results):
    return state, '; '.join(results) + f' {state}'


class ParallelWorkflowTests(unittest.TestCase):
    def setUp(self) -> None:
        self.test_harness = TestHarness()

    def test_parallel_workflow(self):
        pipeline = in_parallel([
            _say_hello.send("John", "Smith"),
            _say_hello.send("Jane", "Doe").continue_with(_say_goodbye, goodbye_message="see you later!"),
        ]).continue_with(_join_results)
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello <NAME>; Hello <NAME>. So now I will say see you later!')

    def test_parallel_workflow_with_state(self):
        pipeline = in_parallel([
            _say_hello_with_state.send("John", "Smith"),
            _say_hello_with_state.send("Jane", "Doe").continue_with(_say_goodbye, goodbye_message="see you later!"),
        ]).continue_with(_join_results_with_state)
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello <NAME>; Hello <NAME>. So now I will say see you later! 9')

    def test_nested_parallel_workflow(self):
        pipeline = in_parallel([
            in_parallel([
                in_parallel([
                    _say_hello.send("John", "Smith"),
                    _say_hello.send("Jane", "Doe").continue_with(_say_goodbye, goodbye_message="see you later!")
                ]).continue_with(_join_results)
            ])
        ])
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, [['Hello <NAME>; Hello <NAME>. So now I will say see you later!']])

    def test_nested_parallel_workflow_continuations(self):
        pipeline = in_parallel([
            in_parallel([
                in_parallel([
                    _say_hello.send("John", "Smith"),
                    _say_hello.send("Jane", "Doe").continue_with(_say_goodbye, goodbye_message="see you later!")
                ]).continue_with(_join_results)
            ]).continue_with(_join_results2)
        ]).continue_with(_join_results3)
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello <NAME>; Hello <NAME>. So now I will say see you later!')
        self.assertEqual(join_results_called, True)
        self.assertEqual(join_results2_called, True)
        self.assertEqual(join_results3_called, True)

    def test_continuation_into_parallel_workflow(self):
        pipeline = _say_hello.send("John", "Smith").continue_with(in_parallel([
            _say_goodbye.send(goodbye_message="see you later!"),
            _say_goodbye.send(goodbye_message="see you later!")
        ]))
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, ['Hello <NAME>. So now I will say see you later!',
                                  'Hello <NAME>. So now I will say see you later!'])

    def test_continuation_into_parallel_workflow_with_contination(self):
        pipeline = _say_hello.send("John", "Smith").continue_with(in_parallel([
            _say_goodbye.send(goodbye_message="see you later!"),
            _say_goodbye.send(goodbye_message="see you later!")
        ]).continue_with(_join_results))
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello <NAME>. So now I will say see you later!; '
                                 'Hello <NAME>. So now I will say see you later!')

    def test_continuation_into_parallel_workflow_with_two_continations(self):
        pipeline = _say_hello.send("John", "Smith").continue_with(in_parallel([
            _say_goodbye.send(goodbye_message="see you later!"),
            _say_goodbye.send(goodbye_message="see you later!"),
        ])).continue_with(_join_results).continue_with(_print_results)
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, 'Hello <NAME>. So now I will say see you later!; '
                                 'Hello <NAME>. So now I will say see you later!')

    def test_continuation_into_nested_parallel_workflow(self):
        pipeline = _say_hello.send("John", "Smith").continue_with(in_parallel([in_parallel([
            _say_goodbye.send(goodbye_message="see you later!"),
            _say_goodbye.send(goodbye_message="see you later!")
        ])]))
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, [['Hello <NAME>. So now I will say see you later!',
                                   'Hello <NAME>. So now I will say see you later!']])

    def test_parallel_workflow_with_error(self):
        global join_results_called
        join_results_called = False
        pipeline = in_parallel([
            _say_hello.send("John", "Smith"),
            _fail.send(),
            _say_goodbye.send("John", "Bye")
        ]).continue_with(_join_results)
        self.assertRaises(TaskErrorException, self.test_harness.run_pipeline, pipeline)
        self.assertEqual(join_results_called, False)
        self.assertEqual(say_goodbye_called, True)

    def test_parallel_workflow_with_error_and_continuations(self):
        global join_results_called
        join_results_called = False
        pipeline = in_parallel([
            # this chain will fail at first step
            _fail.send().continue_with(in_parallel([_say_hello.send("John", "Smith").continue_with(_join_results)])),
            _say_goodbye.send("John", "Bye")  # this chain will proceed
        ])
        # overall we will get an exception
        self.assertRaises(TaskErrorException, self.test_harness.run_pipeline, pipeline)
        self.assertEqual(join_results_called, False)
        self.assertEqual(say_goodbye_called, True)

    def test_empty_parallel_workflow(self):
        pipeline = in_parallel([])
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, ())

    def test_parallel_workflow_with_last_task_in_group_being_a_group(self):
        pipeline = in_parallel([
            _say_hello.send("John", "Smith"),
            _say_hello.send("Jane", "Doe").continue_with(_say_goodbye, goodbye_message="see you later!"),
            in_parallel([
                _say_hello.send("Bob", "Smith"),
                _say_hello.send("Tom", "Smith"),
            ]),
        ]).continue_with(_print_results)
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, "['Hello <NAME>', 'Hello <NAME>. So now I will say see you later!', "
                                 "['Hello <NAME>', 'Hello <NAME>']]")

    def test_parallel_workflow_with_max_parallelism(self):
        pipeline = in_parallel([
            _say_hello.send("Jane", "Doe"),
            in_parallel([
                _say_hello.send("Bob", "Smith").continue_with(_say_goodbye, goodbye_message="see you later!"),
                _say_hello.send("Tom", "Smith"),
            ], max_parallelism=1),
        ]).continue_with(_print_results)
        result = self.test_harness.run_pipeline(pipeline)
        self.assertEqual(result, "['Hello <NAME>', ['Hello <NAME>. So now I will say see you later!', "
                                 "'Hello <NAME>']]")


if __name__ == '__main__':
    unittest.main()
107281
import coremltools as ct


def model_convert(model_name, stride_num, H, W):
    saved_model_path = f'{model_name}/{stride_num}/saved_model_{H}x{W}'
    input_type = ct.TensorType(name='sub_2', shape=(1, H, W, 3))
    mlmodel = ct.convert(saved_model_path, inputs=[input_type], source='tensorflow')
    mlmodel.save(f'{saved_model_path}/model_coreml_float32.mlmodel')


# Convert every backbone/stride combination at 240x320 and 480x640.
# (The original script repeated these blocks by hand, including one
# duplicated mobilenet050/stride8 run; the loop covers each combination once.)
for model_name, stride_num in [
    ('mobilenet050', 'stride8'),
    ('mobilenet050', 'stride16'),
    ('mobilenet075', 'stride8'),
    ('mobilenet075', 'stride16'),
    ('mobilenet100', 'stride8'),
    ('mobilenet100', 'stride16'),
    ('resnet50', 'stride16'),
    ('resnet50', 'stride32'),
]:
    for H, W in [(240, 320), (480, 640)]:
        model_convert(model_name, stride_num, H, W)
107365
import mock
import unittest

from .helper import _ResourceMixin


class SearchTest(_ResourceMixin, unittest.TestCase):
    def _getTargetClass(self):
        from .. import Search
        return Search

    @mock.patch('requests.get')
    def test_charge(self, api_call):
        class_ = self._getTargetClass()
        self.mockResponse(api_call, """{ "object": "search", "order": "chronological", "scope": "charge", "query": "thb", "filters": { "amount": "1000..2000", "captured": "true" }, "page": 1, "per_page": 30, "location": "/search", "total_pages": 1, "total": 1, "data": [ { "object": "charge", "id": "chrg_test", "livemode": false, "location": "/charges/chrg_test", "amount": 120000, "currency": "thb", "description": "iTunes Purchase", "metadata": {}, "status": "successful", "capture": true, "authorized": true, "reversed": false, "paid": true, "transaction": "trxn_test", "source_of_fund": "card", "refunded": 0, "refunds": { "object": "list", "from": "1970-01-01T07:00:00+07:00", "to": "2017-06-06T12:47:27+07:00", "offset": 0, "limit": 20, "total": 0, "order": null, "location": "/charges/chrg_test/refunds", "data": [] }, "return_uri": null, "offsite": null, "offline": null, "reference": null, "authorize_uri": null, "failure_code": null, "failure_message": null, "card": { "object": "card", "id": "card_test", "livemode": false, "location": "/customers/cust_test/cards/card_test", "country": "us", "city": "Bangkok", "postal_code": "10240", "financing": "", "bank": "", "last_digits": "4242", "brand": "Visa", "expiration_month": 12, "expiration_year": 2020, "fingerprint": "hWA+g07yu/7ngJfMJJ0ndGFqynzm2nQ3k/yDCofKZBM=", "name": "<NAME>", "security_code_check": true, "created": "2017-05-30T09:49:54Z" }, "customer": "cust_test", "ip": null, "dispute": null, "created": "2017-06-05T08:29:14Z" } ] }""")

        querystring = {
            'query': 'thb',
            'filters': {
                'amount': '1000..2000',
                'captured': 'true'
            }
        }

        result = class_.execute('charge', **querystring)
        self.assertTrue(isinstance(result, class_))
        self.assertEqual(result.scope, 'charge')
        self.assertEqual(result.query, 'thb')
        self.assertEqual(result.total, 1)
        self.assertEqual(result._attributes['filters']['amount'], '1000..2000')
        self.assertEqual(result._attributes['filters']['captured'], 'true')
        self.assertEqual(result[0].id, 'chrg_test')
        self.assertEqual(result[0].currency, 'thb')
        self.assertEqual(result[0].amount, 120000)
        self.assertTrue(result[0].capture)

    @mock.patch('requests.get')
    def test_dispute(self, api_call):
        class_ = self._getTargetClass()
        self.mockResponse(api_call, """{ "object": "search", "order": "chronological", "scope": "dispute", "query": "dspt_test", "filters": { "status": "pending" }, "page": 1, "per_page": 30, "location": "/search", "total_pages": 1, "total": 1, "data": [ { "object": "dispute", "id": "dspt_test", "livemode": false, "location": "/disputes/dspt_test", "amount": 100000, "currency": "thb", "status": "pending", "message": null, "charge": "chrg_test", "created": "2015-03-23T05:24:39" } ] }""")

        querystring = {
            'query': 'dspt_test',
            'filters': {
                'status': 'pending'
            }
        }

        result = class_.execute('dispute', **querystring)
        self.assertTrue(isinstance(result, class_))
        self.assertEqual(result.scope, 'dispute')
        self.assertEqual(result.query, 'dspt_test')
        self.assertEqual(result.total, 1)
        self.assertEqual(result._attributes['filters']['status'], 'pending')
        self.assertEqual(result[0].id, 'dspt_test')
        self.assertEqual(result[0].status, 'pending')

    @mock.patch('requests.get')
    def test_recipient(self, api_call):
        class_ = self._getTargetClass()
        self.mockResponse(api_call, """{ "object": "search", "order": "chronological", "scope": "recipient", "query": "<EMAIL>", "filters": { "type": "individual" }, "page": 1, "per_page": 30, "location": "/search", "total_pages": 1, "total": 1, "data": [ { "object": "recipient", "id": "recp_test", "livemode": false, "location": "/recipients/recp_test", "verified": false, "active": false, "name": "<NAME>", "email": "<EMAIL>", "description": "Secondary recipient", "type": "individual", "tax_id": "1234567890", "bank_account": { "object": "bank_account", "brand": "test", "last_digits": "2345", "name": "<NAME>", "created": "2015-06-02T05:41:53Z" }, "failure_code": null, "created": "2015-06-02T05:41:53Z" } ] }""")

        querystring = {
            'query': '<EMAIL>',
            'filters': {
                'type': 'individual'
            }
        }

        result = class_.execute('recipient', **querystring)
        self.assertTrue(isinstance(result, class_))
        self.assertEqual(result.scope, 'recipient')
        self.assertEqual(result.query, '<EMAIL>')
        self.assertEqual(result.total, 1)
        self.assertEqual(result._attributes['filters']['type'], 'individual')
        self.assertEqual(result[0].id, 'recp_test')
        self.assertEqual(result[0].email, '<EMAIL>')
        self.assertEqual(result[0].type, 'individual')

    @mock.patch('requests.get')
    def test_customer(self, api_call):
        class_ = self._getTargetClass()
        self.mockResponse(api_call, """{ "object": "search", "order": "chronological", "scope": "customer", "query": "<EMAIL>", "filters": { "created": "2014-10-24" }, "page": 1, "per_page": 30, "location": "/search", "total_pages": 1, "total": 1, "data": [ { "object": "customer", "id": "cust_test", "livemode": false, "location": "/customers/cust_test", "default_card": null, "email": "<EMAIL>", "description": "<NAME> (id: 30)", "created": "2014-10-24T06:04:48Z", "cards": { "object": "list", "from": "1970-01-01T07:00:00+07:00", "to": "2014-10-24T13:04:48+07:00", "offset": 0, "limit": 20, "total": 1, "data": [ { "object": "card", "id": "card_test", "livemode": false, "location": "/customers/cust_test/cards/card_test", "country": "", "city": null, "postal_code": null, "financing": "", "last_digits": "4242", "brand": "Visa", "expiration_month": 9, "expiration_year": 2017, "fingerprint": "098f6bcd4621d373cade4e832627b4f6", "name": "Test card", "created": "2014-10-24T08:26:07Z" } ], "location": "/customers/cust_test/cards" } } ] }""")

        querystring = {
            'query': '<EMAIL>',
            'filters': {
                'created': '2014-10-24'
            }
        }

        result = class_.execute('customer', **querystring)
        self.assertTrue(isinstance(result, class_))
        self.assertEqual(result.scope, 'customer')
        self.assertEqual(result.query, '<EMAIL>')
        self.assertEqual(result.total, 1)
        self.assertEqual(result._attributes['filters']['created'], '2014-10-24')
        self.assertEqual(result[0].id, 'cust_test')
        self.assertEqual(result[0].email, '<EMAIL>')
        self.assertEqual(result[0].created, '2014-10-24T06:04:48Z')
107368
import os
from getpass import getpass

import yaml
from netmiko import ConnectHandler


def load_devices(device_file="lab_devices.yml"):
    device_dict = {}
    with open(device_file) as f:
        device_dict = yaml.safe_load(f)
    return device_dict


if __name__ == "__main__":
    # Code so automated tests will run properly
    # Check for environment variable, if that fails, use getpass().
    password = (
        os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
    )

    device_dict = load_devices()
    arista1 = device_dict["arista1"]

    cfg_changes = ["vlan 500", "name gold500"]

    for device in (arista1,):
        device["password"] = password
        net_connect = ConnectHandler(**device)
        # Disable cmd_verify
        output = net_connect.send_config_set(cfg_changes, cmd_verify=False)
        output += net_connect.save_config()
        print(output)
        net_connect.disconnect()
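# The script above expects lab_devices.yml to map device names to Netmiko
# ConnectHandler keyword arguments (minus the password, which is injected at
# runtime). A plausible sketch; the host and exact fields are illustrative,
# not taken from the original repository:
#
#   arista1:
#     device_type: arista_eos
#     host: arista1.lab.example.com
#     username: admin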
107374
from datetime import datetime, date
from marqeta.response_models.currency_conversion import CurrencyConversion
from marqeta.response_models.response import Response
from marqeta.response_models.merchant_response_model import MerchantResponseModel
from marqeta.response_models.store_response_model import StoreResponseModel
from marqeta.response_models.transaction_card_acceptor import TransactionCardAcceptor
from marqeta.response_models.cardholder_balance import CardholderBalance
from marqeta.response_models.gpa_returns import GpaReturns
from marqeta.response_models.gpa_response import GpaResponse
from marqeta.response_models.program_transfer_response import ProgramTransferResponse
from marqeta.response_models.fee_transfer_response import FeeTransferResponse
from marqeta.response_models.peer_transfer_response import PeerTransferResponse
from marqeta.response_models.msa_order_response import MsaOrderResponse
from marqeta.response_models.msa_returns import MsaReturns
from marqeta.response_models.offer_order_response import OfferOrderResponse
from marqeta.response_models.auto_reload_model import AutoReloadModel
from marqeta.response_models.deposit_deposit_response import DepositDepositResponse
from marqeta.response_models.real_time_fee_group import RealTimeFeeGroup
from marqeta.response_models.fee import Fee
from marqeta.response_models.chargeback_response import ChargebackResponse
from marqeta.response_models.network_fee_model import NetworkFeeModel
from marqeta.response_models.digital_wallet_token import DigitalWalletToken
from marqeta.response_models.cardholder_metadata import CardholderMetadata
from marqeta.response_models.business_metadata import BusinessMetadata
from marqeta.response_models.card_metadata import CardMetadata
from marqeta.response_models.acquirer import Acquirer
from marqeta.response_models.fraud import Fraud
from marqeta.response_models.pos import Pos
from marqeta.response_models.address_verification_model import AddressVerificationModel
from marqeta.response_models.card_security_code_verification import CardSecurityCodeVerification
from marqeta.response_models.transaction_metadata import TransactionMetadata
from marqeta.response_models.user_card_holder_response import UserCardHolderResponse
from marqeta.response_models.cardholder_authentication_data import CardholderAuthenticationData
from marqeta.response_models import datetime_object
import json
import re


class TransactionModel(object):
    def __init__(self, json_response):
        self.json_response = json_response

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    @staticmethod
    def json_serial(o):
        if isinstance(o, datetime) or isinstance(o, date):
            return o.__str__()

    @property
    def type(self):
        return self.json_response.get('type', None)

    @property
    def state(self):
        return self.json_response.get('state', None)

    @property
    def token(self):
        return self.json_response.get('token', None)

    @property
    def user_token(self):
        return self.json_response.get('user_token', None)

    @property
    def business_token(self):
        return self.json_response.get('business_token', None)

    @property
    def acting_user_token(self):
        return self.json_response.get('acting_user_token', None)

    @property
    def card_token(self):
        return self.json_response.get('card_token', None)

    @property
    def duration(self):
        return self.json_response.get('duration', None)

    @property
    def created_time(self):
        if 'created_time' in self.json_response:
            return datetime_object('created_time', self.json_response)

    @property
    def user_transaction_time(self):
        if 'user_transaction_time' in self.json_response:
            return datetime_object('user_transaction_time', self.json_response)

    @property
    def settlement_date(self):
        if 'settlement_date' in self.json_response:
            return datetime_object('settlement_date', self.json_response)

    @property
    def request_amount(self):
        return self.json_response.get('request_amount', None)

    @property
    def amount(self):
        return self.json_response.get('amount', None)

    @property
    def currency_conversion(self):
        if 'currency_conversion' in self.json_response:
            return CurrencyConversion(self.json_response['currency_conversion'])

    @property
    def issuerInterchangeAmount(self):
        return self.json_response.get('issuerInterchangeAmount', None)

    @property
    def currency_code(self):
        return self.json_response.get('currency_code', None)

    @property
    def approval_code(self):
        return self.json_response.get('approval_code', None)

    @property
    def response(self):
        if 'response' in self.json_response:
            return Response(self.json_response['response'])

    @property
    def preceding_related_transaction_token(self):
        return self.json_response.get('preceding_related_transaction_token', None)

    @property
    def incremental_authorization_transaction_tokens(self):
        return self.json_response.get('incremental_authorization_transaction_tokens', None)

    @property
    def merchant(self):
        if 'merchant' in self.json_response:
            return MerchantResponseModel(self.json_response['merchant'])

    @property
    def store(self):
        if 'store' in self.json_response:
            return StoreResponseModel(self.json_response['store'])

    @property
    def card_acceptor(self):
        if 'card_acceptor' in self.json_response:
            return TransactionCardAcceptor(self.json_response['card_acceptor'])

    @property
    def gpa(self):
        if 'gpa' in self.json_response:
            return CardholderBalance(self.json_response['gpa'])

    @property
    def gpa_order_unload(self):
        if 'gpa_order_unload' in self.json_response:
            return GpaReturns(self.json_response['gpa_order_unload'])

    @property
    def gpa_order(self):
        if 'gpa_order' in self.json_response:
            return GpaResponse(self.json_response['gpa_order'])

    @property
    def program_transfer(self):
        if 'program_transfer' in self.json_response:
            return ProgramTransferResponse(self.json_response['program_transfer'])

    @property
    def fee_transfer(self):
        if 'fee_transfer' in self.json_response:
            return FeeTransferResponse(self.json_response['fee_transfer'])

    @property
    def peer_transfer(self):
        if 'peer_transfer' in self.json_response:
            return PeerTransferResponse(self.json_response['peer_transfer'])

    @property
    def msa_orders(self):
        if 'msa_orders' in self.json_response:
            return [MsaOrderResponse(val) for val in self.json_response['msa_orders']]

    @property
    def msa_order_unload(self):
        if 'msa_order_unload' in self.json_response:
            return MsaReturns(self.json_response['msa_order_unload'])

    @property
    def offer_orders(self):
        if 'offer_orders' in self.json_response:
            return [OfferOrderResponse(val) for val in self.json_response['offer_orders']]

    @property
    def auto_reload(self):
        if 'auto_reload' in self.json_response:
            return AutoReloadModel(self.json_response['auto_reload'])

    @property
    def direct_deposit(self):
        if 'direct_deposit' in self.json_response:
            return DepositDepositResponse(self.json_response['direct_deposit'])

    @property
    def polarity(self):
        return self.json_response.get('polarity', None)

    @property
    def real_time_fee_group(self):
        if 'real_time_fee_group' in self.json_response:
            return RealTimeFeeGroup(self.json_response['real_time_fee_group'])

    @property
    def fee(self):
        if 'fee' in self.json_response:
            return Fee(self.json_response['fee'])

    @property
    def chargeback(self):
        if 'chargeback' in self.json_response:
            return ChargebackResponse(self.json_response['chargeback'])

    @property
    def network(self):
        return self.json_response.get('network', None)

    @property
    def subnetwork(self):
        return self.json_response.get('subnetwork', None)

    @property
    def acquirer_fee_amount(self):
        return self.json_response.get('acquirer_fee_amount', None)

    @property
    def fees(self):
        if 'fees' in self.json_response:
            return [NetworkFeeModel(val) for val in self.json_response['fees']]

    @property
    def digital_wallet_token(self):
        if 'digital_wallet_token' in self.json_response:
            return DigitalWalletToken(self.json_response['digital_wallet_token'])

    @property
    def user(self):
        if 'user' in self.json_response:
            return CardholderMetadata(self.json_response['user'])

    @property
    def business(self):
        if 'business' in self.json_response:
            return BusinessMetadata(self.json_response['business'])

    @property
    def card(self):
        if 'card' in self.json_response:
            return CardMetadata(self.json_response['card'])

    @property
    def acquirer(self):
        if 'acquirer' in self.json_response:
            return Acquirer(self.json_response['acquirer'])

    @property
    def fraud(self):
        if 'fraud' in self.json_response:
            return Fraud(self.json_response['fraud'])

    @property
    def pos(self):
        if 'pos' in self.json_response:
            return Pos(self.json_response['pos'])

    @property
    def address_verification(self):
        if 'address_verification' in self.json_response:
            return AddressVerificationModel(self.json_response['address_verification'])

    @property
    def card_security_code_verification(self):
        if 'card_security_code_verification' in self.json_response:
            return CardSecurityCodeVerification(self.json_response['card_security_code_verification'])

    @property
    def transaction_metadata(self):
        if 'transaction_metadata' in self.json_response:
            return TransactionMetadata(self.json_response['transaction_metadata'])

    @property
    def card_holder_model(self):
        if 'card_holder_model' in self.json_response:
            return UserCardHolderResponse(self.json_response['card_holder_model'])

    @property
    def standin_approved_by(self):
        return self.json_response.get('standin_approved_by', None)

    @property
    def network_reference_id(self):
        return self.json_response.get('network_reference_id', None)

    @property
    def acquirer_reference_id(self):
        return self.json_response.get('acquirer_reference_id', None)

    @property
    def cardholder_authentication_data(self):
        if 'cardholder_authentication_data' in self.json_response:
            return CardholderAuthenticationData(self.json_response['cardholder_authentication_data'])

    def __repr__(self):
        return '<Marqeta.response_models.transaction_model.TransactionModel>' + self.__str__()
107393
from aiogram import types

from bot.misc import dp


@dp.inline_handler()
async def example_echo(iq: types.InlineQuery):
    await iq.answer(results=[], switch_pm_text='To bot', switch_pm_parameter='sp')
107399
import unittest

from bubuku.id_extractor import _search_broker_id


class TestBrokerIdExtractor(unittest.TestCase):
    def test_match_valid(self):
        assert '123534' == _search_broker_id(['broker.id=123534'])
        assert '123534' == _search_broker_id(['\tbroker.id=123534'])
        assert '123534' == _search_broker_id(['\tbroker.id=123534\n'])
        assert '123534' == _search_broker_id(['broker.id=123534 \n\r'])
        assert '123534' == _search_broker_id(['\tbroker.id=123534 \r'])
        assert '123534' == _search_broker_id(['xbroker.id=1', 'broker.id=123534'])
        assert '123534' == _search_broker_id(['broker.id=123534', 'boker.id=123534'])

    def test_match_invalid(self):
        assert _search_broker_id([]) is None
        assert _search_broker_id(['broker_id=123534']) is None
        assert _search_broker_id(['xbroker.id=1', 'broker.id=12f3534']) is None
        assert _search_broker_id(['bruker.id=123534', 'boker.id=123534']) is None
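# The tests above pin down _search_broker_id's contract: scan the lines of a
# Kafka properties file for a `broker.id=<digits>` entry (tolerating leading
# and trailing whitespace) and return the id as a string, or None if absent.
# A minimal sketch consistent with these tests (illustration only; the real
# implementation lives in bubuku.id_extractor):
import re


def _search_broker_id_sketch(lines):
    for line in lines:
        match = re.fullmatch(r'broker\.id=(\d+)', line.strip())
        if match:
            return match.group(1)
    return None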
107400
import os
import shutil


def process_subset(data_root, subset):
    coord_path = os.path.join(data_root, subset, 'coordinate')
    for accid in sorted(os.listdir(coord_path)):
        coord_file_path = os.path.join(coord_path, accid)
        for filename in sorted(os.listdir(coord_file_path)):
            vid = filename.split('_')[0]
            name_required = 'maps_%s_%s.avi' % (accid, vid)
            print('processing the video file: %s' % (name_required))
            # check if required salmap video file exists!
            salmap_video_src = os.path.join(data_root, 'salmaps', name_required)
            if not os.path.exists(salmap_video_src):
                print('salmap video file does not exist! use focus map instead! %s' % (salmap_video_src))
                salmap_video_src = os.path.join(data_root, subset, 'focus_videos', accid, vid + '.avi')
                assert os.path.exists(salmap_video_src), 'video file does not exist! %s' % (salmap_video_src)
            # create destination folder
            salmap_dst_path = os.path.join(data_root, subset, 'salmap_videos', accid)
            if not os.path.exists(salmap_dst_path):
                os.makedirs(salmap_dst_path)
            # copy file
            shutil.copyfile(salmap_video_src, os.path.join(salmap_dst_path, vid + '.avi'))


if __name__ == "__main__":
    raw_data_path = '/ssd/data/DADA-2000'
    process_subset(raw_data_path, 'training')
    process_subset(raw_data_path, 'validation')
    process_subset(raw_data_path, 'testing')
107414
import networkx as nx
from graphilp.imports import networkx as impnx
from graphilp.network import atsp_desrochers_laporte as tsp
from gurobipy import GRB


def test_atsp_desrochers_laporte():
    # create graph instance
    n = 10
    G = nx.complete_graph(n)
    G.add_weighted_edges_from([(u, (u + 1) % n, 2) for u in range(n)])
    G.add_weighted_edges_from([(2, 7, 10)])

    # wrap as GraphILP graph
    optG = impnx.read(G)

    # create a warmstart
    warmstart = [(0, 2), (2, 1), (1, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 0)]

    # generate model
    m = tsp.create_model(optG, GRB.MAXIMIZE, metric='metric', warmstart=warmstart)

    # solve model
    m.optimize()

    # extract solution
    tour = tsp.extract_solution(optG, m)

    # check correctness
    assert(len(tour) == n)
    assert((2, 7) in tour)
107531
PAD = 0
EOS = 1
BOS = 2
UNK = 3

UNK_WORD = '<unk>'
PAD_WORD = '<pad>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'

NEG_INF = -10000  # -float('inf')
107538
from django.db.models.signals import post_save, pre_save, post_delete
from django.contrib.auth.models import User

from .models import UserProfile


def create_profile(sender, instance, created, **kwargs):
    if created:
        UserProfile.objects.create(
            user=instance,
            name=instance.username,
            username=instance.username,
            # email=instance.email,
        )
        print('Profile Created!')


def update_profile(sender, instance, created, **kwargs):
    user_profile, _ = UserProfile.objects.get_or_create(user=instance)
    if not created:
        user_profile.username = instance.username
        # instance.userprofile.email = instance.email
        user_profile.save()
        print('Profile updated!')


post_save.connect(create_profile, sender=User)
post_save.connect(update_profile, sender=User)
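# The same wiring is often written with Django's @receiver decorator instead
# of explicit connect() calls; an equivalent sketch:
#
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender=User)
#   def create_profile(sender, instance, created, **kwargs):
#       ...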
107545
import setuptools
from pathlib import Path

this_directory = Path(__file__).parent
long_description = (this_directory / "README.md").read_text()

setuptools.setup(
    name='valentine',
    version='0.1.4',
    description='Valentine Matcher',
    license_files=('LICENSE',),
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    url='https://delftdata.github.io/valentine/',
    download_url='https://github.com/delftdata/valentine/archive/refs/tags/v0.1.4.tar.gz',
    packages=setuptools.find_packages(exclude=('tests*', 'examples*')),
    install_requires=[
        'numpy>=1.21,<2.0',
        'scipy>=1.7,<2.0',
        'pandas>=1.3,<1.4',
        'nltk>=3.6,<3.7',
        'snakecase>=1.0,<2.0',
        'anytree>=2.8,<2.9',
        'six>=1.16,<1.17',
        'strsim==0.0.3',
        'networkx>=2.6,<2.7',
        'chardet>=4.0.0,<5.0.0',
        'python-Levenshtein==0.12.2',
        'PuLP>=2.5.1,<2.6',
        'pyemd==0.5.1',
        'python-dateutil>=2.8,<2.9'
    ],
    keywords=['matching', 'valentine', 'schema matching', 'dataset discovery', 'coma', 'cupid',
              'similarity flooding'],
    include_package_data=True,
    python_requires='>=3.7,<3.11',
    long_description=long_description,
    long_description_content_type='text/markdown'
)
107559
a = []
n = int(input("Enter no. of elements: "))
print("Enter array:")
for x in range(n):
    element = int(input())
    a.append(element)
a.sort()

# collect values that appear more than once (duplicates are adjacent after sorting)
b = []
for i in range(0, len(a) - 1):
    if a[i] == a[i + 1]:
        b.append(a[i])
b = list(set(b))

# drop every duplicated value, leaving only the elements that occur exactly once
a = set(a)
while len(b) > 0:
    a.remove(b[0])
    b.pop(0)

print("Output:", end=" ")
print(sum(a))
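# Equivalent one-pass version using collections.Counter (sketch; `nums`
# stands for the list as originally entered, before the in-place edits above):
#
#   from collections import Counter
#   print(sum(v for v, c in Counter(nums).items() if c == 1))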
107573
from scipy.stats import multivariate_normal  # multivariate probability distributions
import numpy as np


class GaussianMixture:
    def __init__(self, n_components: int = 1, covariance_type: str = 'full', tol: float = 0.001,
                 reg_covar: float = 1e-06, max_iter: int = 100):
        self.n_components = n_components
        self.means_ = None
        self.covariances_ = None
        self.weights_ = None
        self.reg_covar = reg_covar  # regularisation that guards against singular covariance matrices
        self.max_iter = max_iter

    def fit(self, X_train):
        # basic data dimensions
        n_samples, n_feature = X_train.shape
        self.reg_covar = self.reg_covar * np.identity(n_feature)

        # initialise the required parameters: means, covariances, weights
        self.means_ = np.random.randint(X_train.min() / 2, X_train.max() / 2,
                                        size=(self.n_components, n_feature))
        self.covariances_ = np.zeros((self.n_components, n_feature, n_feature))
        for k in range(self.n_components):
            np.fill_diagonal(self.covariances_[k], 1)
        self.weights_ = np.ones(self.n_components) / self.n_components

        P_mat = np.zeros((n_samples, self.n_components))  # responsibility matrix

        for i in range(self.max_iter):
            #### E-step: compute responsibilities ####
            for k in range(self.n_components):
                self.covariances_ += self.reg_covar  # avoid singular covariance matrices
                g = multivariate_normal(mean=self.means_[k], cov=self.covariances_[k])
                P_mat[:, k] = self.weights_[k] * g.pdf(X_train)  # likelihood of X under component k
            totol_N = P_mat.sum(axis=1)  # total likelihood per sample
            # if a sample has zero total likelihood across all components, substitute K,
            # which amounts to assigning equal probabilities
            totol_N[totol_N == 0] = self.n_components
            P_mat /= totol_N.reshape(-1, 1)

            #### M-step: update parameters ####
            for k in range(self.n_components):
                N_k = np.sum(P_mat[:, k], axis=0)  # effective number of samples in component k
                self.means_[k] = (1 / N_k) * np.sum(X_train * P_mat[:, k].reshape(-1, 1), axis=0)  # new mean
                self.covariances_[k] = (1 / N_k) * np.dot(
                    (P_mat[:, k].reshape(-1, 1) * (X_train - self.means_[k])).T,
                    (X_train - self.means_[k])) + self.reg_covar
                self.weights_[k] = N_k / n_samples

    def predict(self, X_test):
        #### E-step on the test data: compute responsibilities ####
        P_mat = np.zeros((X_test.shape[0], self.n_components))
        for k in range(self.n_components):
            g = multivariate_normal(mean=self.means_[k], cov=self.covariances_[k])
            P_mat[:, k] = self.weights_[k] * g.pdf(X_test)
        totol_N = P_mat.sum(axis=1)
        totol_N[totol_N == 0] = self.n_components
        P_mat /= totol_N.reshape(-1, 1)
        return np.argmax(P_mat, axis=1)


if __name__ == '__main__':
    # sklearn.datasets.samples_generator was removed in newer scikit-learn;
    # make_blobs now lives directly in sklearn.datasets
    from sklearn.datasets import make_blobs
    from model_selection.train_test_split import train_test_split

    X, _ = make_blobs(cluster_std=1.5, random_state=42, n_samples=1000, centers=3)
    X = np.dot(X, np.random.RandomState(0).randn(2, 2))  # skew the clusters

    import matplotlib.pyplot as plt
    plt.clf()
    plt.scatter(X[:, 0], X[:, 1], alpha=0.3)
    plt.show()

    X_train, X_test = train_test_split(X, test_size=0.2)
    n_samples, n_feature = X_train.shape

    gmm = GaussianMixture(n_components=6)
    gmm.fit(X_train)
    Y_pred = gmm.predict(X_test)

    plt.clf()
    plt.scatter(X_test[:, 0], X_test[:, 1], c=Y_pred, alpha=0.3)
    plt.show()
107577
from .elastic import ElasticsearchSourceForm, ElasticsearchStatusCheckForm
from .grafana_elastic import GrafanaElasticsearchStatusCheckForm
from .grafana import (
    GrafanaInstanceAdminForm,
    GrafanaDataSourceAdminForm,
    GrafanaInstanceForm,
    GrafanaDashboardForm,
    GrafanaPanelForm,
    GrafanaSeriesForm,
    GrafanaStatusCheckForm,
)
107627
from mock import Mock, sentinel, patch
import pytest
import selenium

import pytest_webdriver


def test_browser_to_use():
    caps = Mock(CHROME=sentinel.chrome, UNKNOWN=None)
    wd = Mock(DesiredCapabilities=Mock(return_value=caps))
    assert pytest_webdriver.browser_to_use(wd, 'chrome') == sentinel.chrome
    with pytest.raises(ValueError):
        pytest_webdriver.browser_to_use(wd, 'unknown')
107651
from collections import namedtuple import numpy as np from scipy.interpolate import Akima1DInterpolator as Akima import openmdao.api as om """United States standard atmosphere 1976 tables, data obtained from http://www.digitaldutch.com/atmoscalc/index.htm""" USatm1976Data = namedtuple("USatm1976Data", ["alt", "T", "P", "rho", "speed_of_sound", "viscosity"]) USatm1976Data.alt = np.array( [ -1000, 0, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, 20000, 21000, 22000, 23000, 24000, 25000, 26000, 27000, 28000, 29000, 30000, 31000, 32000, 33000, 34000, 35000, 36000, 37000, 38000, 39000, 40000, 41000, 42000, 43000, 44000, 45000, 46000, 47000, 48000, 49000, 50000, 51000, 52000, 53000, 54000, 55000, 56000, 57000, 58000, 59000, 60000, 61000, 62000, 63000, 64000, 65000, 66000, 67000, 68000, 69000, 70000, 71000, 72000, 73000, 74000, 75000, 76000, 77000, 78000, 79000, 80000, 81000, 82000, 83000, 84000, 85000, 86000, 87000, 88000, 89000, 90000, 91000, 92000, 93000, 94000, 95000, 96000, 97000, 98000, 99000, 100000, 105000, 110000, 115000, 120000, 125000, 130000, 135000, 140000, 145000, 150000, ] ) # units='ft' USatm1976Data.T = np.array( [ 522.236, 518.67, 515.104, 511.538, 507.972, 504.405, 500.839, 497.273, 493.707, 490.141, 486.575, 483.008, 479.442, 475.876, 472.31, 468.744, 465.178, 461.611, 458.045, 454.479, 450.913, 447.347, 443.781, 440.214, 436.648, 433.082, 429.516, 425.95, 422.384, 418.818, 415.251, 411.685, 408.119, 404.553, 400.987, 397.421, 393.854, 390.288, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 389.97, 390.18, 390.729, 391.278, 391.826, 392.375, 392.923, 393.472, 394.021, 394.569, 395.118, 395.667, 396.215, 396.764, 397.313, 397.861, 398.41, 398.958, 399.507, 400.056, 400.604, 401.153, 401.702, 402.25, 402.799, 403.348, 403.896, 404.445, 404.994, 405.542, 406.091, 406.639, 407.188, 407.737, 408.285, 408.834, 411.59, 419.271, 426.952, 434.633, 442.314, 449.995, 457.676, 465.357, 473.038, 480.719, ] ) # units='degR' USatm1976Data.P = np.array( [ 15.2348, 14.6959, 14.1726, 13.6644, 13.1711, 12.6923, 12.2277, 11.777, 11.3398, 10.9159, 10.5049, 10.1065, 9.7204, 9.34636, 8.98405, 8.63321, 8.29354, 7.96478, 7.64665, 7.33889, 7.4123, 6.75343, 6.47523, 6.20638, 5.94664, 5.69578, 5.45355, 5.21974, 4.9941, 4.77644, 4.56651, 4.36413, 4.16906, 3.98112, 3.8001, 3.6258, 3.45803, 3.29661, 3.14191, 2.99447, 2.85395, 2.72003, 2.59239, 2.47073, 2.35479, 2.24429, 2.13897, 2.0386, 1.94293, 1.85176, 1.76486, 1.68204, 1.60311, 1.52788, 1.45618, 1.38785, 1.32272, 1.26065, 1.20149, 1.14511, 1.09137, 1.04016, 0.991347, 0.944827, 0.900489, 0.858232, 0.817958, 0.779578, 0.743039, 0.708261, 0.675156, 0.643641, 0.613638, 0.585073, 0.557875, 0.531976, 0.507313, 0.483825, 0.461455, 0.440148, 0.419853, 0.400519, 0.382101, 0.364553, 0.347833, 0.331902, 0.31672, 0.302253, 0.288464, 0.275323, 0.262796, 0.250856, 0.239473, 0.228621, 0.218275, 0.20841, 0.199003, 0.190032, 0.181478, 0.173319, 0.165537, 0.158114, 0.12582, 0.10041, 0.08046, 0.064729, 0.0522725, 0.0423688, 0.0344637, 0.0281301, 0.0230369, 0.0189267, ] ) # units='psi' USatm1976Data.rho = np.array( [ 0.00244752, 0.00237717, 0.00230839, 0.00224114, 0.00217539, 0.00211114, 0.00204834, 0.00198698, 0.00192704, 0.0018685, 0.00181132, 0.00175549, 0.00170099, 0.00164779, 0.00159588, 0.00154522, 0.00149581, 
0.00144761, 0.00140061, 0.00135479, 0.00131012, 0.00126659, 0.00122417, 0.00118285, 0.0011426, 0.00110341, 0.00106526, 0.00102812, 0.000991984, 0.000956827, 0.000922631, 0.000889378, 0.00085705, 0.000825628, 0.000795096, 0.000765434, 0.000736627, 0.000708657, 0.000675954, 0.000644234, 0.000614002, 0.000585189, 0.000557728, 0.000531556, 0.000506612, 0.000482838, 0.00046018, 0.000438586, 0.000418004, 0.000398389, 0.000379694, 0.000361876, 0.000344894, 0.000328709, 0.000313284, 0.000298583, 0.000284571, 0.000271217, 0.00025849, 0.00024636, 0.000234799, 0.000223781, 0.000213279, 0.000203271, 0.000193732, 0.000184641, 0.000175976, 0.000167629, 0.000159548, 0.000151867, 0.000144566, 0.000137625, 0.000131026, 0.000124753, 0.000118788, 0.000113116, 0.000107722, 0.000102592, 9.77131e-05, 9.30725e-05, 8.86582e-05, 0.000084459, 8.04641e-05, 7.66632e-05, 7.30467e-05, 6.96054e-05, 6.63307e-05, 6.32142e-05, 6.02481e-05, 5.74249e-05, 5.47376e-05, 5.21794e-05, 4.97441e-05, 4.74254e-05, 4.52178e-05, 4.31158e-05, 0.000041114, 3.92078e-05, 3.73923e-05, 3.56632e-05, 3.40162e-05, 3.24473e-05, 2.56472e-05, 2.00926e-05, 1.58108e-05, 1.24948e-05, 9.9151e-06, 7.89937e-06, 6.3177e-06, 5.07154e-06, 4.08586e-06, 3.30323e-06, ] ) # units='slug/ft**3' USatm1976Data.a = np.array( [ 1120.28, 1116.45, 1112.61, 1108.75, 1104.88, 1100.99, 1097.09, 1093.18, 1089.25, 1085.31, 1081.36, 1077.39, 1073.4, 1069.4, 1065.39, 1061.36, 1057.31, 1053.25, 1049.18, 1045.08, 1040.97, 1036.85, 1032.71, 1028.55, 1024.38, 1020.19, 1015.98, 1011.75, 1007.51, 1003.24, 998.963, 994.664, 990.347, 986.01, 981.655, 977.28, 972.885, 968.471, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.076, 968.337, 969.017, 969.698, 970.377, 971.056, 971.735, 972.413, 973.091, 973.768, 974.445, 975.121, 975.797, 976.472, 977.147, 977.822, 978.496, 979.169, 979.842, 980.515, 981.187, 981.858, 982.53, 983.2, 983.871, 984.541, 985.21, 985.879, 986.547, 987.215, 987.883, 988.55, 989.217, 989.883, 990.549, 991.214, 994.549, 1003.79, 1012.94, 1022.01, 1031, 1039.91, 1048.75, 1057.52, 1066.21, 1074.83, ] ) # units='ft/s' USatm1976Data.viscosity = np.array( [ 3.81e-07, 3.78e-07, 3.76e-07, 3.74e-07, 3.72e-07, 3.70e-07, 3.68e-07, 3.66e-07, 3.64e-07, 3.62e-07, 3.60e-07, 3.57e-07, 3.55e-07, 3.53e-07, 3.51e-07, 3.49e-07, 3.47e-07, 3.45e-07, 3.42e-07, 3.40e-07, 3.38e-07, 3.36e-07, 3.34e-07, 3.31e-07, 3.29e-07, 3.27e-07, 3.25e-07, 3.22e-07, 3.20e-07, 3.18e-07, 3.16e-07, 3.13e-07, 3.11e-07, 3.09e-07, 3.06e-07, 3.04e-07, 3.02e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 2.99e-07, 3.00e-07, 3.00e-07, 3.00e-07, 3.01e-07, 3.01e-07, 3.01e-07, 3.02e-07, 3.02e-07, 3.03e-07, 3.03e-07, 3.03e-07, 3.04e-07, 3.04e-07, 3.04e-07, 3.05e-07, 3.05e-07, 3.05e-07, 3.06e-07, 3.06e-07, 3.06e-07, 3.07e-07, 3.07e-07, 3.08e-07, 3.08e-07, 3.08e-07, 3.09e-07, 3.09e-07, 3.09e-07, 3.10e-07, 3.10e-07, 3.10e-07, 3.11e-07, 3.11e-07, 3.11e-07, 3.13e-07, 3.18e-07, 3.23e-07, 3.28e-07, 3.33e-07, 3.37e-07, 3.42e-07, 3.47e-07, 3.51e-07, 3.56e-07, ] ) # units='lbf*s/ft**2' T_interp = Akima(USatm1976Data.alt, USatm1976Data.T) P_interp = 
Akima(USatm1976Data.alt, USatm1976Data.P)
rho_interp = Akima(USatm1976Data.alt, USatm1976Data.rho)
a_interp = Akima(USatm1976Data.alt, USatm1976Data.a)
viscosity_interp = Akima(USatm1976Data.alt, USatm1976Data.viscosity)

T_interp_deriv = T_interp.derivative(1)
P_interp_deriv = P_interp.derivative(1)
rho_interp_deriv = rho_interp.derivative(1)
a_interp_deriv = a_interp.derivative(1)
viscosity_interp_deriv = viscosity_interp.derivative(1)


class AtmosComp(om.ExplicitComponent):
    def setup(self):
        self.add_input("altitude", val=1.0, units="ft")
        self.add_input("Mach_number", val=1.0)

        self.add_output("T", val=1.0, units="degR")
        self.add_output("P", val=1.0, units="psi")
        self.add_output("rho", val=1.0, units="slug/ft**3")
        self.add_output("speed_of_sound", val=1.0, units="ft/s")
        self.add_output("mu", val=1.0, units="lbf*s/ft**2")
        self.add_output("v", val=1.0, units="ft/s")

        self.declare_partials(["T", "P", "rho", "speed_of_sound", "mu", "v"], "altitude")
        self.declare_partials("v", "Mach_number")

    def compute(self, inputs, outputs):
        outputs["T"] = T_interp(inputs["altitude"])
        outputs["P"] = P_interp(inputs["altitude"])
        outputs["rho"] = rho_interp(inputs["altitude"])
        outputs["speed_of_sound"] = a_interp(inputs["altitude"])
        outputs["mu"] = viscosity_interp(inputs["altitude"])
        outputs["v"] = outputs["speed_of_sound"] * inputs["Mach_number"]

    def compute_partials(self, inputs, partials):
        partials["T", "altitude"] = T_interp_deriv(inputs["altitude"])
        partials["P", "altitude"] = P_interp_deriv(inputs["altitude"])
        partials["rho", "altitude"] = rho_interp_deriv(inputs["altitude"])
        partials["speed_of_sound", "altitude"] = a_interp_deriv(inputs["altitude"])
        partials["mu", "altitude"] = viscosity_interp_deriv(inputs["altitude"])
        partials["v", "altitude"] = a_interp_deriv(inputs["altitude"]) * inputs["Mach_number"]
        partials["v", "Mach_number"] = a_interp(inputs["altitude"])
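A minimal sketch of evaluating the AtmosComp component above in a standalone OpenMDAO problem; the altitude and Mach values are illustrative assumptions.

# Sketch: run AtmosComp by itself; the inputs below are illustrative.
import openmdao.api as om

prob = om.Problem()
prob.model.add_subsystem("atmos", AtmosComp(), promotes=["*"])
prob.setup()

prob.set_val("altitude", 20000.0, units="ft")
prob.set_val("Mach_number", 0.6)
prob.run_model()

print(prob.get_val("T", units="degR"))  # static temperature from the Akima fit
print(prob.get_val("v", units="ft/s"))  # true airspeed = Mach * speed of sound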
107652
import unittest
import io

from ppci import ir
from ppci.irutils import verify_module
from ppci.lang.c import CBuilder
from ppci.lang.c.options import COptions
from ppci.arch.example import ExampleArch
from ppci.lang.c import CSynthesizer


class CSynthesizerTestCase(unittest.TestCase):
    def test_hello(self):
        """ Convert C to IR, and then this IR back to C """
        src = r"""
        void printf(char*);
        void main(int b) {
          printf("Hello" "world\n");
        }
        """
        arch = ExampleArch()
        builder = CBuilder(arch.info, COptions())
        f = io.StringIO(src)
        ir_module = builder.build(f, None)
        assert isinstance(ir_module, ir.Module)
        verify_module(ir_module)
        synthesizer = CSynthesizer()
        synthesizer.syn_module(ir_module)


if __name__ == "__main__":
    unittest.main()
107657
import os
import numpy as np
import cv2
import sys
import argparse
import pathlib
import glob
import time
sys.path.append('../../')
from util import env, inverse, project_so, make_dirs
from mesh import Mesh
import scipy.io as sio


def draw(vertex):
    """ Draw a 3 by n point cloud using the open3d library """
    import open3d
    pcd = open3d.PointCloud()
    pcd.points = open3d.Vector3dVector(vertex.T)
    open3d.draw_geometries([pcd])


data_path = env()
print('home directory = %s' % data_path)
PATH_POSE = '%s/dataset/redwood/{}/{}.xf' % data_path
PATH_DEPTH = '%s/dataset/redwood/{}/{}.png' % data_path
PATH_MAT = '%s/processed_dataset/redwood/{}/{}.mat' % data_path

parser = argparse.ArgumentParser(description='Process Redwood Dataset')
parser.add_argument('--shapeid', type=str)
args = parser.parse_args()


def getData(shapeid):
    depth_paths = []
    poses = []
    pose_paths = []
    frames = glob.glob(PATH_DEPTH.format(shapeid, '*'))
    frames.sort()
    for i, frame in enumerate(frames):
        frameid = frame.split('/')[-1].split('.')[0]
        depth_path = PATH_DEPTH.format(shapeid, frameid)
        #tmp = cv2.resize(cv2.imread(imgsPath, 2)/1000., (64,64))
        #AuthenticdepthMap.append(tmp.reshape(1,tmp.shape[0],tmp.shape[1],1))
        pose_fp = PATH_POSE.format(shapeid, frameid)
        flag = True
        try:
            tmp = np.loadtxt(pose_fp)
            assert abs(tmp[3, 3] - 1.0) < 1e-4, 'bottom right corner should be one'
            assert (abs(tmp[3, :3]) < 1e-4).all(), '[3, :3] should be zero'
            R = tmp[:3, :3]
            assert np.linalg.det(R) > 0.01, 'determinant should be 1'
            assert np.linalg.norm(R.dot(R.T) - np.eye(3), 'fro') ** 2 < 1e-4, 'should be a rotation matrix'
            project_R = project_so(R)
            assert np.linalg.norm(R - project_R, 'fro') ** 2 < 1e-4, 'projection onto SO3 should be identical'
            tmp[:3, :3] = project_R
            tmp = inverse(tmp)
        except Exception as e:
            print('error on {}: {}'.format(pose_fp, e))
            #print(R.dot(R.T))
            #print(np.linalg.norm(R.dot(R.T) - np.eye(3), 'fro'))
            flag = False
        if not flag:
            print('ignoring frame {}'.format(frameid))
            assert False
        poses.append(tmp)
        depth_paths.append(depth_path)
        pose_paths.append(pose_fp)
    T = np.concatenate(poses).reshape(-1, 4, 4)
    return depth_paths, T, pose_paths


def main():
    depth_paths, T, pose_paths = getData(args.shapeid)
    n = len(depth_paths)
    print('found %d clean depth images...' % n)
    intrinsic = np.array([[525.0, 0, 319.5], [0, 525.0, 239.5], [0, 0, 1]])
    np.random.seed(816)
    indices = np.random.permutation(n)
    print(indices[:100])
    #indices = sorted(indices)
    make_dirs(PATH_MAT.format(args.shapeid, 0))
    import open3d
    pcd_combined = open3d.PointCloud()
    for i, idx in enumerate(indices):
        print('%d / %d' % (i, len(indices)))
        mesh = Mesh.read(depth_paths[idx], mode='depth', intrinsic=intrinsic)
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(mesh.vertex.T)
        pcd.transform(inverse(T[idx]))
        #pcd = open3d.voxel_down_sample(pcd, voxel_size=0.02)
        pcd_combined += pcd
        pcd_combined = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
        sio.savemat(PATH_MAT.format(args.shapeid, i), mdict={
            'vertex': mesh.vertex,
            'validIdx_rowmajor': mesh.validIdx,
            'pose': T[idx],
            'depth_path': depth_paths[idx],
            'pose_path': pose_paths[idx]})
        if i <= 50 and i >= 40:
            pcd_combined_down = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
            open3d.draw_geometries([pcd_combined_down])
    pcd_combined_down = open3d.voxel_down_sample(pcd_combined, voxel_size=0.02)
    open3d.draw_geometries([pcd_combined_down])

    #draw(mesh.vertex)
    #sId = np.kron(np.array(range(n)), np.ones([n,1])).astype('int')
    #tId = np.kron(np.array(range(n)).reshape(-1,1), np.ones([1,n])).astype('int')
    #valId = (sId > tId)
    #sId = sId[valId]
    #tId = tId[valId]
    #numEach = 1
    #print('n=%d' % n)
    #print('numEach=%d' % numEach)
    #left = numEach * args.split
    #right = min(numEach * (1 + args.split), len(sId))
    #print('computing [%d:%d] out of [%d:%d]' % (left, right, 0, len(sId)))
    #sId = sId[left:right]
    #tId = tId[left:right]
    #
    #for i in range(len(sId)):
    #    sId_this = sId[i]
    #    tId_this = tId[i]
    #    print(sId_this, tId_this)
    #    sys.stdout.flush()
    #    outpath = os.path.join(outDir, '{}_{}.npy'.format(sId_this,tId_this))
    #    #if os.path.exists(outpath):
    #    #    continue
    #
    #    start_time = time.time()
    #    """
    #    sourceMeshNPY = convertMatlabFormat(DepthPath[sId_this])[np.newaxis,:]
    #    targetMeshNPY = convertMatlabFormat(DepthPath[tId_this])[np.newaxis,:]
    #    #import pdb; pdb.set_trace()
    #    print('convert')
    #    sys.stdout.flush()
    #    validId = (sourceMeshNPY.sum(2)!=0).squeeze()
    #    import util
    #    util.pc2obj(sourceMeshNPY[0,validId,:].T,'test1.obj')
    #    util.pc2obj(targetMeshNPY[0,validId,:].T,'test2.obj')
    #    print('source, target')
    #    print('time elapsed = %f' % (time.time() - start_time))
    #    sys.stdout.flush()
    #    sourceMesh = matlab.double(sourceMeshNPY.tolist())
    #    targetMesh = matlab.double(targetMeshNPY.tolist())
    #    #import pdb; pdb.set_trace()
    #    print('time elapsed = %f' % (time.time() - start_time))
    #    R_,t_,sigma=eng.pythonMain(sourceMesh,targetMesh,nargout=3)
    #    R = np.zeros([4,4])
    #    R[:3,:3] = np.array(R_)
    #    R[3,3] = 1
    #    R[:3,3] = np.array(t_).squeeze()
    #
    #    #sourceMeshNPYHomo = np.ones([4,sourceMeshNPY.shape[1]])
    #    #sourceMeshNPYHomo[:3,:] = sourceMeshNPY[0].copy().T
    #    #sourceMeshNPYHomo = np.matmul(R, sourceMeshNPYHomo)[:3,:]
    #    #util.pc2obj(sourceMeshNPYHomo,'test1T.obj')
    #    """
    #    sourceMesh = Mesh.read(DepthPath[sId_this],mode='depth',intrinsic=intrinsic)
    #    print(sourceMesh.vertex.shape)
    #    print('done loading source')
    #    sys.stdout.flush()
    #    targetMesh = Mesh.read(DepthPath[tId_this],mode='depth',intrinsic=intrinsic)
    #    print('done loading target')
    #    sys.stdout.flush()
    #    #np.save('temp.npy', {'R':Pose[tgt][:3, :3].dot, 'src': sourceMesh.vertex, 'tgt': targetMesh.vertex, 'srcValidIdx': sourceMesh.validIdx, tgtValidIdx: targetMesh.validIdx})
    #    #assert False
    #    R,sigma = globalRegistration(sourceMesh, targetMesh, optsRGBD())
    #    print('done registration')
    #    ##import ipdb
    #    ##ipdb.set_trace()
    #
    #    print('dumping to %s' % outpath)
    #    np.save(outpath, {'R':R, 'sigma':sigma})
    #    end_time = time.time()
    #    print('time elapsed = %f' % (end_time - start_time))
    #    sys.stdout.flush()

    #snapshot = tracemalloc.take_snapshot()
    #display_top(snapshot)


if __name__ == '__main__':
    main()
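The Mesh.read(..., mode='depth', intrinsic=...) call above presumably back-projects each depth pixel through the pinhole intrinsics; a self-contained numpy sketch of that operation follows, using the 525/319.5/239.5 values from the intrinsic matrix in the script. The 1/1000 depth scale is an assumption common to Redwood-style 16-bit PNG depth maps, and Mesh.read's real internals may differ.

# Sketch: back-project a depth image to a 3xN point cloud with a pinhole model.
# This is an assumed equivalent of Mesh.read's depth mode, not its actual code.
import numpy as np
import cv2

def depth_to_points(depth_png, intrinsic):
    depth = cv2.imread(depth_png, cv2.IMREAD_ANYDEPTH).astype(np.float64) / 1000.0  # assumed mm -> m
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    valid = depth > 0
    fx, fy = intrinsic[0, 0], intrinsic[1, 1]
    cx, cy = intrinsic[0, 2], intrinsic[1, 2]
    z = depth[valid]
    x = (u[valid] - cx) * z / fx
    y = (v[valid] - cy) * z / fy
    return np.vstack([x, y, z])  # 3 x N, the layout draw() expects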
107695
import os
import re
import json
from functools import partial

from .constants import *


def convert(s):
    a = re.compile("((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))")
    return a.sub(r"_\1", s).lower()


def convertArray(a):
    newArr = []
    for i in a:
        if isinstance(i, list):
            newArr.append(convertArray(i))
        elif isinstance(i, dict):
            newArr.append(convertJSON(i))
        else:
            newArr.append(i)
    return newArr


def convertJSON(j):
    out = {}
    for k in j:
        newK = convert(k)
        if isinstance(j[k], dict):
            out[newK] = convertJSON(j[k])
        elif isinstance(j[k], list):
            out[newK] = convertArray(j[k])
        else:
            out[newK] = j[k]
    return out


def load_json(filename):
    with open(filename, "r") as f:
        return convertJSON(json.load(f))


def str_to_json(s):
    return json.loads(s.replace("'", '"'))


def inputWithDefault(message, default):
    value = input(f"{message} (default: {default}): ")
    if not value.strip():
        return default
    else:
        return value


def replace_filename(filename):
    return strip_dash(remove_dup_dash(remove_slash(remove_special(filename)))).lower()


def replace_path(pathname):
    return strip_dash(remove_dup_dash(remove_special(pathname))).lower()


def remove_dup_dash(string):
    return re.sub("--+", "-", string)


def remove_special(string):
    return re.sub(
        r"[\~\`\;\:\'\"\!\@\#\$\%\^\&\*\(\)\-\_\+\=\<\>\{\}\[\]\,\. ]", "-", string
    )


def remove_slash(string):
    return re.sub(r"[\/]", "-", string)


def strip_dash(string):
    if len(string) == 0:
        return string
    striped = string
    if striped[-1] == "-":
        striped = striped[:-1]
    if striped[0] == "-":
        striped = striped[1:]
    return striped


def property_to_str(page, prop):
    prop_type = prop["type"]
    prop_slug = prop["slug"]
    prop_value = page.get_property(prop_slug)
    if not prop_value:
        return ""
    if prop_type == "title":
        return f"{prop_slug}: '{prop_value}'"
    if prop_type in ["created_time", "last_edited_time"]:
        return f"{prop_slug}: {prop_value.strftime('%Y-%m-%d')}"
    if prop_type in ["created_by", "last_edited_by"]:
        return f"{prop_slug}: {prop_value.full_name}"
    if prop_type == "date":
        if not prop_value:
            return ""
        if prop_value.end:
            return f"{prop_slug}: {prop_value.start} - {prop_value.end}"
        return f"{prop_slug}: {prop_value.start}"
    if prop_type == "file":
        if len(prop_value) == 1:
            return f'{prop_slug}: "{prop_value[0]}"'
        else:
            return f"{prop_slug}: {prop_value}"
    else:
        return f"{prop_slug}: {prop_value}"


def get_created_time(page, database):
    if not database:
        return ""
    created_times = list(
        filter(lambda p: p["type"] == "created_time", database.get_schema_properties())
    )
    if len(created_times) == 0:
        return ""
    prop_slug = created_times[0]["slug"]
    return page.get_property(prop_slug).strftime("%Y-%m-%d")


def get_filename(append_created_time, page, database):
    created_time = get_created_time(page, database)
    title = replace_filename(page.title)
    if not created_time:
        return title
    if not append_created_time:
        return title
    return f"{created_time}-{title}"


def get_dir_path(create_page_directory, sub_path, filename):
    replaced_sub_path = replace_path(sub_path)
    if create_page_directory:
        return os.path.join(replaced_sub_path, filename)
    else:
        return os.path.join(replaced_sub_path)


def append_metadata(add_metadata, metadata, page):
    metadata_str = ""
    if add_metadata and len(metadata):
        metadata_str += "---\n"
        metadata_str += "\n".join(metadata)
        metadata_str += "\n---\n\n"
        return metadata_str
    else:
        return f"# {page.title}\n\n"


def get_ordered_properties(database):
    cv = database._get_a_collection_view()
    tp = filter(
        lambda tp_item: tp_item["visible"] == True, cv.get("format.table_properties")
    )
    sp = database.get_schema_properties()
    properties = []
    for tp_item in tp:
        properties += list(
            filter(lambda sp_item: sp_item["id"] == tp_item["property"], sp)
        )
    return properties
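A few illustrative calls for the string helpers above; the expected values were traced by hand from the regexes, not taken from the original repo.

# Illustrative calls; expected values worked out from the regexes above.
assert convert("createdTime") == "created_time"            # camelCase -> snake_case
assert convertJSON({"pageTitle": "x"}) == {"page_title": "x"}
assert replace_filename("Hello, World!") == "hello-world"  # specials collapse to dashes
assert replace_path("My Notes/2021") == "my-notes/2021"    # paths keep their slashes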
107706
from allennlp_semparse import DomainLanguage, predicate


class NlaLanguage(DomainLanguage):
    def __init__(self):
        super().__init__(
            start_types={int},
            allowed_constants={
                "0": 0,
                "1": 1,
                "2": 2,
                "3": 3,
                "4": 4,
                "5": 5,
                "6": 6,
                "7": 7,
                "8": 8,
                "9": 9,
            },
        )

    @predicate
    def add(self, num1: int, num2: int) -> int:
        return num1 + num2

    @predicate
    def subtract(self, num1: int, num2: int) -> int:
        return num1 - num2

    @predicate
    def multiply(self, num1: int, num2: int) -> int:
        return num1 * num2

    @predicate
    def divide(self, num1: int, num2: int) -> int:
        return num1 // num2 if num2 != 0 else 0
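If this follows the usual allennlp_semparse DomainLanguage conventions, s-expression logical forms can be executed directly; a small sketch, with the exact logical-form strings assumed from the library's standard syntax.

# Sketch: executing logical forms against NlaLanguage, assuming the standard
# DomainLanguage.execute(s-expression) API.
language = NlaLanguage()
print(language.execute("(add 1 2)"))               # -> 3
print(language.execute("(multiply (add 1 2) 3)"))  # -> 9
print(language.execute("(divide 7 0)"))            # -> 0, guarded in divide()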
107717
import numpy as np
import logging


class PID(object):
    def __init__(self, kp, ki, kd):
        self.kp = kp
        self.ki = ki
        self.kd = kd
        self.reset()

    def update(self, t, e):
        # TODO add anti-windup logic
        # Most environments have a short execution time,
        # so the controller doesn't have much time to wind up
        dt = t - self.last_t
        self.last_t = t

        p_term = self.kp * e

        self.accum += e * dt
        i_term = self.ki * self.accum

        de = e - self.last_e
        self.last_e = e
        d_term = self.kd * de / dt if dt > 0 else 0

        return p_term + i_term + d_term

    def reset(self):
        self.last_t = 0
        self.last_e = 0
        self.accum = 0


class PidController(object):
    """ This is a loose port from Betaflight """

    FD_ROLL = 0
    FD_PITCH = 1
    FD_YAW = 2

    PTERM_SCALE = 0.032029
    ITERM_SCALE = 0.244381
    DTERM_SCALE = 0.000529

    minthrottle = 1000
    maxthrottle = 2000

    def __init__(self, pid_roll=[40, 40, 30], pid_pitch=[58, 50, 35], pid_yaw=[80, 45, 20],
                 mixer=[], itermLimit=150):
        # init gains and scale
        self.Kp = [pid_roll[0], pid_pitch[0], pid_yaw[0]]
        self.Kp = [self.PTERM_SCALE * p for p in self.Kp]
        self.Ki = [pid_roll[1], pid_pitch[1], pid_yaw[1]]
        self.Ki = [self.ITERM_SCALE * i for i in self.Ki]
        self.Kd = [pid_roll[2], pid_pitch[2], pid_yaw[2]]
        self.Kd = [self.DTERM_SCALE * d for d in self.Kd]

        self.itermLimit = itermLimit

        self.previousRateError = [0] * 3
        self.previousTime = 0
        self.previous_motor_values = [self.minthrottle] * 4

        self.pid_rpy = [PID(*pid_roll), PID(*pid_pitch), PID(*pid_yaw)]
        self.mixer = mixer

    def calculate_motor_values(self, current_time, sp_rates, gyro_rates):
        rpy_sums = []
        for i in range(3):
            u = self.pid_rpy[i].update(current_time, sp_rates[i] - gyro_rates[i])
            rpy_sums.append(u)
        return self.mix(*rpy_sums)

    def constrainf(self, amt, low, high):
        # From BF src/main/common/maths.h
        if amt < low:
            return low
        elif amt > high:
            return high
        else:
            return amt

    def mix(self, r, p, y):
        PID_MIXER_SCALING = 1000.0
        pidSumLimit = 10000.  # 500
        pidSumLimitYaw = 100000.  # 1000.0  # 400
        motorOutputMixSign = 1
        motorOutputRange = self.maxthrottle - self.minthrottle  # throttle max - throttle min
        motorOutputMin = self.minthrottle

        mixer_index_throttle = 0
        mixer_index_roll = 1
        mixer_index_pitch = 2
        mixer_index_yaw = 3

        scaledAxisPidRoll = self.constrainf(r, -pidSumLimit, pidSumLimit) / PID_MIXER_SCALING
        scaledAxisPidPitch = self.constrainf(p, -pidSumLimit, pidSumLimit) / PID_MIXER_SCALING
        scaledAxisPidYaw = self.constrainf(y, -pidSumLimitYaw, pidSumLimitYaw) / PID_MIXER_SCALING
        scaledAxisPidYaw = -scaledAxisPidYaw

        # Find roll/pitch/yaw desired output
        motor_count = 4
        motorMix = [0] * motor_count
        motorMixMax = 0
        motorMixMin = 0

        # No additional throttle, in air mode
        throttle = 0
        motorRangeMin = 1000
        motorRangeMax = 2000

        for i in range(motor_count):
            mix = (scaledAxisPidRoll * self.mixer[i][1] +
                   scaledAxisPidPitch * self.mixer[i][2] +
                   scaledAxisPidYaw * self.mixer[i][3])
            if mix > motorMixMax:
                motorMixMax = mix
            elif mix < motorMixMin:
                motorMixMin = mix
            motorMix[i] = mix

        motorMixRange = motorMixMax - motorMixMin

        if motorMixRange > 1.0:
            for i in range(motor_count):
                motorMix[i] /= motorMixRange
            # Get the maximum correction by setting offset to center when airmode enabled
            throttle = 0.5
        else:
            # Only automatically adjust throttle when airmode enabled.
            # Airmode logic is always active on high throttle
            throttleLimitOffset = motorMixRange / 2.0
            throttle = self.constrainf(throttle, 0.0 + throttleLimitOffset, 1.0 - throttleLimitOffset)

        motor = []
        for i in range(motor_count):
            motorOutput = motorOutputMin + (motorOutputRange * (
                motorOutputMixSign * motorMix[i] +
                throttle * self.mixer[i][mixer_index_throttle]))
            motorOutput = self.constrainf(motorOutput, motorRangeMin, motorRangeMax)
            motor.append(motorOutput)
        motor = list(map(int, np.round(motor)))
        return motor

    def reset(self):
        for pid in self.pid_rpy:
            pid.reset()
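The mixer argument is a per-motor [throttle, roll, pitch, yaw] weight table; a sketch with a quad-X style layout follows. The sign convention here is an assumption for illustration, not Betaflight's exact per-frame table.

# Sketch: driving PidController with an assumed quad-X mixer table.
QUAD_X_MIXER = [
    [1.0, -1.0,  1.0, -1.0],  # rear right
    [1.0, -1.0, -1.0,  1.0],  # front right
    [1.0,  1.0,  1.0,  1.0],  # rear left
    [1.0,  1.0, -1.0, -1.0],  # front left
]

controller = PidController(mixer=QUAD_X_MIXER)
setpoint_rates = [0.0, 0.0, 0.0]  # commanded roll/pitch/yaw rates
gyro_rates = [5.0, -3.0, 0.5]     # measured rates
motors = controller.calculate_motor_values(0.002, setpoint_rates, gyro_rates)
print(motors)  # four values clamped to [1000, 2000]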
107720
import sys

from sikr.db.connector import Base, engine
from sikr.models.users import UserGroup, User
from sikr.models.entries import Group, Entry
from sikr.utils.logs import logger


def generate_schema():
    """Generate the initial schema for the database."""
    start_msg = "Creating database schema..."
    end_msg = "Database schema created"
    print(f"[ -- ] {start_msg}")
    logger.info(start_msg)
    try:
        Base.metadata.create_all(engine)
        print(f"[ OK ] {end_msg}")
        logger.info(end_msg)
    except Exception as e:
        error_msg = f"Error creating schema: {e}"
        print(f"[ERROR] {error_msg}")
        logger.error(error_msg)
        sys.exit(1)
107731
import os import numpy as np import cv2 as cv from tests_common import NewOpenCVTests def generate_test_trajectory(): result = [] angle_i = np.arange(0, 271, 3) angle_j = np.arange(0, 1200, 10) for i, j in zip(angle_i, angle_j): x = 2 * np.cos(i * 3 * np.pi/180.0) * (1.0 + 0.5 * np.cos(1.2 + i * 1.2 * np.pi/180.0)) y = 0.25 + i/270.0 + np.sin(j * np.pi/180.0) * 0.2 * np.sin(0.6 + j * 1.5 * np.pi/180.0) z = 2 * np.sin(i * 3 * np.pi/180.0) * (1.0 + 0.5 * np.cos(1.2 + i * np.pi/180.0)) result.append(cv.viz.makeCameraPose((x, y, z), (0.0, 0, 0), (0.0, 1.0, 0.0))) x = np.zeros(shape=(len(result), 1, 16 ), dtype= np.float64) for idx, m in enumerate(result): x[idx, 0, :] = m.mat().reshape(16) return x, result def tutorial3(camera_pov, filename): myWindow = cv.viz_Viz3d("Coordinate Frame") myWindow.showWidget("axe",cv.viz_WCoordinateSystem()) cam_origin = (3.0, 3.0, 3.0) cam_focal_point = (3.0,3.0,2.0) cam_y_dir = (-1.0,0.0,0.0) camera_pose = cv.viz.makeCameraPose(cam_origin, cam_focal_point, cam_y_dir) transform = cv.viz.makeTransformToGlobal((0.0,-1.0,0.0), (-1.0,0.0,0.0), (0.0,0.0,-1.0), cam_origin) dragon_cloud,_,_ = cv.viz.readCloud(filename) cloud_widget = cv.viz_WCloud(dragon_cloud, cv.viz_Color().green()) cloud_pose = cv.viz_Affine3d() cloud_pose = cv.viz_Affine3d().rotate((0, np.pi / 2, 0)).translate((0, 0, 3)) cloud_pose_global = transform.product(cloud_pose) myWindow.showWidget("CPW_FRUSTUM", cv.viz_WCameraPosition((0.889484, 0.523599)), camera_pose) if not camera_pov: myWindow.showWidget("CPW", cv.viz_WCameraPosition(0.5), camera_pose) myWindow.showWidget("dragon", cloud_widget, cloud_pose_global) if camera_pov: myWindow.setViewerPose(camera_pose) class viz_test(NewOpenCVTests): def setUp(self): super(viz_test, self).setUp() if not bool(os.environ.get('OPENCV_PYTEST_RUN_VIZ', False)): self.skipTest("Use OPENCV_PYTEST_RUN_VIZ=1 to enable VIZ UI tests") def test_viz_tutorial3_global_view(self): tutorial3(False, self.find_file("viz/dragon.ply")) def test_viz_tutorial3_camera_view(self): tutorial3(True, self.find_file("viz/dragon.ply")) def test_viz(self): dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply")) myWindow = cv.viz_Viz3d("abc") myWindow.showWidget("coo", cv.viz_WCoordinateSystem(1)) myWindow.showWidget("cloud", cv.viz_WPaintedCloud(dragon_cloud)) myWindow.spinOnce(500, True) def test_viz_show_simple_widgets(self): viz = cv.viz_Viz3d("show_simple_widgets") viz.setBackgroundMeshLab() viz.showWidget("coos", cv.viz_WCoordinateSystem()) viz.showWidget("cube", cv.viz_WCube()) viz.showWidget("cub0", cv.viz_WCube((-1.0, -1, -1), (-0.5, -0.5, -0.5), False, cv.viz_Color().indigo())) viz.showWidget("arro", cv.viz_WArrow((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5), 0.009, cv.viz_Color().raspberry())) viz.showWidget("cir1", cv.viz_WCircle(0.5, 0.01, cv.viz_Color.bluberry())) viz.showWidget("cir2", cv.viz_WCircle(0.5, (0.5, 0.0, 0.0), (1.0, 0.0, 0.0), 0.01, cv.viz_Color().apricot())) viz.showWidget("cyl0", cv.viz_WCylinder((-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), 0.125, 30, cv.viz_Color().brown())) viz.showWidget("con0", cv.viz_WCone(0.25, 0.125, 6, cv.viz_Color().azure())) viz.showWidget("con1", cv.viz_WCone(0.125, (0.5, -0.5, 0.5), (0.5, -1.0, 0.5), 6, cv.viz_Color().turquoise())) text2d = cv.viz_WText("Different simple widgets", (20, 20), 20, cv.viz_Color().green()) viz.showWidget("text2d", text2d) text3d = cv.viz_WText3D("Simple 3D text", ( 0.5, 0.5, 0.5), 0.125, False, cv.viz_Color().green()) viz.showWidget("text3d", text3d) viz.showWidget("plane1", cv.viz_WPlane((0.25, 0.75))) 
viz.showWidget("plane2", cv.viz_WPlane((0.5, -0.5, -0.5), (0.0, 1.0, 1.0), (1.0, 1.0, 0.0), (1.0, 0.5), cv.viz_Color().gold())) viz.showWidget("grid1", cv.viz_WGrid((7,7), (0.75,0.75), cv.viz_Color().gray()), cv.viz_Affine3d().translate((0.0, 0.0, -1.0))) viz.spinOnce(500, True) text2d.setText("Different simple widgets (updated)") text3d.setText("Updated text 3D") viz.spinOnce(500, True) def test_viz_show_overlay_image(self): lena = cv.imread(self.find_file("viz/lena.png")) gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY) rows = lena.shape[0] cols = lena.shape[1] half_lsize = (lena.shape[1] // 2, lena.shape[0] // 2) viz = cv.viz_Viz3d("show_overlay_image") viz.setBackgroundMeshLab(); vsz = viz.getWindowSize() viz.showWidget("coos", cv.viz_WCoordinateSystem()) viz.showWidget("cube", cv.viz_WCube()) x = cv.viz_WImageOverlay(lena, (10, 10, half_lsize[1], half_lsize[0])) viz.showWidget("img1", x) viz.showWidget("img2", cv.viz_WImageOverlay(gray, (vsz[0] - 10 - cols // 2, 10, half_lsize[1], half_lsize[0]))) viz.showWidget("img3", cv.viz_WImageOverlay(gray, (10, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0]))) viz.showWidget("img5", cv.viz_WImageOverlay(lena, (vsz[0] - 10 - cols // 2, vsz[1] - 10 - rows // 2, half_lsize[1], half_lsize[0]))) viz.showWidget("text2d", cv.viz_WText("Overlay images", (20, 20), 20, cv.viz_Color().green())) i = 0 for num in range(50): i = i + 1 a = i % 360 pose = (3 * np.sin(a * np.pi/180), 2.1, 3 * np.cos(a * np.pi/180)); viz.setViewerPose(cv.viz.makeCameraPose(pose , (0.0, 0.5, 0.0), (0.0, 0.1, 0.0))) img = lena * (np.sin(i * 10 * np.pi/180) * 0.5 + 0.5) x.setImage(img.astype(np.uint8)) viz.spinOnce(100, True) viz.showWidget("text2d", cv.viz_WText("Overlay images (stopped)", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_image_3d(self): lena = cv.imread(self.find_file("viz/lena.png")) lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY) viz = cv.viz_Viz3d("show_image_3d") viz.setBackgroundMeshLab() viz.showWidget("coos", cv.viz_WCoordinateSystem()) viz.showWidget("cube", cv.viz_WCube()); viz.showWidget("arr0", cv.viz_WArrow((0.5, 0.0, 0.0), (1.5, 0.0, 0.0), 0.009, cv.viz_Color().raspberry())) x = cv.viz_WImage3D(lena, (1.0, 1.0)) viz.showWidget("img0", x, cv.viz_Affine3d((0.0, np.pi/2, 0.0), (.5, 0.0, 0.0))) viz.showWidget("arr1", cv.viz_WArrow((-0.5, -0.5, 0.0), (0.2, 0.2, 0.0), 0.009, cv.viz_Color().raspberry())) viz.showWidget("img1", cv.viz_WImage3D(lena_gray, (1.0, 1.0), (-0.5, -0.5, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0))) viz.showWidget("arr3", cv.viz_WArrow((-0.5, -0.5, -0.5), (0.5, 0.5, 0.5), 0.009, cv.viz_Color().raspberry())) viz.showWidget("text2d", cv.viz_WText("Images in 3D", (20, 20), 20, cv.viz_Color().green())) i = 0 for num in range(50): img = lena * (np.sin(i*7.5*np.pi/180) * 0.5 + 0.5) x.setImage(img.astype(np.uint8)) i = i + 1 viz.spinOnce(100, True); viz.showWidget("text2d", cv.viz_WText("Images in 3D (stopped)", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_cloud_bluberry(self): dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply")) pose = cv.viz_Affine3d() pose = pose.rotate((0, 0.8, 0)); viz = cv.viz_Viz3d("show_cloud_bluberry") viz.setBackgroundColor(cv.viz_Color().black()) viz.showWidget("coosys", cv.viz_WCoordinateSystem()) viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud, cv.viz_Color().bluberry()), pose) viz.showWidget("text2d", cv.viz_WText("Bluberry cloud", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def 
test_viz_show_cloud_random_color(self): dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply")) colors = np.random.randint(0, 255, size=(dragon_cloud.shape[0],dragon_cloud.shape[1],3), dtype=np.uint8) pose = cv.viz_Affine3d() pose = pose.rotate((0, 0.8, 0)); viz = cv.viz_Viz3d("show_cloud_random_color") viz.setBackgroundMeshLab() viz.showWidget("coosys", cv.viz_WCoordinateSystem()) viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud, colors), pose) viz.showWidget("text2d", cv.viz_WText("Random color cloud", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_cloud_masked(self): dragon_cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply")) qnan = np.NAN for idx in range(dragon_cloud.shape[0]): if idx % 15 != 0: dragon_cloud[idx,:] = qnan pose = cv.viz_Affine3d() pose = pose.rotate((0, 0.8, 0)) viz = cv.viz_Viz3d("show_cloud_masked"); viz.showWidget("coosys", cv.viz_WCoordinateSystem()) viz.showWidget("dragon", cv.viz_WCloud(dragon_cloud), pose) viz.showWidget("text2d", cv.viz_WText("Nan masked cloud", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_cloud_collection(self): cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply")) ccol = cv.viz_WCloudCollection() pose = cv.viz_Affine3d() pose1 = cv.viz_Affine3d().translate((0, 0, 0)).rotate((np.pi/2, 0, 0)) ccol.addCloud(cloud, cv.viz_Color().white(), cv.viz_Affine3d().translate((0, 0, 0)).rotate((np.pi/2, 0, 0))) ccol.addCloud(cloud, cv.viz_Color().blue(), cv.viz_Affine3d().translate((1, 0, 0))) ccol.addCloud(cloud, cv.viz_Color().red(), cv.viz_Affine3d().translate((2, 0, 0))) ccol.finalize(); viz = cv.viz_Viz3d("show_cloud_collection") viz.setBackgroundColor(cv.viz_Color().mlab()) viz.showWidget("coosys", cv.viz_WCoordinateSystem()); viz.showWidget("ccol", ccol); viz.showWidget("text2d", cv.viz_WText("Cloud collection", (20, 20), 20, cv.viz_Color(0, 255,0 ))) viz.spinOnce(500, True) def test_viz_show_painted_clouds(self): cloud,_,_ = cv.viz.readCloud(self.find_file("viz/dragon.ply")) viz = cv.viz_Viz3d("show_painted_clouds") viz.setBackgroundMeshLab() viz.showWidget("coosys", cv.viz_WCoordinateSystem()) pose1 = cv.viz_Affine3d((0.0, -np.pi/2, 0.0), (-1.5, 0.0, 0.0)) pose2 = cv.viz_Affine3d((0.0, np.pi/2, 0.0), (1.5, 0.0, 0.0)) viz.showWidget("cloud1", cv.viz_WPaintedCloud(cloud), pose1) viz.showWidget("cloud2", cv.viz_WPaintedCloud(cloud, (0.0, -0.75, -1.0), (0.0, 0.75, 0.0)), pose2); viz.showWidget("cloud3", cv.viz_WPaintedCloud(cloud, (0.0, 0.0, -1.0), (0.0, 0.0, 1.0), cv.viz_Color().blue(), cv.viz_Color().red())) viz.showWidget("arrow", cv.viz_WArrow((0.0, 1.0, -1.0), (0.0, 1.0, 1.0), 0.009, cv.viz_Color())) viz.showWidget("text2d", cv.viz_WText("Painted clouds", (20, 20), 20, cv.viz_Color(0, 255, 0))) viz.spinOnce(500, True) def test_viz_show_mesh(self): mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply")) viz = cv.viz_Viz3d("show_mesh") viz.showWidget("coosys", cv.viz_WCoordinateSystem()); viz.showWidget("mesh", cv.viz_WMesh(mesh), cv.viz_Affine3d().rotate((0, 0.8, 0))); viz.showWidget("text2d", cv.viz_WText("Just mesh", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_mesh_random_colors(self): mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply")) mesh.colors = np.random.randint(0, 255, size=mesh.colors.shape, dtype=np.uint8) viz = cv.viz_Viz3d("show_mesh") viz.showWidget("coosys", cv.viz_WCoordinateSystem()); viz.showWidget("mesh", cv.viz_WMesh(mesh), cv.viz_Affine3d().rotate((0, 0.8, 0))) 
viz.setRenderingProperty("mesh", cv.viz.SHADING, cv.viz.SHADING_PHONG) viz.showWidget("text2d", cv.viz_WText("Random color mesh", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_textured_mesh(self): lena = cv.imread(self.find_file("viz/lena.png")) angle = np.arange(0,64) points0 = np.vstack((np.zeros(shape=angle.shape, dtype=np.float32), np.cos(angle * np.pi /128), np.sin(angle* np.pi /128))) points1 = np.vstack((1.57 * np.ones(shape=angle.shape, dtype=np.float32),np.cos(angle* np.pi /128), np.sin(angle* np.pi /128))) tcoords0 = np.vstack((np.zeros(shape=angle.shape, dtype=np.float32), angle / 64)) tcoords1 = np.vstack((np.ones(shape=angle.shape, dtype=np.float32), angle / 64)) points = np.zeros(shape=(points0.shape[0], points0.shape[1] * 2 ),dtype=np.float32) tcoords = np.zeros(shape=(tcoords0.shape[0], tcoords0.shape[1] * 2),dtype=np.float32) tcoords[:,0::2] = tcoords0 tcoords[:,1::2] = tcoords1 points[:,0::2] = points0 * 0.75 points[:,1::2] = points1 * 0.75 polygons = np.zeros(shape=(4 * (points.shape[1]-2)+1),dtype=np.int32) for idx in range(points.shape[1] // 2 - 1): polygons[8 * idx: 8 * (idx + 1)] = [3, 2*idx, 2*idx+1, 2*idx+2, 3, 2*idx+1, 2*idx+2, 2*idx+3] mesh = cv.viz_Mesh() mesh.cloud = points.transpose().reshape(1,points.shape[1],points.shape[0]) mesh.tcoords = tcoords.transpose().reshape(1,tcoords.shape[1],tcoords.shape[0]) mesh.polygons = polygons.reshape(1, 4 * (points.shape[1]-2)+1) mesh.texture = lena viz = cv.viz_Viz3d("show_textured_mesh") viz.setBackgroundMeshLab(); viz.showWidget("coosys", cv.viz_WCoordinateSystem()); viz.showWidget("mesh", cv.viz_WMesh(mesh)) viz.setRenderingProperty("mesh", cv.viz.SHADING, cv.viz.SHADING_PHONG) viz.showWidget("text2d", cv.viz_WText("Textured mesh", (20, 20), 20, cv.viz_Color().green())); viz.spinOnce(500, True) def test_viz_show_polyline(self): palette = [ cv.viz_Color().red(), cv.viz_Color().green(), cv.viz_Color().blue(), cv.viz_Color().gold(), cv.viz_Color().raspberry(), cv.viz_Color().bluberry(), cv.viz_Color().lime()] palette_size = len(palette) polyline = np.zeros(shape=(1, 32, 3), dtype=np.float32) colors = np.zeros(shape=(1, 32, 3), dtype=np.uint8) for i in range(polyline.shape[1]): polyline[0,i,0] = i / 16.0 polyline[0,i,1] = np.cos(i * np.pi/6) polyline[0,i,2] = np.sin(i * np.pi/6) colors[0,i,0] = palette[i % palette_size].get_blue() colors[0,i,1] = palette[i % palette_size].get_green() colors[0,i,2] = palette[i % palette_size].get_red() viz = cv.viz_Viz3d("show_polyline") viz.showWidget("polyline", cv.viz_WPolyLine(polyline, colors)) viz.showWidget("coosys", cv.viz_WCoordinateSystem()) viz.showWidget("text2d", cv.viz_WText("Polyline", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_sampled_normals(self): mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply")) mesh.normals = cv.viz.computeNormals(mesh) pose = cv.viz_Affine3d().rotate((0, 0.8, 0)) viz = cv.viz_Viz3d("show_sampled_normals") viz.showWidget("mesh", cv.viz_WMesh(mesh), pose) viz.showWidget("normals", cv.viz_WCloudNormals(mesh.cloud, mesh.normals, 30, 0.1, cv.viz_Color().green()), pose) viz.setRenderingProperty("normals", cv.viz.LINE_WIDTH, 2.0) viz.showWidget("text2d", cv.viz_WText("Cloud or mesh normals", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True); def test_viz_show_cloud_shaded_by_normals(self): mesh = cv.viz.readMesh(self.find_file("viz/dragon.ply")) mesh.normals = cv.viz.computeNormals(mesh) pose = cv.viz_Affine3d().rotate((0, 0.8, 0)) cloud = cv.viz_WCloud(mesh.cloud, 
cv.viz_Color().white(), mesh.normals) cloud.setRenderingProperty(cv.viz.SHADING, cv.viz.SHADING_GOURAUD) viz = cv.viz_Viz3d("show_cloud_shaded_by_normals") viz.showWidget("cloud", cloud, pose) viz.showWidget("text2d", cv.viz_WText("Cloud shaded by normals", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_image_method(self): lena = cv.imread(self.find_file("viz/lena.png")) lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY) viz = cv.viz_Viz3d("show_image_method") viz.showImage(lena) viz.spinOnce(1500, True) viz.showImage(lena, (lena.shape[1], lena.shape[0])) viz.spinOnce(1500, True) #cv.viz.imshow("show_image_method", lena_gray).spinOnce(500, True) BUG def test_viz_show_follower(self): viz = cv.viz_Viz3d("show_follower") viz.showWidget("coos", cv.viz_WCoordinateSystem()) viz.showWidget("cube", cv.viz_WCube()) text_3d = cv.viz_WText3D("Simple 3D follower", (-0.5, -0.5, 0.5), 0.125, True, cv.viz_Color().green()) viz.showWidget("t3d_2", text_3d) viz.showWidget("text2d", cv.viz_WText("Follower: text always facing camera", (20, 20), 20, cv.viz_Color().green())) viz.setBackgroundMeshLab() viz.spinOnce(500, True) text_3d.setText("Updated follower 3D") viz.spinOnce(500, True) def test_viz_show_trajectory_reposition(self): mat, path = generate_test_trajectory() viz = cv.viz_Viz3d("show_trajectory_reposition_to_origin") viz.showWidget("coos", cv.viz_WCoordinateSystem()) viz.showWidget("sub3", cv.viz_WTrajectory(mat[0: len(path) // 3,:,:], cv.viz.PyWTrajectory_BOTH, 0.2, cv.viz_Color().brown()), path[0].inv()) viz.showWidget("text2d", cv.viz_WText("Trajectory resposition to origin", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) def test_viz_show_trajectories(self): mat, path = generate_test_trajectory() size =len(path) sub0 = np.copy(mat[0: size//10+1,::]) sub1 = np.copy(mat[size//10: size//5+1,::]) sub2 = np.copy(mat[size//5: 11*size//12,::]) sub3 = np.copy(mat[11 * size // 12 : size,::]) sub4 = np.copy(mat[3 * size//4: 33*size//40,::]) sub5 = np.copy(mat[11*size//12: size,::]) K = np.array([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],dtype=np.float64) viz = cv.viz_Viz3d("show_trajectories") viz.showWidget("coos", cv.viz_WCoordinateSystem()) viz.showWidget("sub0", cv.viz_WTrajectorySpheres(sub0, 0.25, 0.07)) viz.showWidget("sub1", cv.viz_WTrajectory(sub1, cv.viz.PyWTrajectory_PATH, 0.2, cv.viz_Color().brown())) viz.showWidget("sub2", cv.viz_WTrajectory(sub2, cv.viz.PyWTrajectory_FRAMES, 0.2)) viz.showWidget("sub3", cv.viz_WTrajectory(sub3, cv.viz.PyWTrajectory_BOTH, 0.2, cv.viz_Color().green())) viz.showWidget("sub4", cv.viz_WTrajectoryFrustums(sub4, K, 0.3, cv.viz_Color().yellow())) viz.showWidget("sub5", cv.viz_WTrajectoryFrustums(sub5, (0.78, 0.78), 0.15, cv.viz_Color().magenta())) #BUG viz.showWidget("text2d", cv.viz_WText("Different kinds of supported trajectories", (20, 20), 20, cv.viz_Color().green())) i = 0 for num in range(50): i = i - 1 a = i % 360 pose = (np.sin(a * np.pi/180)* 7.5, 0.7, np.cos(a * np.pi/180)* 7.5) viz.setViewerPose(cv.viz.makeCameraPose(pose , (0.0, 0.5, 0.0), (0.0, 0.1, 0.0))); viz.spinOnce(100, True) viz.resetCamera() viz.spinOnce(500, True) def test_viz_show_camera_positions(self): K = np.array([[1024.0, 0.0, 320.0], [0.0, 1024.0, 240.0], [0.0, 0.0, 1.0]],dtype=np.float64) lena = cv.imread(self.find_file("viz/lena.png")) lena_gray = cv.cvtColor(lena, cv.COLOR_BGR2GRAY) poses = [] for i in range(2): pose = (5 * np.sin(3.14 + 2.7 + i*60 * np.pi/180), 2 - i*1.5, 5 * np.cos(3.14 + 2.7 + i*60 * np.pi/180)) 
poses.append(cv.viz.makeCameraPose(pose, (0.0, 0.0, 0.0), (0.0, -0.1, 0.0))) viz = cv.viz_Viz3d("show_camera_positions") viz.showWidget("sphe", cv.viz_WSphere((0,0,0), 1.0, 10, cv.viz_Color().orange_red())) viz.showWidget("coos", cv.viz_WCoordinateSystem(1.5)) viz.showWidget("pos1", cv.viz_WCameraPosition(0.75), poses[0]) viz.showWidget("pos2", cv.viz_WCameraPosition((0.78, 0.78), lena, 2.2, cv.viz_Color().green()), poses[0]) viz.showWidget("pos3", cv.viz_WCameraPosition(0.75), poses[0]) viz.showWidget("pos4", cv.viz_WCameraPosition(K, lena_gray, 3, cv.viz_Color().indigo()), poses[1]) viz.showWidget("text2d", cv.viz_WText("Camera positions with images", (20, 20), 20, cv.viz_Color().green())) viz.spinOnce(500, True) """ TEST(Viz, show_widget_merger) { WWidgetMerger merger; merger.addWidget(WCube(Vec3d::all(0.0), Vec3d::all(1.0), true, Color::gold())); RNG& rng = theRNG(); for(int i = 0; i < 77; ++i) { Vec3b c; rng.fill(c, RNG::NORMAL, Scalar::all(128), Scalar::all(48), true); merger.addWidget(WSphere(Vec3d(c)*(1.0/255.0), 7.0/255.0, 10, Color(c[2], c[1], c[0]))); } merger.finalize(); Viz3d viz("show_mesh_random_color"); viz.showWidget("coo", WCoordinateSystem()); viz.showWidget("merger", merger); viz.showWidget("text2d", WText("Widget merger", Point(20, 20), 20, Color::green())); viz.spinOnce(500, true); } """ if __name__ == '__main__': NewOpenCVTests.bootstrap()
107746
import math

from pygame.math import Vector2


class Geometry:

    @classmethod
    def polygon_point_intersection(cls, point_list, point):
        """
        Ray-casting point-in-polygon test.
        :param point_list: list of polygon vertices
        :param point: point to test
        :return: True if point is inside the polygon
        """
        n = len(point_list)
        inside = False
        x, y = point.x, point.y
        p1x, p1y = point_list[0]
        for i in range(n + 1):
            p2x, p2y = point_list[i % n]
            if y > min(p1y, p2y):
                if y <= max(p1y, p2y):
                    if x <= max(p1x, p2x):
                        if p1y != p2y:
                            xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                        if p1x == p2x or x <= xints:
                            inside = not inside
            p1x, p1y = p2x, p2y
        return inside

    @classmethod
    def circle_point_intersection(cls, circle_center, circle_radius, point):
        return point.distance_to(circle_center) <= circle_radius

    @classmethod
    def line_point_intersection(cls, segment, point):
        p1x, p1y = segment[0]
        p2x, p2y = segment[1]
        valor = (p2y - p1y) * point.x + (p1x - p2x) * point.y \
                - p1x * (p2y - p1y) - p1y * (p1x - p2x)
        return (p2y - p1y) * valor < 0 and ((p1y <= point.y < p2y) or (p2y <= point.y < p1y))

    @classmethod
    def inside_bounding_box(cls, point_list, point):
        xmax = -100000
        xmin = 100000
        ymax = -100000
        ymin = 100000
        for p in point_list:
            xmax = max(p.x, xmax)
            xmin = min(p.x, xmin)
            ymax = max(p.y, ymax)
            ymin = min(p.y, ymin)
        return xmin <= point.x < xmax and ymin <= point.y < ymax

    @classmethod
    def rotate_point(cls, pivot, point, angle):
        cx, cy = pivot.x, pivot.y
        px, py = point.x, point.y
        px -= cx
        py -= cy
        pxnew = px * math.cos(angle) - py * math.sin(angle)
        pynew = px * math.sin(angle) + py * math.cos(angle)
        px = pxnew + cx
        py = pynew + cy
        return Vector2(px, py)
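A couple of quick sanity checks for the Geometry helpers above; the expected values were worked out by hand, and pygame is assumed to be installed.

# Sanity checks for Geometry (values traced by hand).
import math
from pygame.math import Vector2

square = [Vector2(0, 0), Vector2(2, 0), Vector2(2, 2), Vector2(0, 2)]
assert Geometry.polygon_point_intersection(square, Vector2(1, 1)) is True
assert Geometry.polygon_point_intersection(square, Vector2(3, 1)) is False

rotated = Geometry.rotate_point(Vector2(0, 0), Vector2(1, 0), math.pi / 2)
assert rotated.distance_to(Vector2(0, 1)) < 1e-9  # 90-degree CCW rotation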
107749
import json from schematics.exceptions import ConversionError from nose.tools import eq_,raises from enum import Enum from moncli import column_value as cv from moncli.enums import ColumnType from moncli.types import StatusType # default class and data mapping declaration for common use class Status(Enum): ready = 'Ready' in_progress = 'In Progress' done = 'Done' def test_should_succeed_when_to_native_returns_a_str_when_passing_in_a_statusvalue_value_with_api_data_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status') format = status_type.to_native(column_value) # Assert eq_(format,'In Progress') def test_should_succeed_when_to_native_returns_an_enum_class_when_passing_in_a_status_value_value_with_api_data_has_enum_type_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 2}),settings_str=settings_str) # Act status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) format = status_type.to_native('Done') # Assert eq_(format,Status.done) def test_should_succeed_when_to_native_returns_a_str_when_passing_in_a_int_value_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status') status_type.to_native(column_value) format = status_type.to_native(1) # Assert eq_(format,'In Progress') def test_should_succeed_when_to_native_returns_a_str_when_passed_a_str_value_that_is_a_valid_label_index_int_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status') status_type.to_native(column_value) format = status_type.to_native("1") # Assert eq_(format,'In Progress') def test_should_succeed_when_to_native_returns_a_str_when_passed_a_str_value_that_is_a_valid_label_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status') status_type.to_native(column_value) format = status_type.to_native('In Progress') # Assert eq_(format,'In Progress') def test_should_suceed_when_to_native_return_enum_value_when_pass_str_and_status_type_has_enum_value_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 2}),settings_str=settings_str) # Act status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) format = status_type.to_native('Done') # Assert eq_(format,Status.done) def 
test_should_suceed_when_to_native_return_enum_value_when_pass_enum_value_and_status_type_has_enum_value_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 2}),settings_str=settings_str) # Act status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) format = status_type.to_native(Status.done) # Assert eq_(format,Status.done) @raises(ConversionError) def test_should_succeed_when_to_native_raises_a_conversionerror_when_passed_either_an_invalid_int_or_str_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) status_type = StatusType(title='Status') status_type.to_native(column_value) # Act status_type.to_native('Not Done') @raises(ConversionError) def test_should_succeed_when_to_native_raises_a_conversionerror_when_passed_invalid_value_with_enum_class_to_status_type(): # Arrange settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) # Act status_type.to_native('Not Done') def test_should_succeed_when_to_primitive_returns_empty_dict_when_passed_in_a_none_to_status_type(): settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) format = status_type.to_primitive(None) # Assert eq_(format,{}) def test_should_succeed_when_to_primitive_returns_export_dict_when_passed_in_a_str_value_to_status_type(): settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) format = status_type.to_primitive("Done") # Assert eq_(format,{'index': 2}) def test_should_succeed_when_to_primitive_returns_export_dict_when_passed_in_a_str_or_enum_class_value__to_status_type(): settings_str = json.dumps({'labels': {'0': 'Ready','1':'In Progress','2': 'Done'}}) column_value = cv.create_column_value(ColumnType.status, id='status', title='Status 1',value=json.dumps({'index': 1}),settings_str=settings_str) # Act status_type = StatusType(title='Status',as_enum=Status) status_type.to_native(column_value) format = status_type.to_primitive(Status.done) # Assert eq_(format,{'index': 2})
107752
import pytorch_lightning as pl
import torch.nn as nn
from loguru import logger
from torch.optim import Adam
from torchnlp.datasets import imdb_dataset  # type: ignore

from slp.data.collators import SequenceClassificationCollator
from slp.modules.classifier import RNNTokenSequenceClassifier
from slp.plbind.dm import PLDataModuleFromCorpus
from slp.plbind.helpers import FromLogits
from slp.plbind.module import RnnPLModule
from slp.plbind.trainer import make_trainer, watch_model
from slp.util.log import configure_logging

MAX_LENGTH = 1024
collate_fn = SequenceClassificationCollator(device="cpu", max_length=MAX_LENGTH)
# collate_fn = SequenceClassificationCollator(device="cpu")


if __name__ == "__main__":
    pl.utilities.seed.seed_everything(seed=42)

    EXPERIMENT_NAME = "imdb-words-sentiment-classification"
    configure_logging(f"logs/{EXPERIMENT_NAME}")

    train, test = imdb_dataset(directory="./data/", train=True, test=True)
    raw_train = [d["text"] for d in train]
    labels_train = [d["sentiment"] for d in train]
    raw_test = [d["text"] for d in test]
    labels_test = [d["sentiment"] for d in test]

    ldm = PLDataModuleFromCorpus(
        raw_train,
        labels_train,
        test=raw_test,
        test_labels=labels_test,
        batch_size=64,
        batch_size_eval=32,
        collate_fn=collate_fn,
        pin_memory=True,
        num_workers=1,
        tokens="words",
        embeddings_file="./cache/glove.6B.50d.txt",
        embeddings_dim=50,
        lower=True,
        max_length=MAX_LENGTH,
        limit_vocab_size=-1,
        lang="en_core_web_md",
    )
    ldm.setup()

    model = RNNTokenSequenceClassifier(
        3,
        embeddings=ldm.embeddings,
        bidirectional=True,
        merge_bi="sum",
        finetune_embeddings=True,
        attention=True,
        nystrom=True,
        num_landmarks=32,
        num_heads=2,
    )

    optimizer = Adam([p for p in model.parameters() if p.requires_grad], lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    lm = RnnPLModule(
        model,
        optimizer,
        criterion,
        metrics={"acc": FromLogits(pl.metrics.classification.Accuracy())},
    )

    trainer = make_trainer(
        EXPERIMENT_NAME,
        max_epochs=100,
        gpus=1,
        save_top_k=1,
    )
    watch_model(trainer, model)

    trainer.fit(lm, datamodule=ldm)
    trainer.test(ckpt_path="best", test_dataloaders=ldm.test_dataloader())
107760
from unetgan.train_big import train_ubiggan
from unetgan.test_model import test_model
from unetgan.test_single import test_single

if __name__ == "__main__":
    # Only for 100k iters
    train_ubiggan(train_path=".", latent_dim=140, num_epochs=80)
    # test_model()
    # test_single()
107776
from .mplayer_pool import MplayerPool
from .mplayer_pool import ManagedMplayer
from .gstreamer_pool import GstreamerPool
from .gstreamer_pool import ManagedGstreamer
from .director_media_bridge import DirectorMediaBridge
from .mplayer_pool import DEFAULT_ARGS, SRV_QUERY, ROS_NODE_NAME
107800
import re

from emoji.unicode_codes import UNICODE_EMOJI
from nonebot import on_regex
from nonebot.params import RegexDict
from nonebot.plugin import PluginMetadata
from nonebot.adapters.onebot.v11 import MessageSegment

from .config import Config
from .data_source import mix_emoji

__plugin_meta__ = PluginMetadata(
    name="emoji合成",
    description="将两个emoji合成为一张图片",
    usage="{emoji1}+{emoji2},如:😎+😁",
    config=Config,
    extra={
        "unique_name": "emojimix",
        "example": "😎+😁",
        "author": "meetwq <<EMAIL>>",
        "version": "0.1.7",
    },
)

emojis = filter(lambda e: len(e) == 1, UNICODE_EMOJI["en"])
pattern = "(" + "|".join(re.escape(e) for e in emojis) + ")"

emojimix = on_regex(
    rf"^\s*(?P<code1>{pattern})\s*\+\s*(?P<code2>{pattern})\s*$",
    block=True,
    priority=13,
)


@emojimix.handle()
async def _(msg: dict = RegexDict()):
    emoji_code1 = msg["code1"]
    emoji_code2 = msg["code2"]
    result = await mix_emoji(emoji_code1, emoji_code2)
    if isinstance(result, str):
        await emojimix.finish(result)
    else:
        await emojimix.finish(MessageSegment.image(result))
107809
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K


class WarmUpLearningRateScheduler(keras.callbacks.Callback):
    """Warmup learning rate scheduler"""

    def __init__(self, warmup_batches, init_lr, verbose=0):
        """Constructor for warmup learning rate scheduler

        Arguments:
            warmup_batches {int} -- Number of batches for warmup.
            init_lr {float} -- Learning rate after warmup.

        Keyword Arguments:
            verbose {int} -- 0: quiet, 1: update messages. (default: {0})
        """
        super(WarmUpLearningRateScheduler, self).__init__()
        self.warmup_batches = warmup_batches
        self.init_lr = init_lr
        self.verbose = verbose
        self.batch_count = 0
        self.learning_rates = []

    def on_batch_end(self, batch, logs=None):
        self.batch_count = self.batch_count + 1
        lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(lr)

    def on_batch_begin(self, batch, logs=None):
        if self.batch_count <= self.warmup_batches:
            lr = self.batch_count * self.init_lr / self.warmup_batches
            K.set_value(self.model.optimizer.lr, lr)
            if self.verbose > 0:
                print('\nBatch %05d: WarmUpLearningRateScheduler setting learning '
                      'rate to %s.' % (self.batch_count + 1, lr))


if __name__ == '__main__':
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense

    # Create a model.
    model = Sequential()
    model.add(Dense(32, activation='relu', input_dim=100))
    model.add(Dense(10, activation='softmax'))
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Number of training samples.
    sample_count = 12

    # Total epochs to train.
    epochs = 7

    # Number of warmup epochs.
    warmup_epoch = 5

    # Training batch size, set small value here for demonstration purpose.
    batch_size = 4

    # Generate dummy data.
    data = np.random.random((sample_count, 100))
    labels = np.random.randint(10, size=(sample_count, 1))

    # Convert labels to categorical one-hot encoding.
    one_hot_labels = keras.utils.to_categorical(labels, num_classes=10)

    # Compute the number of warmup batches.
    warmup_batches = warmup_epoch * sample_count / batch_size

    # Create the learning rate scheduler.
    warm_up_lr = WarmUpLearningRateScheduler(warmup_batches, init_lr=0.001)

    # Train the model, iterating on the data in batches of `batch_size` samples.
    model.fit(data, one_hot_labels, epochs=epochs, batch_size=batch_size,
              verbose=0, callbacks=[warm_up_lr])
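The ramp the callback applies is easiest to see numerically; a worked sketch using the demo's own constants (all values derived from the code above, nothing new assumed).

# The warmup is linear: lr = batch_count * init_lr / warmup_batches.
# With the demo numbers (warmup_batches = 5 * 12 / 4 = 15, init_lr = 0.001):
#   batch 1  -> 0.001 * 1/15  ~= 6.7e-05
#   batch 8  -> 0.001 * 8/15  ~= 5.3e-04
#   batch 15 -> 0.001         (warmup complete)
expected_schedule = [b * 0.001 / 15 for b in range(1, 16)]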
107840
from __future__ import absolute_import, division, print_function

from concurrent.futures import Future

from dask import delayed
from daskos.delayed import MesosDelayed, MesosDelayedLeaf, mesos
from daskos.utils import key_split
from satyr.proxies.messages import Cpus, Disk, Mem


# tests are not modules, so these are not picklable
@mesos(cpus=0.1, mem=256, docker='test1')
def add(x, y):
    return x + y


@mesos(cpus=0.2, mem=128, docker='test2')
def mul(x, y):
    return x * y


add_params = {'docker': 'test1', 'force_pull': False,
              'resources': [Cpus(0.1), Mem(256), Disk(0)],
              'envs': {}, 'uris': []}
mul_params = {'docker': 'test2', 'force_pull': False,
              'resources': [Cpus(0.2), Mem(128), Disk(0)],
              'envs': {}, 'uris': []}


def test_mesos_is_delayed():
    def add(x, y):
        return x + y

    add1 = delayed(add)
    add2 = mesos(add)

    assert isinstance(add2, add1.__class__)
    assert add1(2, 3).compute() == add2(2, 3).compute()


def test_mesos_delayed_types():
    s = add(1, 2)
    m = mul(s, 10)
    z = add(s, m)

    assert isinstance(add, MesosDelayedLeaf)
    assert isinstance(mul, MesosDelayedLeaf)
    assert isinstance(s, MesosDelayed)
    assert isinstance(m, MesosDelayed)
    assert isinstance(z, MesosDelayed)


def test_mesos_delayed_params():
    s = add(1, 2)
    m = mul(s, 10)
    z = add(s, m)

    assert add.params[add.key] == add_params
    assert mul.params[mul.key] == mul_params

    for d in [s, m, z]:
        assert d.dask.keys() == d.params.keys()
        for key, params in d.params.items():
            fn = key_split(key)
            if fn == 'add':
                assert params == add_params
            elif fn == 'mul':
                assert params == mul_params


def test_mesos_delayed_compute():
    @mesos(cpus=0.1, mem=128)
    def add(x, y):
        return x + y

    @mesos(cpus=0.11, mem=129)
    def mul(x, y):
        return x * y

    s = add(1, 2)
    m = mul(s, 10)
    z = add(s, m)
    w = add(s, s + 2)  # s + 2 calculated in threads

    assert z.compute() == 33
    assert mul(s, z).compute() == 99
    assert w.compute() == 8


def test_mesos_executor_async_compute(executor):
    @mesos(cpus=0.11, mem=128)
    def add(x, y):
        return x + y

    @mesos(cpus=0.1, mem=129)
    def mul(x, y):
        return x * y

    s = add(1, 2)
    m = mul(s, 10)
    z = add(s, m)
    w = add(s, s + 2)  # s + 2 calculated in threads

    f = executor.compute(z)
    assert isinstance(f, Future)
    assert f.result() == 33
    assert executor.submit.call_count == 3

    fs = executor.compute([z, m, s])
    assert isinstance(fs, list)
    assert all([isinstance(r, Future) for r in fs])
    assert [r.result() for r in fs] == [33, 30, 3]
    assert executor.submit.call_count == 6

    f = executor.compute(w)
    assert isinstance(f, Future)
    assert f.result() == 8
    assert executor.submit.call_count == 8  # only incremented by 2 instead of 3


def test_mesos_executor_sync_compute(executor):
    @mesos(cpus=0.1, mem=128)
    def add(x, y):
        return x + y

    @mesos(cpus=0.2, mem=128)
    def mul(x, y):
        return x * y

    s = add(1, 2)
    m = mul(s, 10)
    z = add(s, m)

    f = executor.compute(z, sync=True)
    assert f == 33

    fs = executor.compute([z, m, s], sync=True)
    assert fs == [33, 30, 3]
107860
from .convnet import convnet4
from .resnet import resnet12, resnet18, resnet24
from .resnet import seresnet12
from .wresnet import wrn_28_10
from .resnet_standard import resnet50

model_pool = [
    'convnet4',
    'resnet12',
    'resnet18',
    'resnet24',
    'seresnet12',
    'wrn_28_10',
    'resnet50',
]

model_dict = {
    'wrn_28_10': wrn_28_10,
    'convnet4': convnet4,
    'resnet12': resnet12,
    'resnet18': resnet18,
    'resnet24': resnet24,
    'seresnet12': seresnet12,
    'resnet50': resnet50,
}
107916
from email.mime.image import MIMEImage
from email.utils import unquote
from pathlib import Path

from django.core.mail import EmailMessage, EmailMultiAlternatives, make_msgid

from .utils import UNSET


class AnymailMessageMixin(EmailMessage):
    """Mixin for EmailMessage that exposes Anymail features.

    Use of this mixin is optional. You can always just set Anymail
    attributes on any EmailMessage.

    (The mixin can be helpful with type checkers and other development
    tools that complain about accessing Anymail's added attributes on
    a regular EmailMessage.)
    """

    def __init__(self, *args, **kwargs):
        self.esp_extra = kwargs.pop('esp_extra', UNSET)
        self.envelope_sender = kwargs.pop('envelope_sender', UNSET)
        self.metadata = kwargs.pop('metadata', UNSET)
        self.send_at = kwargs.pop('send_at', UNSET)
        self.tags = kwargs.pop('tags', UNSET)
        self.track_clicks = kwargs.pop('track_clicks', UNSET)
        self.track_opens = kwargs.pop('track_opens', UNSET)
        self.template_id = kwargs.pop('template_id', UNSET)
        self.merge_data = kwargs.pop('merge_data', UNSET)
        self.merge_global_data = kwargs.pop('merge_global_data', UNSET)
        self.merge_metadata = kwargs.pop('merge_metadata', UNSET)
        self.anymail_status = AnymailStatus()
        super().__init__(*args, **kwargs)

    def attach_inline_image_file(self, path, subtype=None, idstring="img", domain=None):
        """Add inline image from file path to an EmailMessage, and return its content id"""
        assert isinstance(self, EmailMessage)
        return attach_inline_image_file(self, path, subtype, idstring, domain)

    def attach_inline_image(self, content, filename=None, subtype=None, idstring="img", domain=None):
        """Add inline image and return its content id"""
        assert isinstance(self, EmailMessage)
        return attach_inline_image(self, content, filename, subtype, idstring, domain)


class AnymailMessage(AnymailMessageMixin, EmailMultiAlternatives):
    pass


def attach_inline_image_file(message, path, subtype=None, idstring="img", domain=None):
    """Add inline image from file path to an EmailMessage, and return its content id"""
    pathobj = Path(path)
    filename = pathobj.name
    content = pathobj.read_bytes()
    return attach_inline_image(message, content, filename, subtype, idstring, domain)


def attach_inline_image(message, content, filename=None, subtype=None, idstring="img", domain=None):
    """Add inline image to an EmailMessage, and return its content id"""
    if domain is None:
        # Avoid defaulting to hostname that might end in '.com', because some ESPs
        # use Content-ID as filename, and Gmail blocks filenames ending in '.com'.
        domain = 'inline'  # valid domain for a msgid; will never be a real TLD

    content_id = make_msgid(idstring, domain)  # Content ID per RFC 2045 section 7 (with <...>)
    image = MIMEImage(content, subtype)
    image.add_header('Content-Disposition', 'inline', filename=filename)
    image.add_header('Content-ID', content_id)
    message.attach(image)
    return unquote(content_id)  # Without <...>, for use as the <img> tag src


ANYMAIL_STATUSES = [
    'sent',      # the ESP has sent the message (though it may or may not get delivered)
    'queued',    # the ESP will try to send the message later
    'invalid',   # the recipient email was not valid
    'rejected',  # the recipient is blacklisted
    'failed',    # the attempt to send failed for some other reason
    'unknown',   # anything else
]


class AnymailRecipientStatus:
    """Information about an EmailMessage's send status for a single recipient"""

    def __init__(self, message_id, status):
        try:
            # message_id must be something that can be put in a set
            # (see AnymailStatus.set_recipient_status)
            set([message_id])
        except TypeError:
            raise TypeError("Invalid message_id %r is not scalar type" % message_id)
        if status is not None and status not in ANYMAIL_STATUSES:
            raise ValueError("Invalid status %r" % status)
        self.message_id = message_id  # ESP message id
        self.status = status  # one of ANYMAIL_STATUSES, or None for not yet sent to ESP

    def __repr__(self):
        return "AnymailRecipientStatus({message_id!r}, {status!r})".format(
            message_id=self.message_id, status=self.status)


class AnymailStatus:
    """Information about an EmailMessage's send status for all recipients"""

    def __init__(self):
        self.message_id = None  # set of ESP message ids across all recipients, or bare id if only one, or None
        self.status = None  # set of ANYMAIL_STATUSES across all recipients, or None for not yet sent to ESP
        self.recipients = {}  # per-recipient: { email: AnymailRecipientStatus, ... }
        self.esp_response = None

    def __repr__(self):
        def _repr(o):
            if isinstance(o, set):
                # force sorted order, for reproducible testing
                item_reprs = [repr(item) for item in sorted(o)]
                return "{%s}" % ", ".join(item_reprs)
            else:
                return repr(o)

        details = ["status={status}".format(status=_repr(self.status))]
        if self.message_id:
            details.append("message_id={message_id}".format(message_id=_repr(self.message_id)))
        if self.recipients:
            details.append("{num_recipients} recipients".format(num_recipients=len(self.recipients)))
        return "AnymailStatus<{details}>".format(details=", ".join(details))

    def set_recipient_status(self, recipients):
        self.recipients.update(recipients)
        recipient_statuses = self.recipients.values()
        self.message_id = set([recipient.message_id for recipient in recipient_statuses])
        if len(self.message_id) == 1:
            self.message_id = self.message_id.pop()  # de-set-ify if single message_id
        self.status = set([recipient.status for recipient in recipient_statuses])
107941
import pandas as pd
import sys

pgefile = sys.argv[1]

df = pd.read_csv(pgefile, delim_whitespace=True)
df2 = df[sys.argv[2:]]

pge = df2.groupby("problem")

# Speedup ratios: rows 2/3 vs. rows 0/1 within each problem group.
for name, grp in pge:
    ac_t = grp["elapsed_seconds"].iloc[2] / grp["elapsed_seconds"].iloc[0]
    ac_m = grp["evald_models"].iloc[2] / grp["evald_models"].iloc[0]
    ac_r = ac_m / ac_t

    bd_t = grp["elapsed_seconds"].iloc[3] / grp["elapsed_seconds"].iloc[1]
    bd_m = grp["evald_models"].iloc[3] / grp["evald_models"].iloc[1]
    bd_r = bd_m / bd_t

    fstr = "{:21s} & {:.2f} & {:.2f} & {:.2f} & {:.2f} & {:.2f} & {:.2f}"
    out = fstr.format(name, ac_t, ac_m, ac_r, bd_t, bd_m, bd_r)
    print(out)

print("\n\n")

# Best R^2 per configuration, as LaTeX table rows.
for name, grp in pge:
    r2 = grp["best_r2"]
    fstr = "{:21s} & {:.3f} & {:.3f} & {:.3f} & {:.3f}"
    out = fstr.format(name, r2.iloc[0], r2.iloc[1], r2.iloc[2], r2.iloc[3])
    print(out)

print("\n\n")

# Average model size per configuration, as LaTeX table rows.
for name, grp in pge:
    ave_size = grp["ave_size"]
    fstr = "{:21s} & {:.0f} & {:.0f} & {:.0f} & {:.0f}"
    out = fstr.format(name, ave_size.iloc[0], ave_size.iloc[1], ave_size.iloc[2], ave_size.iloc[3])
    print(out)
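# Usage sketch for the script above (the file and script names here are
# hypothetical; the column names are the ones the script actually indexes):
#
#   python pge_tables.py results.txt problem elapsed_seconds evald_models best_r2 ave_size
#
# sys.argv[1] is a whitespace-delimited results file; the remaining arguments
# select the columns to keep before grouping by "problem". Each problem group
# is expected to contain at least four rows (the loops read .iloc[0] through
# .iloc[3]), one per method/setting combination.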
107956
import sqlite3

import psycopg2
from bs4 import BeautifulSoup
import requests
from sqlalchemy import create_engine  # db engine
import pandas as pd

"""
delays = [12, 3, 9, 21, 5, 6, 19, 7, 33, 11, 2, 17, 4]


def get_random_ua():
    random_ua = ''
    ua_file = 'ua_file.txt'
    try:
        with open(ua_file) as f:
            lines = f.readlines()
        if len(lines) > 0:
            while not random_ua.strip():
                prng = np.random.RandomState()
                index = prng.permutation(len(lines) - 1)
                idx = np.asarray(index, dtype=np.integer)[0]
                random_proxy = lines[int(idx)]
    except Exception as ex:
        print('Exception in random_ua')
        print(str(ex))
    finally:
        return random_ua


url = "https://futa.edu.ng/"
user_agent = get_random_ua()
"""

headers = {
    # 'user-agent': user_agent,
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/56.0.2924.87 Safari/537.36',
    # 'referer': 'https://google.ng/',
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    # 'Accept-Encoding': 'gzip, deflate, br',
    # 'Accept-Language': 'en-US,en;q=0.9',
    # 'Pragma': 'no-cache',
}

# make a request with headers
r = requests.get('https://covid19.ncdc.gov.ng', headers=headers, timeout=15)
# print(r.status_code)  # 200 for success

content = BeautifulSoup(r.text, 'lxml')  # parse the page

My_table = content.find('table', {'id': 'custom1'})  # table to be scraped, with id "custom1"
# links = My_table.findAll('b')  # all cases data seems to be in b tags
stately = My_table.findAll('td')  # all cell data is in td tags
# print(links)

# save all table cell text to a list
states = []
for state in stately:
    states.append(state.text.strip("\n"))
# print(states)

# each row has 5 cells (state name plus 4 numbers),
# so every 5th entry is a state name
somes = []
for i in range(0, len(states), 5):
    somes.append(states[i])
# print("states")
# print(somes)
# print("Total number of affected states:", len(somes)+1)

# save cases data (the non-name cells) to a list
all_cases = []
for i in range(0, len(states)):
    # skip the state-name cells
    if i % 5 != 0:
        all_cases.append(states[i])
# print("all cases")
# print(all_cases)

# save confirmed cases data to a list
confirmed_cases = []
for i in range(0, len(all_cases), 4):
    confirmed_cases.append(all_cases[i])
# print("confirmed")
# print(confirmed_cases)

# save admitted cases data to a list
admitted_cases = []
for i in range(1, len(all_cases), 4):
    admitted_cases.append(all_cases[i])
# print("admitted")
# print(admitted_cases)

# save discharged cases data to a list
discharged_cases = []
for i in range(2, len(all_cases), 4):
    discharged_cases.append(all_cases[i])
# print("recovered")
# print(discharged_cases)

# save deaths data to a list
deaths = []
for i in range(3, len(all_cases), 4):
    deaths.append(all_cases[i])
# print("deaths")
# print(deaths)

# take data to a pandas dataframe
df = pd.DataFrame()
df['States'] = somes
df['No_of_cases'] = confirmed_cases
df['No_on_admission'] = admitted_cases
df['No_discharged'] = discharged_cases
df['No_of_deaths'] = deaths
print('Dataframe\n', df)

# save data to csv
df.to_csv(r'ncovid.csv', index=True, index_label='id')
print("SUCCESS!!!")

# mysql engine
# engine = create_engine('mysql+pymysql://root:@localhost/ncovid')

# sqlite engine
# engine = sqlite3.connect(r"C:\Users\USER\Desktop\ncovid-19-api\api\db.sqlite3")

# connections for mysql
# con = MySQLdb.connect(host="localhost", user="root",
#                       passwd="", db="ncovid")

# postgres db engine
engine = create_engine('postgresql+psycopg2://postgres:mastersam@localhost/ncovid')

# write the dataframe to the "data" table
df.to_sql(con=engine, name='data', if_exists='replace', index=True, index_label='id')
# print('Data transferred from df to postgresql successfully!!!')

# checking the data
# print('checking the data...')
# conn = psycopg2.connect(host="localhost", database="ncovid",
#                         user="postgres", password="<PASSWORD>")
# cur = conn.cursor()
# cur.execute("SELECT * FROM confirmed")
# rows = cur.fetchall()
# for row in rows:
#     print(row)
# print('Done checking confirmed\nNow checking data!!!')
# cur.execute("SELECT * FROM data")
# rows = cur.fetchall()
# for row in rows:
#     print(row)
# print('Done checking data!!!')
# conn.close()
107968
def betweenness_centrality(G, k=None, normalized=True, weight=None,
                           endpoints=False, seed=None):
    # doesn't currently support `weight`, `k`, `endpoints`, `seed`
    query = """\
    CALL gds.betweenness.stream({
        nodeProjection: $node_label,
        relationshipProjection: {
            relType: {
                type: $relationship_type,
                orientation: $direction,
                properties: {}
            }
        }
    })
    YIELD nodeId, score
    RETURN gds.util.asNode(nodeId).%s AS node, score
    ORDER BY node ASC
    """ % G.identifier_property

    params = G.base_params()

    with G.driver.session() as session:
        result = {row["node"]: row["score"] for row in session.run(query, params)}
    return result


def closeness_centrality(G, u=None, distance=None, wf_improved=True, reverse=False):
    # doesn't currently support `distance`, `reverse`, `wf_improved`
    query = """\
    CALL gds.alpha.closeness.stream({
        nodeProjection: $node_label,
        relationshipProjection: {
            relType: {
                type: $relationship_type,
                orientation: $direction,
                properties: {}
            }
        }
    })
    YIELD nodeId, centrality
    RETURN gds.util.asNode(nodeId).%s AS node, centrality
    ORDER BY node ASC
    """ % G.identifier_property

    params = G.base_params()

    with G.driver.session() as session:
        result = {row["node"]: row["centrality"] for row in session.run(query, params)}

    if u:
        return result[u]
    return result


def pagerank(G, alpha=0.85, personalization=None, max_iter=100, tol=1.0e-8,
             nstart=None, weight='weight'):
    # doesn't currently support `personalization`, `tol`, `nstart`, `weight`
    query = """\
    CALL gds.pageRank.stream({
        nodeProjection: $node_label,
        relationshipProjection: {
            relType: {
                type: $relationship_type,
                orientation: $direction,
                properties: {}
            }
        },
        relationshipWeightProperty: null,
        dampingFactor: $dampingFactor,
        maxIterations: $iterations
    })
    YIELD nodeId, score
    WITH gds.util.asNode(nodeId).%s AS node, score
    RETURN node, score
    """ % G.identifier_property

    params = G.base_params()
    params["iterations"] = max_iter
    params["dampingFactor"] = alpha

    with G.driver.session() as session:
        result = {row["node"]: row["score"] for row in session.run(query, params)}
    return result
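# A minimal usage sketch for the wrappers above, commented out because it
# needs a live Neo4j instance with the GDS library installed. It assumes a
# graph handle `G` of the kind these functions expect: an object exposing
# `identifier_property`, `base_params()`, and a neo4j `driver`. `NeoGraph`
# is a hypothetical stand-in for that wrapper, not a real class here.
#
# G = NeoGraph(driver, node_label="Character", relationship_type="INTERACTS",
#              identifier_property="name")
# scores = pagerank(G, alpha=0.85, max_iter=20)
# top10 = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:10]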
108001
from flask import Flask
from flask import render_template
from flask import request
import json

import dbconfig

if dbconfig.test:
    from mockdbhelper import MockDBHelper as DBHelper
else:
    from dbhelper import DBHelper

app = Flask(__name__)
DB = DBHelper()


@app.route("/")
def home():
    crimes = DB.get_all_crimes()
    crimes = json.dumps(crimes)
    return render_template("home.html", crimes=crimes)


@app.route("/submitcrime", methods=['POST'])
def submitcrime():
    category = request.form.get("category")
    date = request.form.get("date")
    latitude = float(request.form.get("latitude"))
    longitude = float(request.form.get("longitude"))
    description = request.form.get("description")
    DB.add_crime(category, date, latitude, longitude, description)
    return home()


if __name__ == '__main__':
    app.run(debug=True)
108056
event_aliases = {
    'halloween 2020': 1,
    'candy': 2,
    'swimsuits 2020': 3,
    'maids': 5,
    'christmas 2020': 6,
    'countdown': 7,
    'monster hunter pt1': 9,
    'mh1': 9,
    'monster hunter pt2': 10,
    'mh2': 10,
}
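# Several aliases intentionally map to the same event id (e.g. 'mh1' and
# 'monster hunter pt1' are both 9), so resolving a user-supplied name is a
# single case-insensitive lookup. `resolve_event` is an illustrative helper,
# not part of the original module:
def resolve_event(name):
    """Return the event id for a known alias, or None."""
    return event_aliases.get(name.strip().lower())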
108060
import os
import logging

DOCKER_PID_CMD = "docker inspect {} --format='{{{{.State.Pid}}}}'"
NSS_CMD = "lsns -p {} -t pid | tail -n 1 | awk '{{print $1}}'"


def replace_namespace(text, args):
    nss = None
    text = text.replace("SAVE_NAMESPACE", """
    struct task_struct *t = (struct task_struct *) bpf_get_current_task();
    key.namespace = t->nsproxy->pid_ns_for_children->ns.inum;
    """)

    if args.container:
        try:
            pid_response = os.popen(DOCKER_PID_CMD.format(args.container))
            pid = int(pid_response.read().strip())
            nss_response = os.popen(NSS_CMD.format(pid))
            nss = int(nss_response.read().strip())
        except ValueError:
            msg = "Couldn't get namespace for container %s"
            logging.exception(msg, args.container)
            nss = None

    if args.namespace and not nss:
        nss = args.namespace

    if not nss:
        text = text.replace("CHECK_NAMESPACE", "")
        return text

    # starting from 4.18 it's possible to use cgroup_id:
    # key->cgroup_id = bpf_get_current_cgroup_id();
    text = text.replace("CHECK_NAMESPACE", """
    if (key.namespace != {})
        return 0;
    """.format(nss))
    return text


traditional = [
    (1024 ** 5, 'P'),
    (1024 ** 4, 'T'),
    (1024 ** 3, 'G'),
    (1024 ** 2, 'M'),
    (1024 ** 1, 'K'),
    (1024 ** 0, 'B'),
]

# SI system referenced by the doctests below; factors of 1000 instead of 1024.
si = [
    (1000 ** 5, 'P'),
    (1000 ** 4, 'T'),
    (1000 ** 3, 'G'),
    (1000 ** 2, 'M'),
    (1000 ** 1, 'K'),
    (1000 ** 0, 'B'),
]


def size(size_in_bytes, system=None):
    """Human-readable file size.

    Using the traditional system, where a factor of 1024 is used::

    >>> size(10)
    '10B'
    >>> size(100)
    '100B'
    >>> size(1000)
    '1000B'
    >>> size(2000)
    '1K'
    >>> size(10000)
    '9K'
    >>> size(20000)
    '19K'
    >>> size(100000)
    '97K'
    >>> size(200000)
    '195K'
    >>> size(1000000)
    '976K'
    >>> size(2000000)
    '1M'

    Using the SI system, with a factor 1000::

    >>> size(10, system=si)
    '10B'
    >>> size(100, system=si)
    '100B'
    >>> size(1000, system=si)
    '1K'
    >>> size(2000, system=si)
    '2K'
    >>> size(10000, system=si)
    '10K'
    >>> size(20000, system=si)
    '20K'
    >>> size(100000, system=si)
    '100K'
    >>> size(200000, system=si)
    '200K'
    >>> size(1000000, system=si)
    '1M'
    >>> size(2000000, system=si)
    '2M'
    """
    system = system or traditional
    for factor, suffix in system:
        if size_in_bytes >= factor:
            break
    amount = int(size_in_bytes / factor)
    if isinstance(suffix, tuple):
        singular, multiple = suffix
        if amount == 1:
            suffix = singular
        else:
            suffix = multiple
    return str(amount) + suffix
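# A quick sketch of replace_namespace in action, assuming an argparse-style
# `args` object with the `container` and `namespace` attributes the function
# reads (the namespace inode number below is made up):
#
# import argparse
# args = argparse.Namespace(container=None, namespace=4026531836)
# print(replace_namespace("... CHECK_NAMESPACE ...", args))
# # CHECK_NAMESPACE becomes an early `return 0` guard comparing key.namespace
# # against 4026531836; with neither option set it is stripped entirely.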
108073
import pickle
import base64

from flask import Flask, request

app = Flask(__name__)


@app.route("/")
def index():
    try:
        # WARNING: unpickling data from a client-controlled cookie allows
        # arbitrary code execution; this only makes sense as a deliberate
        # demonstration of that vulnerability.
        user = base64.b64decode(request.cookies.get('user'))
        user = pickle.loads(user)
        username = user["username"]
    except Exception:
        username = "Guest"

    return "Hello %s" % username


if __name__ == "__main__":
    app.run()
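# A safer variant of the cookie handling above, sketched with only the stdlib:
# store JSON rather than pickle, and verify an HMAC before trusting the data.
# SECRET_KEY and the "payload.signature" cookie layout are illustrative
# assumptions, not part of the original app.
import base64
import hashlib
import hmac
import json

SECRET_KEY = b"change-me"


def load_user_cookie(raw):
    """Decode a 'payload.signature' cookie; return the dict only if the HMAC matches."""
    payload_b64, _, signature = raw.partition(".")
    expected = hmac.new(SECRET_KEY, payload_b64.encode(), hashlib.sha256).hexdigest()
    if not hmac.compare_digest(signature, expected):
        return None
    return json.loads(base64.b64decode(payload_b64))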
108112
import os

import faker
import pandas as pd
from django.test import TestCase
from django_datajsonar.models import Node
from elasticsearch_dsl.connections import connections

from series_tiempo_ar_api.apps.dump.generator import constants
from series_tiempo_ar_api.apps.dump.generator.dta import DtaGenerator
from series_tiempo_ar_api.apps.dump.models import GenerateDumpTask, DumpFile
from series_tiempo_ar_api.apps.dump.tasks import enqueue_write_csv_task
from series_tiempo_ar_api.libs.utils.utils import index_catalog

samples_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'samples')
fake = faker.Faker()


class DtaGeneratorTests(TestCase):
    index = fake.pystr(max_chars=50).lower()

    @classmethod
    def setUpClass(cls):
        index_catalog('test_catalog',
                      os.path.join(samples_dir, 'distribution_daily_periodicity.json'),
                      cls.index)
        enqueue_write_csv_task()
        super(DtaGeneratorTests, cls).setUpClass()

    def test_generate_dta_no_csv_loaded(self):
        node = Node.objects.create(catalog_id="empty_catalog", catalog_url="test.com", indexable=True)
        task = GenerateDumpTask.objects.create()
        DtaGenerator(task.id).generate()
        self.assertFalse(DumpFile.objects.filter(node=node, file_type=DumpFile.TYPE_DTA))

    def test_generate_dta(self):
        task = GenerateDumpTask.objects.create()
        DtaGenerator(task.id).generate()
        csv = DumpFile.objects.get(file_type=DumpFile.TYPE_CSV,
                                   file_name=DumpFile.FILENAME_VALUES,
                                   node=None).file
        rows_len = len(pd.read_csv(csv))
        stata = DumpFile.objects.get(file_type=DumpFile.TYPE_DTA,
                                     file_name=DumpFile.FILENAME_VALUES,
                                     node=None).file
        self.assertGreater(rows_len, 0)
        self.assertEqual(rows_len, len(pd.read_stata(stata)))

    def test_values_dta_columns(self):
        task = GenerateDumpTask.objects.create()
        DtaGenerator(task.id).generate()
        stata = DumpFile.objects.get(file_type=DumpFile.TYPE_DTA,
                                     file_name=DumpFile.FILENAME_VALUES,
                                     node=None).file
        df = pd.read_stata(stata)
        self.assertListEqual(list(df.columns), constants.STATA_VALUES_COLS)

    @classmethod
    def tearDownClass(cls):
        connections.get_connection().indices.delete(cls.index)
        super(DtaGeneratorTests, cls).tearDownClass()
108157
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible_collections.community.general.tests.unit.compat import unittest
from ansible_collections.manala.roles.plugins.filter.users_groups import users_groups
from ansible.errors import AnsibleFilterError


class Test(unittest.TestCase):

    def test_not_list(self):
        with self.assertRaises(AnsibleFilterError) as error:
            users_groups(NotImplemented)
        self.assertEqual("Expected an iterable but was a <class 'NotImplementedType'>", str(error.exception))

    def test_not_groups_list(self):
        with self.assertRaises(AnsibleFilterError) as error:
            users_groups([], NotImplemented)
        self.assertEqual("Expected a groups iterable but was a <class 'NotImplementedType'>", str(error.exception))

    def test_skipped(self):
        self.assertListEqual([
            {'user': 'foo'},
        ], users_groups([
            {'user': 'foo'},
        ], [
            {'skipped': True},
        ]))

    def test(self):
        self.assertListEqual([
            {'user': 'foo', 'group': 'foo'},
            {'user': 'bar', 'group': 'bar'},
        ], users_groups([
            {'user': 'foo'},
            {'user': 'bar', 'group': 'baz'},
        ], [
            {'item': {'user': 'foo'}, 'stdout': 'foo'},
            {'item': {'user': 'bar'}, 'stdout': 'bar'},
        ]))
108185
import qcore
from qcore.asserts import AssertRaises


class Foo(metaclass=qcore.DisallowInheritance):
    pass


def test_disallow_inheritance():
    with AssertRaises(TypeError):
        class Bar(Foo):
            pass
108206
from .decorators import run_only_once
from .directives import GraphQLCostDirective, schema_with_cost_directive, cost_directive_source_doc
from .execution import ExtendedExecutionContext
from .utilities import build_schema_with_cost

__version__ = "0.4.0"

__all__ = [
    "run_only_once",
    "ExtendedExecutionContext",
    "GraphQLCostDirective",
    "schema_with_cost_directive",
    "cost_directive_source_doc",
    "build_schema_with_cost",
]
108222
from django.contrib import admin

from .models import Board, List, Item, Label, Comment, Attachment, Notification

admin.site.register(Board)
admin.site.register(List)
admin.site.register(Item)
admin.site.register(Label)
admin.site.register(Comment)
admin.site.register(Attachment)
admin.site.register(Notification)
108225
from random import choice

NETWORKING_AGENT_FILENAME = "networking/user-agents"

agents = None


def _get_agents():
    global agents
    if agents is None:
        # Lazily load and cache the user-agent list on first use.
        with open(NETWORKING_AGENT_FILENAME) as user_agents:
            agents = [x.strip() for x in user_agents.readlines()]
    return agents


def get_agent():
    return choice(_get_agents())
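# Usage sketch: rotate the User-Agent header per request. `url` is a
# placeholder, and the requests library is assumed to be installed:
#
# import requests
# response = requests.get(url, headers={"User-Agent": get_agent()})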
108228
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.test import Client
from suite.views import ClubCreate
from django.urls import reverse
from suite.models import Club


class View_Club_Search_TestCase(TestCase):

    def setUp(self):
        self.client = Client()
        self.club = Club.objects.create(club_name="Cool club", club_type="PUB",
                                        club_description="a club")

    def test_get_login(self):
        self.client.force_login(get_user_model().objects.get_or_create(first_name='testuser')[0])
        response = self.client.get(reverse('suite:club_search'))
        self.assertEqual(response.status_code, 200)

    def test_get_not_logged_in(self):
        response = self.client.get(reverse('suite:club_search'))
        self.assertRedirects(response, "/?next=" + reverse('suite:club_search'), 302, 200)

    def test_post(self):
        self.client.force_login(get_user_model().objects.get_or_create(first_name='testuser')[0])
        data = {'keyword': "club"}
        response = self.client.post(reverse('suite:club_search'), data, follow=True)
        self.assertContains(response, "Cool club")

    def test_post_no_club(self):
        self.client.force_login(get_user_model().objects.get_or_create(first_name='testuser')[0])
        # no club name matches this keyword
        data = {'keyword': "test"}
        response = self.client.post(reverse('suite:club_search'), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, "Cool club")
108280
import copy

import numpy as np


class Objective():
    pass


class MeanSquaredError(Objective):
    # inherits Objective so get_objective(MeanSquaredError()) passes the
    # isinstance check below

    def calc_acc(self, y_hat, y):
        return 0

    def calc_loss(self, y_hat, y):
        loss = np.mean(np.sum(np.power(y_hat - y, 2), axis=1))
        return 0.5 * loss

    def backward(self, y_hat, y):
        return y_hat - y


class MeanAbsoluteError(Objective):
    # def __init__(self):
    #     super(MeanAbsoluteError, self).__init__('linear')

    def calc_acc(self, y_hat, y):
        return 0

    def calc_loss(self, y_hat, y):
        return np.mean(np.sum(np.absolute(y_hat - y), axis=1)).tolist()

    def backward(self, y_hat, y):
        pos = np.where((y_hat - y) < 0)
        mask = np.ones_like(y_hat)
        mask[pos] = -1
        return mask


class BinaryCrossEntropy(Objective):
    # def __init__(self):
    #     super(BinaryCrossEntropy, self).__init__('sigmoid')

    def calc_acc(self, y_hat, y):
        y_pred = y_hat >= 0.5
        acc = np.mean(y_pred == y).tolist()
        return acc

    def calc_loss(self, y_hat, y):
        loss = -np.multiply(y, np.log(y_hat)) - np.multiply(1 - y, np.log(1 - y_hat))
        return np.mean(np.sum(loss, axis=1)).tolist()

    def backward(self, y_hat, y):
        avg = np.prod(np.asarray(y_hat.shape[:-1]))
        return (np.divide(1 - y, 1 - y_hat) - np.divide(y, y_hat)) / avg


class SparseCategoricalCrossEntropy(Objective):

    def calc_acc(self, y_hat, y):
        acc = (np.argmax(y_hat, axis=-1) == np.argmax(y, axis=-1))
        acc = np.mean(acc).tolist()
        return acc

    def calc_loss(self, y_hat, y):
        avg = np.prod(np.asarray(y_hat.shape[:-1]))
        loss = -np.sum(np.multiply(y, np.log(y_hat))) / avg
        return loss.tolist()

    def backward(self, y_hat, y_true):
        avg = np.prod(np.asarray(y_hat.shape[:-1]))
        return (y_hat - y_true) / avg


class CategoricalCrossEntropy(Objective):

    def calc_acc(self, y_hat, y):
        acc = (np.argmax(y_hat, axis=-1) == y)
        acc = np.mean(acc).tolist()
        return acc

    def calc_loss(self, y_hat, y_true):
        to_sum_dim = np.prod(y_hat.shape[:-1])
        last_dim = y_hat.shape[-1]
        N = y_hat.shape[0]
        probs = y_hat.reshape(-1, last_dim)
        y_flat = y_true.reshape(to_sum_dim)
        loss = -np.sum(np.log(probs[np.arange(to_sum_dim), y_flat])) / N
        return loss
        # alternative fancy-indexing formulation:
        # to_sum_shape = np.asarray(y_hat.shape[:-1])
        # avg = np.prod(to_sum_shape)
        # idx = []
        # for s in to_sum_shape:
        #     idx.append(np.arange(s).tolist())
        # idx.append(y.flatten().tolist())
        # loss = -np.sum(np.log(y_hat[idx])) / avg
        # return loss.tolist()

    def backward(self, y_hat, y_true):
        # alternative fancy-indexing formulation:
        # to_sum_shape = np.asarray(y_hat.shape[:-1])
        # avg = np.prod(to_sum_shape)
        # idx = []
        # for s in to_sum_shape:
        #     idx.append(np.arange(s).tolist())
        # idx.append(y_true.flatten().tolist())
        # y_hat[idx] -= 1
        # return y_hat / avg
        to_sum_dim = np.prod(y_hat.shape[:-1])
        last_dim = y_hat.shape[-1]
        N = y_hat.shape[0]
        probs = y_hat.reshape(-1, last_dim)
        y_flat = y_true.reshape(to_sum_dim)
        probs[np.arange(to_sum_dim), y_flat] -= 1
        probs /= N
        output = probs.reshape(y_hat.shape)
        return output


def get_objective(objective):
    if isinstance(objective, str):
        objective = objective.lower()
        if objective in ['categoricalcrossentropy', 'categorical_crossentropy', 'categorical_cross_entropy']:
            return CategoricalCrossEntropy()
        elif objective in ['sparsecategoricalcrossentropy', 'sparse_categorical_crossentropy', 'sparse_categorical_cross_entropy']:
            return SparseCategoricalCrossEntropy()
        elif objective in ['binarycrossentropy', 'binary_cross_entropy', 'binary_crossentropy']:
            return BinaryCrossEntropy()
        elif objective in ['meansquarederror', 'mean_squared_error', 'mse']:
            return MeanSquaredError()
        elif objective in ['meanabsoluteerror', 'mean_absolute_error', 'mae']:
            return MeanAbsoluteError()
        else:
            # previously fell through and returned None for unknown names
            raise ValueError('unknown objective type!')
    elif isinstance(objective, Objective):
        return copy.deepcopy(objective)
    else:
        raise ValueError('unknown objective type!')
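# Usage sketch for the registry above, using only names defined in this module:
#
# import numpy as np
# obj = get_objective('mse')            # -> MeanSquaredError instance
# y_hat = np.array([[0.9], [0.2]])
# y = np.array([[1.0], [0.0]])
# loss = obj.calc_loss(y_hat, y)        # 0.5 * mean of per-sample squared error
# grad = obj.backward(y_hat, y)         # dL/dy_hat, same shape as y_hat
#
# Passing an Objective instance instead of a string returns a deep copy, so a
# configured objective can be reused safely across models.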
108330
from torchtext import data
from torch.utils.data import DataLoader
from graph import MTBatcher, get_mt_dataset, MTDataset, DocumentMTDataset
from modules import make_translation_model
from optim import get_wrapper
from loss import LabelSmoothing

import numpy as np
import torch as th
import torch.optim as optim
import argparse
import yaml
import os


def run(proc_id, n_gpus, devices, config, checkpoint):
    th.manual_seed(config['seed'])
    np.random.seed(config['seed'])
    th.cuda.manual_seed_all(config['seed'])

    dev_id = devices[proc_id]
    if n_gpus > 1:
        dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
            master_ip='127.0.0.1', master_port='12345')
        world_size = n_gpus
        th.distributed.init_process_group(backend="nccl",
                                          init_method=dist_init_method,
                                          world_size=world_size,
                                          rank=dev_id)

    _dataset = config['dataset']
    grad_accum = config['grad_accum']

    if _dataset == 'iwslt':
        TEXT = [data.Field(batch_first=True) for _ in range(2)]
        dataset = get_mt_dataset('iwslt')
        train, dev, test = dataset.splits(exts=('.tc.zh', '.tc.en'), fields=TEXT, root='./data')
        train = DocumentMTDataset(train, context_length=config['context_len'],
                                  part=(proc_id, n_gpus))
        dev = DocumentMTDataset(dev, context_length=config['context_len'])
        test = DocumentMTDataset(test, context_length=config['context_len'])
        vocab_zh, vocab_en = dataset.load_vocab(root='./data')
        print('vocab size: ', len(vocab_zh), len(vocab_en))
        vocab_sizes = [len(vocab_zh), len(vocab_en)]
        TEXT[0].vocab = vocab_zh
        TEXT[1].vocab = vocab_en
        batcher = MTBatcher(TEXT, graph_type=config['graph_type'],
                            **config.get('graph_attrs', {}))
        train_loader = DataLoader(dataset=train,
                                  batch_size=config['batch_size'] // n_gpus,
                                  collate_fn=batcher,
                                  shuffle=True,
                                  num_workers=6)
        dev_loader = DataLoader(dataset=dev,
                                batch_size=config['dev_batch_size'],
                                collate_fn=batcher,
                                shuffle=False)
        test_loader = DataLoader(dataset=test,
                                 batch_size=config['dev_batch_size'],
                                 collate_fn=batcher,
                                 shuffle=False)
    elif _dataset == 'wmt':
        TEXT = data.Field(batch_first=True)
        dataset = get_mt_dataset('wmt14')
        train, dev, test = dataset.splits(exts=['.en', '.de'], fields=[TEXT, TEXT], root='./data')
        train = MTDataset(train, part=(proc_id, n_gpus))
        dev = MTDataset(dev)
        test = MTDataset(test)
        vocab = dataset.load_vocab(root='./data')[0]
        print('vocab size: ', len(vocab))
        vocab_sizes = [len(vocab)]
        TEXT.vocab = vocab
        batcher = MTBatcher(TEXT, graph_type=config['graph_type'],
                            **config.get('graph_attrs', {}))
        train_loader = DataLoader(dataset=train,
                                  batch_size=config['batch_size'] // n_gpus,
                                  collate_fn=batcher,
                                  shuffle=True,
                                  num_workers=6)
        dev_loader = DataLoader(dataset=dev,
                                batch_size=config['dev_batch_size'],
                                collate_fn=batcher,
                                shuffle=False)
        test_loader = DataLoader(dataset=test,
                                 batch_size=config['dev_batch_size'],
                                 collate_fn=batcher,
                                 shuffle=False)
    elif _dataset == 'multi':
        TEXT = [data.Field(batch_first=True) for _ in range(2)]
        dataset = get_mt_dataset('multi30k')
        train, dev, test = dataset.splits(exts=['.en.atok', '.de.atok'], fields=TEXT, root='./data')
        train = MTDataset(train, part=(proc_id, n_gpus))
        dev = MTDataset(dev)
        test = MTDataset(test)
        vocab_en, vocab_de = dataset.load_vocab(root='./data')
        print('vocab size: ', len(vocab_en), len(vocab_de))
        vocab_sizes = [len(vocab_en), len(vocab_de)]
        TEXT[0].vocab = vocab_en
        TEXT[1].vocab = vocab_de
        batcher = MTBatcher(TEXT, graph_type=config['graph_type'],
                            **config.get('graph_attrs', {}))
        train_loader = DataLoader(dataset=train,
                                  batch_size=config['batch_size'] // n_gpus,
                                  collate_fn=batcher,
                                  shuffle=True,
                                  num_workers=6)
        dev_loader = DataLoader(dataset=dev,
                                batch_size=config['dev_batch_size'],
                                collate_fn=batcher,
                                shuffle=False)
        test_loader = DataLoader(dataset=test,
                                 batch_size=config['dev_batch_size'],
                                 collate_fn=batcher,
                                 shuffle=False)

    dim_model = config['dim_model']
    dim_ff = config['dim_ff']
    num_heads = config['num_heads']
    n_layers = config['n_layers']
    m_layers = config['m_layers']
    dropouti = config['dropouti']
    dropouth = config['dropouth']
    dropouta = config['dropouta']
    dropoutc = config['dropoutc']
    rel_pos = config['rel_pos']

    model = make_translation_model(vocab_sizes, dim_model, dim_ff, num_heads,
                                   n_layers, m_layers,
                                   dropouti=dropouti, dropouth=dropouth,
                                   dropouta=dropouta, dropoutc=dropoutc,
                                   rel_pos=rel_pos)

    if checkpoint != -1:
        with open('checkpoints/{}-{}.pkl'.format(checkpoint, config['save_name']), 'rb') as f:
            state_dict = th.load(f, map_location=lambda storage, loc: storage)
            model.load_state_dict(state_dict)

    # tie weight
    if config.get('share_weight', False):
        model.embed[-1].lut.weight = model.generator.proj.weight

    criterion = LabelSmoothing(vocab_sizes[-1], smoothing=0.1)
    device = th.device(dev_id)
    th.cuda.set_device(device)
    model, criterion = model.to(device), criterion.to(device)

    n_epochs = config['n_epochs']
    optimizer = get_wrapper('noam')(
        dim_model, config['factor'], config.get('warmup', 4000),
        optim.Adam(model.parameters(), lr=config['lr'],
                   betas=(0.9, 0.98), eps=1e-9,
                   weight_decay=config.get('weight_decay', 0)))

    # fast-forward the noam schedule past epochs already covered by the checkpoint
    for _ in range(checkpoint + 1):
        for _ in range(len(train_loader)):
            optimizer.step()

    log_interval = config['log_interval']

    for epoch in range(checkpoint + 1, n_epochs):
        if proc_id == 0:
            print("epoch {}".format(epoch))
            print("training...")
        model.train()
        tot = 0
        hit = 0
        loss_accum = 0
        for i, batch in enumerate(train_loader):
            batch.y = batch.y.to(device)
            batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
            batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
            batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
            batch.g_dec.edata['etype'] = batch.g_dec.edata['etype'].to(device)
            batch.g_dec.ndata['x'] = batch.g_dec.ndata['x'].to(device)
            batch.g_dec.ndata['pos'] = batch.g_dec.ndata['pos'].to(device)

            out = model(batch)
            loss = criterion(out, batch.y) / len(batch.y)
            loss_accum += loss.item() * len(batch.y)
            tot += len(batch.y)
            hit += (out.max(dim=-1)[1] == batch.y).sum().item()
            if proc_id == 0:
                if (i + 1) % log_interval == 0:
                    print('step {}, loss : {}, acc : {}'.format(i, loss_accum / tot, hit / tot))
                    tot = 0
                    hit = 0
                    loss_accum = 0
            loss.backward()

            if (i + 1) % grad_accum == 0:
                for param in model.parameters():
                    if param.requires_grad and param.grad is not None:
                        if n_gpus > 1:
                            th.distributed.all_reduce(param.grad.data,
                                                      op=th.distributed.ReduceOp.SUM)
                        param.grad.data /= (n_gpus * grad_accum)
                optimizer.step()
                optimizer.zero_grad()

        model.eval()
        tot = 0
        hit = 0
        loss_accum = 0
        for batch in dev_loader:
            with th.no_grad():
                batch.y = batch.y.to(device)
                batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
                batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
                batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
                batch.g_dec.edata['etype'] = batch.g_dec.edata['etype'].to(device)
                batch.g_dec.ndata['x'] = batch.g_dec.ndata['x'].to(device)
                batch.g_dec.ndata['pos'] = batch.g_dec.ndata['pos'].to(device)
                out = model(batch)
                loss_accum += criterion(out, batch.y)
                tot += len(batch.y)
                hit += (out.max(dim=-1)[1] == batch.y).sum().item()

        if n_gpus > 1:
            th.distributed.barrier()
        if proc_id == 0:
            print('evaluate...')
            print('loss : {}, acc : {}'.format(loss_accum / tot, hit / tot))

        tot = 0
        hit = 0
        loss_accum = 0
        for batch in test_loader:
            with th.no_grad():
                batch.y = batch.y.to(device)
                batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
                batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
                batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
                batch.g_dec.edata['etype'] = batch.g_dec.edata['etype'].to(device)
                batch.g_dec.ndata['x'] = batch.g_dec.ndata['x'].to(device)
                batch.g_dec.ndata['pos'] = batch.g_dec.ndata['pos'].to(device)
                out = model(batch)
                loss_accum += criterion(out, batch.y)
                tot += len(batch.y)
                hit += (out.max(dim=-1)[1] == batch.y).sum().item()

        if n_gpus > 1:
            th.distributed.barrier()
        if proc_id == 0:
            print('testing...')
            print('loss : {}, acc : {}'.format(loss_accum / tot, hit / tot))
            if not os.path.exists('checkpoints'):
                os.mkdir('checkpoints')
            with open('checkpoints/{}-{}.pkl'.format(epoch, config['save_name']), 'wb') as f:
                th.save(model.state_dict(), f)


if __name__ == '__main__':
    argparser = argparse.ArgumentParser("machine translation")
    argparser.add_argument('--config', type=str)
    argparser.add_argument('--gpu', type=str, default='0')
    argparser.add_argument('--checkpoint', type=int, default=-1)
    args = argparser.parse_args()
    with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)  # explicit Loader avoids the bare yaml.load warning
    devices = list(map(int, args.gpu.split(',')))
    n_gpus = len(devices)
    if n_gpus == 1:
        run(0, n_gpus, devices, config, args.checkpoint)
    else:
        mp = th.multiprocessing
        mp.spawn(run, args=(n_gpus, devices, config, args.checkpoint), nprocs=n_gpus)
108333
from stretch_body.dynamixel_hello_XL430 import DynamixelHelloXL430
from stretch_body.hello_utils import *


class WristPitch(DynamixelHelloXL430):
    def __init__(self, chain=None):
        DynamixelHelloXL430.__init__(self, 'wrist_pitch', chain)
        self.poses = {'tool_up': deg_to_rad(45),
                      'tool_down': deg_to_rad(-45)}

    def pose(self, p, v_r=None, a_r=None):
        self.move_to(self.poses[p], v_r, a_r)
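# Usage sketch, assuming the usual stretch_body device lifecycle (startup()
# and stop() come from the Dynamixel base classes) and attached hardware:
#
# w = WristPitch()
# if w.startup():
#     w.pose('tool_up')   # equivalent to move_to(deg_to_rad(45))
#     w.stop()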
108337
import os

from haikunator import Haikunator

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.eventgrid import EventGridManagementClient
from azure.mgmt.eventgrid.models import (
    Topic,
    EventSubscriptionFilter,
    EventSubscription,
    WebHookEventSubscriptionDestination,
)

# If you wish to debug
# import logging
# logging.basicConfig(level=logging.DEBUG)

_haikunator = Haikunator()

# Resource
LOCATION = 'westus'
GROUP_NAME = 'event-grid-python-sample-rg'

# Event grid
# Using a random topic name. Optionally, replace this with a topic name of your choice.
TOPIC_NAME = "topicsample-" + _haikunator.haikunate(delimiter='')

# Replace the endpoint URL with the URL of your Azure function, or whatever endpoint
# you want to send the event to.
# See the EventGridConsumer sample for a sample of an Azure function that can handle
# EventGridEvents. Publish the ConsumerFunction sample as an Azure function and use
# the URL of that function for the below.
#
# Your endpoint will be validated, see https://aka.ms/esvalidation for details
ENDPOINT_URL = "replace with your Azure function URL that supports validation"

# To run the sample, you must first create an Azure service principal. To create the
# service principal, follow one of these guides:
#   Azure Portal: https://azure.microsoft.com/documentation/articles/resource-group-create-service-principal-portal/
#   PowerShell: https://azure.microsoft.com/documentation/articles/resource-group-authenticate-service-principal/
#   Azure CLI: https://azure.microsoft.com/documentation/articles/resource-group-authenticate-service-principal-cli/
#
# This script expects that the following environment vars are set:
#
#   AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
#   AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
#   AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret
#   AZURE_SUBSCRIPTION_ID: with your Azure Subscription Id


def run_example():
    """Resource Group management example."""
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #
    subscription_id = os.environ.get(
        'AZURE_SUBSCRIPTION_ID',
        '11111111-1111-1111-1111-111111111111')  # your Azure Subscription Id
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID']
    )
    resource_client = ResourceManagementClient(credentials, subscription_id)
    event_grid_client = EventGridManagementClient(credentials, subscription_id)

    # Create Resource group
    print('\nCreating a Resource Group...')
    resource_group = resource_client.resource_groups.create_or_update(
        GROUP_NAME,
        {'location': LOCATION}
    )
    print_item(resource_group)

    # Create EventGrid topic
    print('\nCreating an EventGrid topic...')
    topic_result_async_poller = event_grid_client.topics.create_or_update(
        resource_group.name,
        TOPIC_NAME,
        Topic(
            location=resource_group.location,
            tags={'key1': 'value1', 'key2': 'value2'}
        )
    )
    # Blocking call for the Topic to be created
    topic = topic_result_async_poller.result()  # type: Topic
    print_item(topic)

    # Get the keys for the topic
    print('\nGetting the topic keys...')
    keys = event_grid_client.topics.list_shared_access_keys(
        resource_group.name,
        topic.name
    )  # type: TopicSharedAccessKeys
    print('The key1 value of topic {} is: {}'.format(topic.name, keys.key1))

    # Create an event subscription
    print('\nCreating an event subscription')
    event_subscription_name = 'EventSubscription1'
    destination = WebHookEventSubscriptionDestination(
        endpoint_url=ENDPOINT_URL
    )
    event_filter = EventSubscriptionFilter(
        # By default, "All" event types are included
        is_subject_case_sensitive=False,
        subject_begins_with='',
        subject_ends_with=''
    )
    event_subscription_info = EventSubscription(
        destination=destination,
        filter=event_filter)
    event_subscription_async_poller = event_grid_client.event_subscriptions.create_or_update(
        topic.id,
        event_subscription_name,
        event_subscription_info,
    )
    # Blocking call for the EventSubscription to be created
    event_subscription = event_subscription_async_poller.result()  # type: EventSubscription
    print_item(event_subscription)

    input("Press enter to delete all created resources.")

    # Delete the EventSubscription
    print('\nDeleting the event subscription')
    delete_async_operation = event_grid_client.event_subscriptions.delete(
        topic.id,
        event_subscription_name
    )
    delete_async_operation.wait()
    print("\nDeleted: {}".format(event_subscription_name))

    # Delete the topic
    print('\nDeleting the topic')
    delete_async_operation = event_grid_client.topics.delete(
        resource_group.name,
        topic.name
    )
    delete_async_operation.wait()
    print("\nDeleted: {}".format(topic.name))

    # Delete Resource group and everything in it
    print('\nDelete Resource Group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()
    print("\nDeleted: {}".format(GROUP_NAME))


def print_item(group):
    """Print a ResourceGroup instance."""
    print("\tName: {}".format(group.name))
    print("\tId: {}".format(group.id))
    if hasattr(group, 'location'):
        print("\tLocation: {}".format(group.location))
    print_properties(getattr(group, 'properties', None))


def print_properties(props):
    """Print a ResourceGroup properties instance."""
    if props and hasattr(props, 'provisioning_state'):
        print("\tProperties:")
        print("\t\tProvisioning State: {}".format(props.provisioning_state))
    print("\n\n")


if __name__ == "__main__":
    run_example()
108346
def test_get_custom_properties(exporters, mocker):
    blender_data = mocker.MagicMock()
    vector = mocker.MagicMock()
    vector.to_list.return_value = [0.0, 0.0, 1.0]
    blender_data.items.return_value = [
        ['str', 'spam'],
        ['float', 1.0],
        ['int', 42],
        ['bool', False],
        ['vector', vector],
    ]

    assert exporters.BaseExporter.get_custom_properties(blender_data) == {
        'str': 'spam',
        'float': 1.0,
        'int': 42,
        'bool': False,
        'vector': [0.0, 0.0, 1.0],
    }


def test_ignore_properties(exporters, mocker):
    blender_data = mocker.MagicMock()
    blender_data.items.return_value = [
        ['_RNA_UI', None],
        ['cycles', None],
        ['cycles_visibility', None],
        ['str', 'remains'],
    ]

    assert exporters.BaseExporter.get_custom_properties(blender_data) == {
        'str': 'remains',
    }


def test_invalid_properties(exporters, mocker):
    blender_data = mocker.MagicMock()
    blender_data.items.return_value = [
        ['unserializable', set()],
        ['str', 'remains'],
    ]

    assert exporters.BaseExporter.get_custom_properties(blender_data) == {
        'str': 'remains',
    }


def test_check(exporters):
    assert exporters.BaseExporter.check(None, None)


def test_default(exporters, mocker):
    blender_data = mocker.MagicMock()
    blender_data.name = 'Name'
    assert exporters.BaseExporter.default(None, blender_data) == {'name': 'Name'}


def test_export(exporters):
    assert exporters.BaseExporter.export(None, None) == {}
108361
import xadmin

from .models import Video, HotSearchWords


class VideoAdmin(object):
    list_display = ["content", "cover_duration", "cover_start_second", "video",
                    "longitude", "latitude", "poi_name", "poi_address",
                    "first_create_time", "source"]
    search_fields = ['content', ]
    list_editable = ["is_hot", ]
    list_filter = ["content", "click_num", "is_hot", "first_create_time", "upload_time"]
    style_fields = {"content": "ueditor"}


class HotSearchAdmin(object):
    list_display = ["keywords", "index", "add_time"]


xadmin.site.register(Video, VideoAdmin)
xadmin.site.register(HotSearchWords, HotSearchAdmin)
108410
import factory

from django_google_optimize.models import (
    ExperimentCookie,
    ExperimentVariant,
    GoogleExperiment,
)


# pylint: disable=too-few-public-methods
class GoogleExperimentFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = GoogleExperiment

    experiment_id = factory.Faker("sha1")
    experiment_alias = factory.Faker("city")
    active = True


# pylint: disable=too-few-public-methods
class ExperimentVariantFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = ExperimentVariant

    alias = factory.Faker("city")
    index = factory.Faker("pyint")
    experiment = factory.SubFactory(GoogleExperimentFactory)


class ExperimentCookieFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = ExperimentCookie

    active_variant_index = factory.Faker("pyint")
    active = True
    experiment = factory.SubFactory(GoogleExperimentFactory)
108439
import json

import pytest
from clld.db.models.common import Parameter, Language
from clld.web.adapters import geojson
from clld.web.datatables.base import DataTable

geojson.pacific_centered()


def test_GeoJson(mocker):
    adapter = geojson.GeoJson(None)
    assert len(list(adapter.feature_iterator(None, None))) == 0
    assert len(list(adapter.feature_iterator(Language(), None))) == 1
    assert len(list(adapter.feature_iterator(mocker.Mock(languages=[Language()]), None))) == 1


def test_GeoJsonParameter(env, request_factory):
    adapter = geojson.GeoJsonParameter(None)
    assert '{' in adapter.render(Parameter.get('no-domain'), env['request'])

    with request_factory(params=dict(domainelement='de')) as req:
        res = json.loads(adapter.render(Parameter.get('parameter'), req))
        assert len(res['features']) > 0
        assert 'label' in res['features'][0]['properties']


def test_GeoJsonParameterMultipleValueSets(env):
    adapter = geojson.GeoJsonParameterMultipleValueSets(None)
    assert '{' in adapter.render(Parameter.get('no-domain'), env['request'])


def test_GeoJsonParameterFlatProperties(env):
    adapter = geojson.GeoJsonParameterFlatProperties(None)
    assert '{' in adapter.render(Parameter.get('no-domain'), env['request'])


def test_GeoJsonLanguages(env):
    class MockLanguages(DataTable):
        def get_query(self, *args, **kw):
            return [Language.first()]

    adapter = geojson.GeoJsonLanguages(None)
    assert 'Point' in adapter.render(MockLanguages(env['request'], Language), env['request'])


def test_get_lonlat(mocker):
    assert geojson.get_lonlat(None) is None
    assert geojson.get_lonlat((None, 5)) is None
    assert geojson.get_lonlat((-50, 1))[0] > 0
    assert geojson.get_lonlat(mocker.Mock(latitude=1, longitude=1)) == (pytest.approx(1), pytest.approx(1))


def test_get_feature(data):
    lang = Language.first()
    assert geojson.get_feature(lang)['id'] == lang.id
    assert geojson.get_feature(lang)['properties']['name'] == lang.name
    assert geojson.get_feature(lang, name='geo')['properties']['name'] == 'geo'