id
stringlengths
1
8
text
stringlengths
6
1.05M
dataset_id
stringclasses
1 value
6422210
from utils.lambda_decorators import async_handler, forward_exceptions_to_dlq
from .diffing_sync import DiffingDynamoDbSync
from ..dynamo_elastic_sync.dynamo_elastic_sync import sqs_client, dlq

# Single module-level syncer, reused across warm lambda invocations.
_syncer = DiffingDynamoDbSync()


# N.B. DynamoDB streams call lambda synchronously, which means that setting the dead letter queue on
# the lambda has no effect. Instead we report the error AND throw the exception which will mean that
# dynamo will keep trying to resend until it is successful
@forward_exceptions_to_dlq(sqs_client, dlq)
@async_handler()
def forward_record(event, context):
    """Lambda entry point: forward one DynamoDB stream event to the syncer.

    Exceptions are reported to the DLQ by the decorator and re-raised so that
    DynamoDB keeps retrying the record (see module comment above).
    """
    return _syncer.forward_record(event, context)
StarcoderdataPython
4961021
import datetime
import types
import typing

if typing.TYPE_CHECKING:
    # Imported for annotations only; avoids a hard runtime dependency on .types.
    from .types import JsonicType

Tex = typing.TypeVar("Tex", bound=Exception)


def cause(ex: Tex, cause: Exception) -> Tex:
    """Attach *cause* as the explicit cause of *ex* and return *ex*.

    Equivalent to ``raise ex from cause`` without raising.
    """
    ex.__cause__ = cause
    return ex


class DateConstructorProtocol(typing.Protocol):
    """Structural type for date-like constructors taking year/month/day."""

    def __call__(self, year: int, month: int, day: int):
        ...  # pragma: nocover


Td = typing.TypeVar("Td")  # Td implements DateConstructorProtocol


def date_clone(typ: typing.Type[Td], orig: datetime.datetime) -> Td:
    """Build a *typ* instance from the date components of *orig*."""
    return typing.cast(DateConstructorProtocol, typ)(
        year=orig.year,
        month=orig.month,
        day=orig.day,
    )


class DateTimeConstructorProtocol(typing.Protocol):
    """Structural type for datetime-like constructors."""

    def __call__(
        self,
        year: int,
        month: int,
        day: int,
        hour: int,
        minute: int,
        second: int,
        microsecond: int,
        tzinfo: typing.Optional[datetime.tzinfo],
    ):
        ...


Tdt = typing.TypeVar("Tdt")  # Tdt implements DateTimeConstructorProtocol


def datetime_clone(typ: typing.Type[Tdt], orig: datetime.datetime) -> Tdt:
    """Build a *typ* instance from all date/time components of *orig*."""
    return typing.cast(DateTimeConstructorProtocol, typ)(
        year=orig.year,
        month=orig.month,
        day=orig.day,
        hour=orig.hour,
        minute=orig.minute,
        second=orig.second,
        microsecond=orig.microsecond,
        tzinfo=orig.tzinfo,
    )


def english_enumerate(items: typing.Iterable[str], conj: str = ", and ") -> str:
    """Join *items* with ", ", using *conj* before the last item.

    e.g. ["a", "b", "c"] -> "a, b, and c". A single item is returned
    unchanged and an empty iterable yields "".
    """
    seq = list(items)
    if not seq:
        return ""
    if len(seq) == 1:
        return seq[0]
    return ", ".join(seq[:-1]) + conj + seq[-1]


def is_optional(typ: "JsonicType") -> bool:
    """Return True if *typ* is Optional, i.e. a Union including NoneType.

    Uses the public ``typing.get_origin``/``get_args`` API instead of the
    private ``typing._GenericAlias`` internals the original relied on; also
    recognises PEP 604 unions (``X | None``) on Python 3.10+.
    """
    origin = typing.get_origin(typ)
    union_origins = (typing.Union, getattr(types, "UnionType", typing.Union))
    return origin in union_origins and type(None) in typing.get_args(typ)
StarcoderdataPython
61752
from model_utils import Choices


class IndexableChoices(Choices):
    """Choices subclass that can report the positional index of a choice."""

    def index_of(self, item):
        """Return the position of database value *item* in declaration order.

        Raises:
            ValueError: if *item* is not one of the stored choice values.
        """
        # _doubles holds (db_value, display) pairs in declaration order;
        # enumerate replaces the original index-based range(len(...)) loop.
        for i, (db_value, _display) in enumerate(self._doubles):
            if item == db_value:
                return i
        raise ValueError(f"{item} is not a valid choice.")


# Hike difficulty ratings, ordered from easiest to hardest.
DIFFICULTY_CHOICES = IndexableChoices(
    ('beginner', 'Beginner'),
    ('intermediate', 'Intermediate'),
    ('advanced', 'Advanced'),
    ('expert', 'Expert')
)

# Taiwanese national parks the peaks belong to.
NP_CHOICES = Choices(
    ('taroko', 'Taroko National Park'),
    ('sheipa', 'Shei-pa National Park'),
    ('jade', 'Yushan National Park')
)
StarcoderdataPython
214030
from src.exception.PlayerPathObstructedException import *
from src.player.RandomBot import *


class BuilderBot(RandomBot):
    """Bot that places the fence with the best global impact when one helps,
    and otherwise falls back to a random move."""

    def computeFencePlacingImpacts(self, board):
        """Map each valid fence placing to its global path-length impact.

        A placing's global impact sums its per-player path impacts, counted
        negatively for this bot and positively for opponents. Placings that
        would obstruct a player's path are skipped.
        """
        impacts = {}
        for placing in board.storedValidFencePlacings:
            try:
                perPlayer = board.getFencePlacingImpactOnPaths(placing)
            except PlayerPathObstructedException:
                # Illegal placing: it would leave a player with no path.
                continue
            impacts[placing] = sum(
                (-1 if playerName == self.name else 1) * delta
                for playerName, delta in perPlayer.items()
            )
        return impacts

    def getFencePlacingWithTheHighestImpact(self, fencePlacingImpacts):
        """Return the fence placing whose global impact is largest."""
        return max(fencePlacingImpacts, key=fencePlacingImpacts.get)

    def play(self, board) -> IAction:
        """Place the best fence when it has positive impact, else move randomly."""
        # No fences left (or nowhere to place one): moving is the only option.
        if self.remainingFences() < 1 or len(board.storedValidFencePlacings) < 1:
            return self.moveRandomly(board)
        fencePlacingImpacts = self.computeFencePlacingImpacts(board)
        if not fencePlacingImpacts:
            return self.moveRandomly(board)
        bestFencePlacing = self.getFencePlacingWithTheHighestImpact(fencePlacingImpacts)
        # Only spend a fence when it actually helps (impact >= 1).
        if fencePlacingImpacts[bestFencePlacing] < 1:
            return self.moveRandomly(board)
        return bestFencePlacing
StarcoderdataPython
5011712
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF C++ logging (must be set before TF import)
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import numpy as np
import cv2
import tensorflow.keras.backend as K

# Network input size (height, width, channels).
IMAGE_SHAPE = (448, 448, 3)

# Pascal VOC class names -> class indices.
CLASS_NAME_TO_INDEX = {
    'aeroplane': 0, 'bicycle': 1, 'bird': 2, 'boat': 3, 'bottle': 4,
    'bus': 5, 'car': 6, 'cat': 7, 'chair': 8, 'cow': 9,
    'diningtable': 10, 'dog': 11, 'horse': 12, 'motorbike': 13, 'person': 14,
    'pottedplant': 15, 'sheep': 16, 'sofa': 17, 'train': 18, 'tvmonitor': 19
}
INDEX_TO_CLASS_NAME = list(CLASS_NAME_TO_INDEX.keys())


def preprocess_image(image, newsize=IMAGE_SHAPE[:2]):
    """Resizes and normalizes the image to [0, 1]."""
    image = cv2.resize(image, newsize)
    image = image / 255.
    return image


def bndbox_to_coords(bndbox, img_width, img_height, s):
    """
    Given a bounding box in pixel coordinates (xmin, xmax, ymin, ymax),
    the image dimensions and the grid size (s), this function returns:
        - x, y: the center coordinates of the bounding box relative to the
          cell associated with the bounding box where (0, 0) is the top left
          of the cell and (1, 1) is the bottom right.
        - w, h: the size of the bounding box relative to the whole image.
        - cell_x, cell_y: coordinates of the cell associated with the box.
    """
    xmin, xmax, ymin, ymax = bndbox
    # absolute position in grid units
    x = (xmin + xmax) / 2 / img_width * s
    y = (ymin + ymax) / 2 / img_height * s
    # size relative to the whole image
    w = (xmax - xmin) / img_width
    h = (ymax - ymin) / img_height
    # position relative to cell
    cell_x, cell_y = int(x), int(y)
    x, y = (x - cell_x), (y - cell_y)
    return x, y, w, h, cell_x, cell_y


def coords_to_bndbox(x, y, w, h, cell_x, cell_y, img_width, img_height, s):
    """
    Inverse of bndbox_to_coords(): given cell-relative center coordinates,
    image-relative size, the owning cell, the image dimensions and the grid
    size (s), returns the box in pixel coordinates (xmin, xmax, ymin, ymax).
    """
    x = (x + cell_x) * img_width / s
    y = (y + cell_y) * img_height / s
    w = w * img_width
    h = h * img_height
    xmin = round(x - w / 2)
    xmax = round(x + w / 2)
    ymin = round(y - h / 2)
    ymax = round(y + h / 2)
    return xmin, xmax, ymin, ymax


def get_truth_from_label(label, s=7, b=3, c=20):
    """
    Creates a truth label tensor from a label dictionary.
        - label: dictionary in the format produced by
          create_labels.create_object_detection_label()
        - s: grid size (there will be s*s cells)
        - b: number of bounding boxes per cell
        - c: number of possible classes
    Note: only the first bounding box of a cell is filled; the others stay 0.
    Returns an (s, s, b * 5 + c) tensor whose per-cell layout is:
        [one-hot class (c)] [confidence per box (b)] [x, y, w, h per box (4b)]
    """
    img_width, img_height = label['image-size']['width'], label['image-size']['height']
    truth_shape = (s, s, b * 5 + c)
    truth_tensor = np.zeros(truth_shape, dtype=np.float32)
    for obj in label['objects']:
        # get object data
        class_index = CLASS_NAME_TO_INDEX[obj['name']]
        x, y, w, h, cell_x, cell_y = bndbox_to_coords(obj['bndbox'], img_width, img_height, s)
        # only the first object landing in a cell is recorded
        # (assumes the whole assignment was guarded in the original -- TODO confirm)
        if truth_tensor[cell_y, cell_x, c] == 0:
            truth_tensor[cell_y, cell_x, class_index] = 1    # class probabilities
            truth_tensor[cell_y, cell_x, c] = 1              # box confidence score
            truth_tensor[cell_y, cell_x, c+b:c+b+4] = x, y, w, h  # box coordinates
    return truth_tensor


def get_label_from_tensor(tensor, s=7, b=3, c=20):
    """
    Converts a tensor (either a truth tensor or a predicted tensor) to a
    label dictionary.
        - tensor: (s, s, b * 5 + c) tensor in the get_truth_from_label() format
        - s: grid size; b: boxes per cell; c: number of classes
        - Returns a label dictionary in the format produced by
          create_labels.create_object_detection_label()
    """
    # TODO: not implemented yet
    pass


def filter_predictions(class_probs, box_confs, box_coords, threshold=0.0001):
    """
    class_probs is an (s, s, c) tensor of class probabilities.
    box_confs is an (s, s, b) tensor of box confidence scores.
    box_coords is an (s, s, b, 4) tensor of coordinates.

    Returns (class_probs, box_confs, box_coords) where box_confs has been
    zeroed for every box whose best class-specific confidence score
    (class probability * box confidence) is below the threshold.
    """
    s, _, c = class_probs.shape
    b = box_confs.shape[2]
    # class-specific confidence for every (cell, box, class) combination
    class_box_confs = np.tile(class_probs, (1, 1, b)).reshape(s, s, b, c)
    class_box_confs = class_box_confs * box_confs[:, :, :, None]
    # best class score per box decides whether the box survives
    box_class_scores = np.max(class_box_confs, axis=-1)
    keep = box_class_scores >= threshold
    return class_probs, np.where(keep, box_confs, 0.0), box_coords


def get_area(box):
    """Returns the area of a box given as (xmin, xmax, ymin, ymax)."""
    xmin, xmax, ymin, ymax = box
    area = (xmax - xmin) * (ymax - ymin)
    return area


def get_intersection(box1, box2):
    """Returns the area of intersection between 2 boxes (0 if disjoint)."""
    xmin1, xmax1, ymin1, ymax1 = box1
    xmin2, xmax2, ymin2, ymax2 = box2
    inter_w = min(xmax1, xmax2) - max(xmin1, xmin2)
    inter_h = min(ymax1, ymax2) - max(ymin1, ymin2)
    # BUGFIX: the previous version swapped inverted edges, which produced a
    # positive "intersection" for non-overlapping boxes; clamp to 0 instead
    # (mirrors the K.maximum(..., 0) clamp used in keras_iou below).
    if inter_w <= 0 or inter_h <= 0:
        return 0
    return inter_w * inter_h


def get_iou(box1, box2):
    """Returns the intersection over union for 2 boxes."""
    inter_area = get_intersection(box1, box2)
    union_area = get_area(box1) + get_area(box2) - inter_area
    iou = inter_area / union_area
    return iou


def keras_yolo_to_image_coords(boxes):
    """
    boxes should have the shape (m, s, s, b, 4).
    Converts YOLO coordinates to coordinates (x, y, w, h) relative to the
    image, such that each of x, y, w and h would be in the range [0:1].
    Returns:
        - xy, tensor of shape (m, s, s, b, 2): box centers
        - wh, tensor of shape (m, s, s, b, 2): box widths and heights
    Uses functions from tf.keras.backend (K).
    """
    s = boxes.shape[1]
    b = boxes.shape[3]
    # Build a (1, s, s, 1, 2) tensor holding each cell's (x, y) grid offset.
    cell_y = K.arange(s)
    cell_y = K.tile(cell_y, [s])
    cell_x = K.arange(s)
    cell_x = K.tile(K.expand_dims(cell_x, 0), [s, 1])
    cell_x = K.flatten(K.transpose(cell_x))
    cell_xy = K.transpose(K.stack([cell_y, cell_x]))
    cell_xy = K.reshape(cell_xy, [1, s, s, 1, 2])
    cell_xy = K.cast(cell_xy, K.dtype(boxes))
    grid_dims = K.cast(K.constant([s, s]), K.dtype(boxes))
    # BUGFIX: the cell offset must be added *before* dividing by the grid
    # size; the previous expression computed boxes + cell_xy / grid_dims,
    # leaving xy in cell units instead of [0, 1] image units.
    xy = (boxes[..., :2] + cell_xy) / grid_dims
    # w/h are already stored relative to the whole image (see bndbox_to_coords).
    wh = boxes[..., 2:4]
    return xy, wh


def keras_image_coords_to_minmax(xy, wh, image_shape=IMAGE_SHAPE[:2]):
    """Converts image-relative center/size coordinates to pixel min/max corners."""
    image_wh = K.cast(K.constant(image_shape[:2]), K.dtype(xy))
    min_xy = (xy - wh / 2) * image_wh
    max_xy = (xy + wh / 2) * image_wh
    return min_xy, max_xy


def keras_iou(min_xy1, max_xy1, min_xy2, max_xy2):
    """
    Applies IoU to each pair of boxes in the given tensors.
    min_xy1/max_xy1 have shape (m, s, s, b, 2) and contain the min/max
    corners of the first box; min_xy2/max_xy2 likewise for the second box.
    Uses functions from tf.keras.backend (K).
    """
    # calculate the intersection areas (clamped at 0 for disjoint boxes)
    inter_min_xy = K.maximum(min_xy1, min_xy2)
    inter_max_xy = K.minimum(max_xy1, max_xy2)
    inter_wh = K.maximum(inter_max_xy - inter_min_xy, 0)
    inter_areas = inter_wh[..., 0] * inter_wh[..., 1]
    # calculate the union areas
    wh1 = max_xy1 - min_xy1
    wh2 = max_xy2 - min_xy2
    areas1 = wh1[..., 0] * wh1[..., 1]
    areas2 = wh2[..., 0] * wh2[..., 1]
    union_areas = areas1 + areas2 - inter_areas
    iou_scores = inter_areas / union_areas
    return iou_scores


def non_maximal_suppression(box_confs, box_coords, min_iou=0.5):
    """
    Applies non maximal suppression and returns an (s, s, b, c) tensor
    representing the modified class confidence scores.
    NMS works by first choosing the box with maximum confidence and
    discarding any boxes which have an iou larger than min_iou with that box,
    then repeating the process for the next box with maximum confidence.
    """
    # TODO: not implemented yet
    pass


def get_label_from_prediction(pred, img_width, img_height, threshold=0.2, img_depth=3, s=7, b=3, c=20):
    """
    Converts a prediction vector (network output) to a label dictionary in
    the format produced by create_labels.create_object_detection_label(),
    except that the 'difficult' property is omitted.
    """
    # pred is a vector of length (s * s * (b * 5 + c)):
    # [class probabilities][box confidences][box coordinates]
    class_probs_end = s * s * c
    box_confs_end = class_probs_end + s * s * b
    class_probs = pred[:class_probs_end].reshape((s, s, c))
    box_confs = pred[class_probs_end:box_confs_end].reshape((s, s, b))
    box_coords = pred[box_confs_end:].reshape((s, s, b, 4))

    # discard boxes whose best class-specific confidence is below threshold
    class_probs, box_confs, box_coords = filter_predictions(
        class_probs, box_confs, box_coords, threshold=threshold)

    objects = []
    for cell_y in range(s):
        for cell_x in range(s):
            for box in range(b):
                if box_confs[cell_y, cell_x, box] <= 0:
                    continue  # filtered out above
                x, y, w, h = box_coords[cell_y, cell_x, box]
                # assumes the network predicts sqrt(w), sqrt(h) -- TODO confirm
                w, h = w ** 2, h ** 2
                xmin, xmax, ymin, ymax = coords_to_bndbox(
                    x, y, w, h, cell_x, cell_y, img_width, img_height, s)
                class_index = int(np.argmax(class_probs[cell_y, cell_x]))
                class_name = INDEX_TO_CLASS_NAME[class_index]
                objects.append({'name': class_name, 'bndbox': [xmin, xmax, ymin, ymax]})

    label = {
        "image-size": {"depth": img_depth, "width": img_width, "height": img_height},
        "objects": objects
    }
    return label


if __name__ == '__main__':
    pass  # manual smoke-test code removed; see repository history
StarcoderdataPython
3234325
## 2/3 n = 0 while abs(u(n)**2 - 2) >= 2e-5: n += 1 print(n) # n == 4 # √2 ≈ 1.4142156862745097 ## 2/4 n = 0 q = 4 # choose this def u_q(n): if n <= 0: return 1 return (1/q)*(u(n-1)+q/u(n)) while abs(u_q(n)**q - q) >= q*1e-5: n += 1 print(n) ## 3/1/a def fact(n): if n <= 1: return 1 return n*fact(n-1) ## 3/1/b def binom(n, p): return fact(n)//(fact(p)*fact(n-p)) ## 3/2 sans la formule de Pion def coefs_binomiaux(n): coefs = [] for p in range(0, n+1): coefs.append(binom(n, p)) return coefs def coefs_binomiaux_fast(n): u=1 coefs = [u] for p in range(n): u *= (n-p)//(p+1) coefs.append(u) return coefs ## bonus mdr def triangle_pascal_upto(n, align_char="<"): triangle = [coefs_binomiaux(ligne) for ligne in range(n)] max_int_length = max(max(len(str(i)) for i in ligne) for ligne in triangle) for ligne in triangle: print(" ".join([f"{i:{align_char}{max_int_length}}" for i in ligne])) ## 1/7 from math import sqrt, log as ln def isprime(n): i = 2 while i <= int(sqrt(n)): if n%i==0: return False i+=1 return True def π(n): return len([p for p in range(2, n+1) if isprime(p)]) for n in range(10**6): print(f""" ln {n} π({n}) --------------------- = {π(n)*(ln(n)/n)} {n} """)
StarcoderdataPython
1807465
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import shutil
from collections import defaultdict

from mmdet.datasets import DATASETS

from .sot_test_dataset import SOTTestDataset


@DATASETS.register_module()
class GOT10kDataset(SOTTestDataset):
    """GOT10k dataset for the testing of single object tracking.

    The dataset doesn't support training mode.
    """

    def format_results(self, results, resfile_path=None):
        """Format the results to txts (standard format for GOT10k Challenge).

        Writes one `<video>_001.txt` per video under `resfile_path` (one
        `x,y,w,h` line per frame), zips the directory and then removes it.

        Args:
            results (dict(list[ndarray])): Testing results of the dataset.
            resfile_path (str): Path to save the formatted results.
                Defaults to None.
        """
        # prepare saved dir
        assert resfile_path is not None, 'Please give key-value pair \
            like resfile_path=xxx in argparse'
        if not osp.isdir(resfile_path):
            os.makedirs(resfile_path, exist_ok=True)

        # transform tracking results format
        # from [bbox_1, bbox_2, ...] to {'video_1':[bbox_1, bbox_2, ...], ...}
        track_bboxes = results['track_bboxes']
        print('-------- There are total {} images --------'.format(
            len(track_bboxes)))

        video_info = self.coco.videos
        format_results = defaultdict(list)
        for img_id, track_bbox in enumerate(track_bboxes):
            img_info = self.data_infos[img_id]
            # results are ordered by image id starting at 1
            assert img_info['id'] == img_id + 1, 'img id is not matched'
            video_name = video_info[img_info['video_id']]['name']
            # keep only the box corners; presumably track_bbox is
            # (x1, y1, x2, y2, score) -- TODO confirm
            format_results[video_name].append(track_bbox[:4])

        # results must cover every video exactly once
        assert len(video_info) == len(
            format_results
        ), 'The number of video is not matched! There are {} videos in the \
dataset and {} videos in the testing results'.format(
            len(video_info), len(format_results))

        # writing submitted results
        # TODO record test time
        print('writing submitted results to {}'.format(resfile_path))
        for video_name, bboxes in format_results.items():
            video_file_path = osp.join(resfile_path, video_name)
            if not osp.isdir(video_file_path):
                os.makedirs(video_file_path, exist_ok=True)
            video_txt = osp.join(video_file_path,
                                 '{}_001.txt'.format(video_name))
            with open(video_txt, 'w') as f:
                for bbox in bboxes:
                    # GOT10k submission format: x, y, w, h at 4 decimals
                    bbox = [
                        str(f'{bbox[0]:.4f}'),
                        str(f'{bbox[1]:.4f}'),
                        str(f'{(bbox[2] - bbox[0]):.4f}'),
                        str(f'{(bbox[3] - bbox[1]):.4f}')
                    ]
                    line = ','.join(bbox) + '\n'
                    f.writelines(line)
        # bundle everything into <resfile_path>.zip and drop the directory
        shutil.make_archive(resfile_path, 'zip', resfile_path)
        shutil.rmtree(resfile_path)
StarcoderdataPython
149700
from typing import Any

from cachepot.backend.filesystem import FileSystemCacheBackend
from cachepot.serializer.json import JSONSerializer, JSONType
from cachepot.serializer.pickle import PickleSerializer
from cachepot.serializer.str import StringSerializer
from cachepot.store import CacheStore


class SimpleFileSystemCacheStore(CacheStore[str, Any]):
    """Filesystem-backed cache with string keys and pickle-serialized values."""

    # namespace prefix partitioning this store's keys within the shared backend
    namespace: str

    def __init__(
        self,
        namespace: str,
        *,
        directory: str = 'tmp',
    ):
        super().__init__(
            namespace=namespace,
            key_serializer=StringSerializer(),
            value_serializer=PickleSerializer(),
            backend=FileSystemCacheBackend(directory),
            default_expire_seconds=3600,  # entries expire after one hour
        )


class FileSystemJSONCacheStore(CacheStore[str, JSONType]):
    """Filesystem-backed cache with string keys and JSON-serialized values."""

    # namespace prefix partitioning this store's keys within the shared backend
    namespace: str

    def __init__(
        self,
        namespace: str,
        *,
        directory: str = 'tmp',
    ):
        super().__init__(
            namespace=namespace,
            key_serializer=StringSerializer(),
            value_serializer=JSONSerializer(),
            backend=FileSystemCacheBackend(directory),
            default_expire_seconds=3600,  # entries expire after one hour
        )


def example_usage() -> None:
    """Demonstrate basic put/get/remove and the caching proxy."""
    cachestore = SimpleFileSystemCacheStore('example', directory='./tmp')
    cachestore.put('x', 1)
    assert cachestore.get('x') == 1
    cachestore.remove('x')
    assert cachestore.get('x') is None
    # proxy() caches the callable's result under cache_key;
    # the second call is served from the cache.
    assert cachestore.proxy(lambda: 3)(cache_key='y') == 3
    assert cachestore.proxy(lambda: 3)(cache_key='y') == 3
StarcoderdataPython
1860239
__author__ = 'Jwely'

import os


class TeXWriter:
    """Accumulates TeX content (raw text, chapters, figures) in memory and
    writes it to a `.tex` file on demand."""

    def __init__(self, main_path, texfile_path):
        """
        :param main_path: root directory of the TeX project (must exist)
        :param texfile_path: path of the `.tex` file to write
        """
        self.main_path = os.path.abspath(main_path)
        self.texfile_path = os.path.abspath(texfile_path)
        self.content = []  # a list of content strings

        if not os.path.exists(self.main_path):
            # NOTE: raising Warning (as an exception) preserves the original
            # API -- existing callers may be catching Warning.
            raise Warning("main_path '{0}' does not exist".format(self.main_path))

    def add_figure(self, figure_path, caption, width,
                   create_from_function=None, create_kwargs=None):
        """
        Adds a figure to the TexFile, if create params are given, the figure
        is generated from scratch.

        :param figure_path: filepath to the image
        :param caption: caption to use on the figure
        :param width: width of the figure as a string, such as '5in'
        :param create_from_function: input method to use to generate the figure
        :param create_kwargs: kwargs for the create_from_function
        """
        # imported lazily so text-only use of TeXWriter does not require the
        # figure-generation module to be importable
        from TeXFigureGenerator import TeXFigureGenerator

        fig = TeXFigureGenerator(self.main_path, figure_path, caption, width)
        self.content += fig.get_tex()

        # if creation parameters were given, create the figure from scratch
        if create_from_function is not None:
            fig.create_from(create_from_function, create_kwargs)

    def add_text(self, text):
        """
        Adds a string of text to the TeX document, must be TeX formatted.
        Not really intended to be used extensively, but could be needed.
        """
        self.content += [text]

    def chapter(self, name):
        """Adds a \\chapter heading with the given name."""
        self.content += ["\\chapter{{{0}}}".format(name)]

    def write(self, verbose=False, reset_content=False, include_labels=True):
        """
        Writes a `.tex` file with the current text and figures.

        :param verbose: also print each line as it is written
        :param reset_content: clear the accumulated content after writing
        :param include_labels: when False, lines containing \\label are skipped
        """
        with open(self.texfile_path, 'w+') as f:
            for line in self.content:
                # The original duplicated the print/write logic across the
                # include_labels branches; a single guarded path is equivalent.
                if not include_labels and r"\label" in line:
                    continue
                if verbose:
                    print(line)
                f.write(line + "\n")

        print("Wrote tex file at {0}".format(self.texfile_path))

        # resets the content to ensure figure is only writen once, not subsequently
        if reset_content:
            self.content = []
StarcoderdataPython
12854027
import os

import setuptools

CUR_DIR = os.path.abspath(os.path.dirname(__file__))

# Read the package version from __version__.py by exec-ing it, so the
# package itself (and its dependencies) need not be importable at build time.
about = {}
with open(os.path.join(CUR_DIR, "data_spec_validator", "__version__.py"), "r") as f:
    exec(f.read(), about)

# Long description for PyPI comes straight from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="data-spec-validator",
    version=about['__version__'],
    author="CJHwong, falldog, HardCoreLewis, kilikkuo, xeonchen",
    author_email="<EMAIL>",
    description="Simple validation tool for API",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/hardcoretech/data-spec-validator",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    package_dir={"data_spec_validator": "data_spec_validator"},
    packages=setuptools.find_packages(),
    install_requires=[
        "python-dateutil",
    ],
    # Optional Django/DRF integration for the decorator API.
    extras_require={
        'decorator': ['Django', 'djangorestframework'],
    },
    python_requires=">=3.6",
)
StarcoderdataPython
3337950
import numpy as np


def denormalize(tensor, mean, std):
    """Invert a channel-wise normalization: returns tensor * std + mean.

    Works on channel-first data (C, H, W). Accepts either a numpy array or a
    torch tensor; torchvision is imported lazily so numpy-only callers do not
    need it installed.

    :param tensor: normalized image data, numpy ndarray or torch tensor
    :param mean: per-channel means used by the original normalization
    :param std: per-channel stds used by the original normalization
    """
    mean = np.array(mean)
    std = np.array(std)
    # Re-use the forward normalize with inverted statistics:
    # (t - (-m/s)) / (1/s) == t*s + m
    mean = -mean / std
    std = 1 / std
    if isinstance(tensor, np.ndarray):
        # broadcast over (C, H, W)
        return (tensor - mean.reshape(-1, 1, 1)) / std.reshape(-1, 1, 1)
    # torch tensor path: delegate to torchvision (imported lazily, see docstring)
    from torchvision.transforms.functional import normalize
    return normalize(tensor, mean, std)
StarcoderdataPython
9728039
import time

from celery import chain, chord
from celery.utils.log import get_task_logger

from materializationengine.celery_init import celery
from materializationengine.shared_tasks import fin

celery_logger = get_task_logger(__name__)


@celery.task(name="process:start_test_workflow")
def start_test_workflow(iterator_length: int = 50):
    """Test workflow for exploring scaling in kubernetes

    Builds three identical composite workflows (two fan-out/fan-in chords plus
    a mock "update roots and freeze" stage each) and launches them inside a
    final chord that ends with final_task.

    Args:
        iterator_length (int): Number of parallel tasks to run. Default = 50
    """
    workflow = []
    for i in range(0, 3):
        test_workflow = chain(
            chord(
                [chain(dummy_task.si(i)) for i in range(0, iterator_length)], fin.si()
            ),  # return here is required for chords
            fin.si(),
        )  # final task which will process a return status/timing etc...

        test_workflow_2 = chain(
            chord(
                [chain(dummy_task.si(i)) for i in range(0, iterator_length)], fin.si()
            ),  # return here is required for chords
            fin.si(),
        )

        # mimics the real "update roots and freeze" stage:
        # fan-out, single task, another fan-out, then a finalizer
        update_roots_and_freeze = chain(
            chord([dummy_task.si(i) for i in range(0, iterator_length)], fin.si()),
            dummy_task.si(i),
            chord(
                [chain(dummy_task.si(i)) for i in range(0, iterator_length)], fin.si()
            ),
            fin.si(),
        )  # final task which will process a return status/timing etc...

        test_complete_workflow = chain(
            test_workflow, test_workflow_2, update_roots_and_freeze
        )
        workflow.append(test_complete_workflow)
    final_workflow = chord(workflow, final_task.s())
    status = final_workflow.apply_async()
    return status


@celery.task(
    name="process:dummy_task",
    bind=True,
    acks_late=True,
    autoretry_for=(Exception,),
    max_retries=3,
)
def dummy_task(self, i):
    """Sleep one second and succeed; stands in for a unit of real work."""
    time.sleep(1)
    return True


@celery.task(
    name="process:dummy_arg_task",
    bind=True,
    acks_late=True,
    autoretry_for=(Exception,),
    max_retries=3,
)
def dummy_arg_task(self, arg: str = None):
    """Sleep one second and echo *arg* back; currently unused by the workflow."""
    time.sleep(1)
    return arg


@celery.task(
    name="process:final_task",
    bind=True,
    acks_late=True,
    autoretry_for=(Exception,),
    max_retries=3,
)
def final_task(self, *args, **kwargs):
    """Terminal task for the whole test workflow chord."""
    time.sleep(1)
    return "FINAL TASK"
StarcoderdataPython
11386134
<reponame>Warlockk/Intensio-Obfuscator # -*- coding: utf-8 -*- # https://github.com/Hnfull/Intensio-Obfuscator #---------------------------------------------------------- [Lib] -----------------------------------------------------------# import fileinput import random import textwrap import re import sys from progress.bar import Bar from core.obfuscation.intensio_mixer import Mixer from core.utils.intensio_utils import Utils from core.utils.intensio_error import EXIT_SUCCESS, EXIT_FAILURE #------------------------------------------------- [Function(s)/Class(es)] --------------------------------------------------# class Padding: def __init__(self): self.mixer = Mixer() self.utils = Utils() # -- Len of spaces -- # self.space0 = "" self.space4 = " " self.space8 = " " self.space12 = " " self.space16 = " " self.space20 = " " self.space24 = " " self.space28 = " " self.space32 = " " self.space36 = " " self.space40 = " " self.space44 = " " self.space48 = " " self.space52 = " " self.space56 = " " self.space60 = " " self.space64 = " " def ScriptsGenerator(self, mixerLengthArg, mixerLevelArg): varRandom1 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom2 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom3 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom4 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom5 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom6 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom7 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom8 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, 
mixerLevelArgDefined=mixerLevelArg ) varRandom9 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom10= self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom11= self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom12= self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom13= self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) varRandom14= self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) # ---------- Python random scripts ---------- # rand = random.randint(1, 7) # -- script 1 -- # if rand == 1: scriptAssPadding1 = textwrap.dedent(""" {0} = '{5}' {1} = '{6}' {2} = '{7}' {3} = '{8}' {4} = '{9}' if {0} in {1}: {0} = {4} if {1} in {2}: {1} = {3} elif {1} in {0}: {2} = {1} if {2} in {1}: {1} = {4} """).format(varRandom1, varRandom2, varRandom3, varRandom4, varRandom5, \ varRandom6, varRandom7, varRandom8, varRandom9, varRandom10) return scriptAssPadding1 # -- script 2 -- # elif rand == 2: scriptAssPadding2 = textwrap.dedent(""" {0} = '{2}' {1} = '{3}' if {0} != {1}: {0} = '{3}' {1} = {0} {0} = '{2}' """).format(varRandom1, varRandom2, varRandom3, varRandom4) return scriptAssPadding2 # -- script 3 -- # elif rand == 3: scriptAssPadding3 = textwrap.dedent(""" {0} = '{6}' {1} = '{7}' {2} = '{8}' {3} = '{9}' {4} = '{10}' {5} = '{11}' if {0} != {3}: {1} = {2} for {5} in {3}: if {5} != {2}: {1} = {1} else: {4} = {0} else: {2} = {0} {0} = {4} if {2} == {0}: for {5} in {0}: if {5} == {2}: {2} = {0} else: {2} = {4} """).format(varRandom1, varRandom2, varRandom3, varRandom4, varRandom5, \ varRandom6, varRandom7, varRandom8, varRandom9, varRandom10, \ varRandom11, varRandom12) return scriptAssPadding3 # -- script 4 -- # elif rand == 4: 
scriptAssPadding4 = textwrap.dedent(""" {0} = '{3}' {1} = '{4}' {2} = '{5}' if {0} == {1}: {2} = '{5}' {2} = {0} else: {2} = '{5}' {2} = '{3}' """).format(varRandom1, varRandom2, varRandom3, varRandom4, \ varRandom5, varRandom6,) return scriptAssPadding4 # -- script 5 -- # elif rand == 5: scriptAssPadding5 = textwrap.dedent(""" {0} = '{6}' {1} = '{7}' {2} = '{8}' {3} = '{9}' {4} = '{10}' {5} = '{11}' if {2} == {3}: for {5} in {4}: if {5} == {3}: {4} = {0} else: {3} = {1} """).format(varRandom1, varRandom2, varRandom3, \ varRandom4, varRandom5, varRandom6, \ varRandom7, varRandom8, varRandom9, \ varRandom10, varRandom11, varRandom12) return scriptAssPadding5 # -- script 6 -- # elif rand == 6: scriptAssPadding6 = textwrap.dedent(""" {0} = '{4}' {1} = '{5}' {2} = '{6}' {3} = '{7}' if {1} == {0}: for {0} in {1}: if {1} == {1}: {2} = '{3}' elif {2} == {3}: {3} = {0} else: {0} = {1} elif {2} == {2}: for {2} in {1}: if {3} == {1}: {2} = '{3}' elif {2} == {3}: {3} = {0} else: {0} = {1} for {2} in {1}: if {3} == {1}: {2} = '{3}' elif {2} == {3}: {3} = {0} else: {0} = {3} else: {0} = {1} """).format(varRandom1, varRandom2, varRandom3, \ varRandom4, varRandom5, varRandom6, \ varRandom7, varRandom8) return scriptAssPadding6 # -- script 7 -- # elif rand == 7: scriptAssPadding7 = textwrap.dedent(""" try: {0} = '{7}' {1} = '{8}' {2} = '{9}' {3} = '{10}' {4} = '{11}' {5} = '{12}' {6} = [ '{7}', '{9}', '{11}', '{13}' ] for {0} in {5}: for {1} in {2}: if {3} == {4}: {1} = {0} elif {4} == {1}: {1} = {5} else: {4} = {5} for {1} in {6}: {2} = {1} except Exception: pass """).format(varRandom1, varRandom2, varRandom3, \ varRandom4, varRandom5, varRandom6, \ varRandom7, varRandom8, varRandom9, \ varRandom10, varRandom11, varRandom12, \ varRandom13, varRandom14) return scriptAssPadding7 def AddRandomScripts(self, outputArg, mixerLengthArg, mixerLevelArg, verboseArg): countScriptsAdded = 0 countLineAdded = 0 countLine = 0 checkLine = 0 checkQuotePassing = 0 checkCharPassing = 0 
checkOtherCharPassing = 0 countRecursFiles = 0 addIndentScript = r".*\:{1}\s+$|.*\:{1}\s*$" quotesIntoVariable = r".*={1}\s*[\"|\']{3}" quotesEndMultipleLines = r"^\s*[\"|\']{3}\)?\.?" quotesInRegex = r"={1}\s*r{1}[\"|\']{3}" noAddScript = r"^\@|\s+\@|\s+return|\s*def\s+.+\s*\:{1}|^class\s+.+\s*\:{1}|.*[\{|\[|\(|\)|\]|\}|,|\\|^]\s*$|\s+yield.*|\s+raise.*" quoteIntoVariable = r".*\={1}\s*\w*\.?\w*[\(|\.]{1}[\']{3}|.*\={1}\s*\w*\.?\w*[\(|\.]{1}[\"\"\"]{3}|.*\={1}\s*[\"]{3}|.*\={1}\s*[\']{3}" recursFiles = self.utils.CheckFileDir( output=outputArg, detectFiles="py", blockDir="__pycache__", blockFile=False, dirOnly=False ) for number in recursFiles: countRecursFiles += 1 print("\n[+] Running add of random scripts in {0} file(s)...\n".format(countRecursFiles)) # -- Count the number of lines that will be checked before filling -- # with Bar("Setting up ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: with open(file , "r") as readFile: readF = readFile.readlines() for eachLine in readF: if not eachLine: continue countLine += 1 bar.next(1) bar.finish() # -- Padding scripts added -- # with Bar("Obfuscation ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: with fileinput.input(file, inplace=True) as inputFile: for eachLine in inputFile: sys.stdout.write(eachLine) if eachLine == "\n": continue else: spaces = len(eachLine) - len(eachLine.lstrip()) # -- Detect code into 3 quotes excepted comments -- # if re.match(quotesIntoVariable, eachLine): if re.match(quotesInRegex, eachLine): pass else: checkQuotePassing += 1 continue elif re.match(quotesEndMultipleLines, eachLine): if re.match(quotesInRegex, eachLine): pass else: checkQuotePassing += 1 if checkQuotePassing == 2: checkQuotePassing = 0 continue if checkQuotePassing == 1: continue elif checkQuotePassing == 2: checkQuotePassing = 0 pass else: pass # -- Check dict, list, tuple in multiple lines -- # if checkCharPassing == 1: if 
re.match(r".*[\"|\'|\)|\]|\}|\w]\s*$", eachLine): checkCharPassing = 0 continue else: continue elif checkOtherCharPassing >= 1: if re.match(r".*[\"|\'|\)|\]|\}|\w]\s*$", eachLine): checkOtherCharPassing -= 1 continue else: if re.match(r".*[\(|\{|\[]\s*$", eachLine): checkOtherCharPassing += 1 continue else: pass if re.match(noAddScript, eachLine): if re.match(r".*[\\|,]\s*$", eachLine): if checkCharPassing == 1: continue else: checkCharPassing = 1 continue elif re.match(r".*[\(|\{|\[]\s*$", eachLine): checkOtherCharPassing += 1 continue else: continue # -- Adding scripts -- # elif re.match(addIndentScript, eachLine): if spaces == 0: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space4) ) countScriptsAdded += 1 elif spaces == 4: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space8) ) countScriptsAdded += 1 elif spaces == 8: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space12) ) countScriptsAdded += 1 elif spaces == 12: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space16) ) countScriptsAdded += 1 elif spaces == 16: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space20) ) countScriptsAdded += 1 elif spaces == 20: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space24) ) countScriptsAdded += 1 elif spaces == 24: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space28) ) countScriptsAdded += 1 elif spaces == 28: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, 
mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space32) ) countScriptsAdded += 1 elif spaces == 32: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space36) ) countScriptsAdded += 1 elif spaces == 36: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space40) ) countScriptsAdded += 1 elif spaces == 40: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space44) ) countScriptsAdded += 1 elif spaces == 44: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space48) ) countScriptsAdded += 1 elif spaces == 48: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space52) ) countScriptsAdded += 1 elif spaces == 52: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space56) ) countScriptsAdded += 1 elif spaces == 56: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space60) ) countScriptsAdded += 1 elif spaces == 60: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space64) ) countScriptsAdded += 1 else: continue else: if spaces == 0: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space0) ) countScriptsAdded += 1 elif spaces == 4: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space4) ) countScriptsAdded += 1 elif spaces == 8: 
sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space8) ) countScriptsAdded += 1 elif spaces == 12: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space12) ) countScriptsAdded += 1 elif spaces == 16: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space16) ) countScriptsAdded += 1 elif spaces == 20: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space20) ) countScriptsAdded += 1 elif spaces == 24: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space24) ) countScriptsAdded += 1 elif spaces == 28: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space28) ) countScriptsAdded += 1 elif spaces == 32: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space32) ) countScriptsAdded += 1 elif spaces == 36: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space36) ) countScriptsAdded += 1 elif spaces == 40: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space40) ) countScriptsAdded += 1 elif spaces == 44: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space44) ) countScriptsAdded += 1 elif spaces == 48: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space48) ) countScriptsAdded 
+= 1 elif spaces == 52: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space52) ) countScriptsAdded += 1 elif spaces == 56: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space56) ) countScriptsAdded += 1 elif spaces == 60: sys.stdout.write(textwrap.indent(Padding.ScriptsGenerator( self, mixerLengthArg=mixerLengthArg, mixerLevelArg=mixerLevelArg), self.space60) ) countScriptsAdded += 1 else: continue bar.next(1) bar.finish() # -- Check if padding has added in output script -- # with Bar("Check ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: with open(file , "r") as readFile: readF = readFile.readlines() for eachLine in readF: if not eachLine: continue checkLine += 1 bar.next(1) bar.finish() countLineAdded = checkLine - countLine if checkLine > countLine: print("\n-> {0} scripts added in {1} file(s)\n".format(countScriptsAdded, countRecursFiles)) print("-> {0} lines added in {1} file(s)\n".format(countLineAdded, countRecursFiles)) return EXIT_SUCCESS else: return EXIT_FAILURE def EmptyClasses(self, outputArg, mixerLengthArg, mixerLevelArg, verboseArg): countRecursFiles = 0 counterToCheckIndent = 0 numberLine = 0 numberLineInFile = 0 emptyClassInfo = {} emptyClassInfoCheck = {} detectClass = r"^class\s+\w+|\s+class\s+\w+" classDefined = r"class\s+(\w+)" recursFiles = self.utils.CheckFileDir( output=outputArg, detectFiles="py", blockDir="__pycache__", blockFile=False, dirOnly=False ) for number in recursFiles: countRecursFiles += 1 with Bar("Correction ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: numberLineInFile = 0 numberLine = 0 # -- Count all line(s) in file -- # with open(file, "r") as readFile: readF = readFile.readlines() for eachLine in readF: numberLineInFile += 1 # -- Find and put empty class(es) in dict 
-- # with open(file, "r") as readFile: readF = readFile.readlines() for eachLine in readF: numberLine += 1 if counterToCheckIndent == 1: spacesAfterClass = len(eachLine) - len(eachLine.lstrip()) counterToCheckIndent = 0 if spacesAfterClass == spacesClass: if search: emptyClassInfo[search.group(1)] = file numberLineInFile += 1 # Adding one line because padding will be added numberLine += 1 # Adding one line because padding will be added if re.match(detectClass, eachLine): spacesClass = len(eachLine) - len(eachLine.lstrip()) if numberLine == numberLineInFile: search = re.search(classDefined, eachLine) if search: emptyClassInfo[search.group(1)] = file else: counterToCheckIndent += 1 search = re.search(classDefined, eachLine) # -- Add padding in empty class(es) -- # numberLine = 0 with fileinput.input(file, inplace=True) as inputFile: for eachLine in inputFile: numberLine += 1 if counterToCheckIndent == 1: spacesAfterClass = len(eachLine) - len(eachLine.lstrip()) counterToCheckIndent = 0 if spacesAfterClass == spacesClass: paddingVar1 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) paddingVar2 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) finalVarPadding = "{0} = '{1}'\n".format(paddingVar1, paddingVar2) if spacesClass == 0: sys.stdout.write(textwrap.indent(finalVarPadding, self.space4)) elif spacesClass == 4: sys.stdout.write(textwrap.indent(finalVarPadding, self.space8)) elif spacesClass == 8: sys.stdout.write(textwrap.indent(finalVarPadding, self.space12)) numberLine += 1 sys.stdout.write(eachLine) if re.match(detectClass, eachLine): spacesClass = len(eachLine) - len(eachLine.lstrip()) if numberLine == numberLineInFile: paddingVar1 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) paddingVar2 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) 
finalVarPadding = "{0} = '{1}'\n".format(paddingVar1, paddingVar2) if spacesClass == 0: sys.stdout.write(textwrap.indent(finalVarPadding, self.space4)) elif spacesClass == 4: sys.stdout.write(textwrap.indent(finalVarPadding, self.space8)) elif spacesClass == 8: sys.stdout.write(textwrap.indent(finalVarPadding, self.space12)) else: counterToCheckIndent += 1 bar.next(1) bar.finish() # -- Check if class(es) is still empty -- # if emptyClassInfo != {}: with Bar("Check ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: numberLineInFile = 0 numberLine = 0 with open(file, "r") as readFile: readF = readFile.readlines() for eachLine in readF: numberLine += 1 if counterToCheckIndent == 1: spacesAfterClass = len(eachLine) - len(eachLine.lstrip()) counterToCheckIndent = 0 if spacesAfterClass == spacesClass: if search: emptyClassInfo[search.group(1)] = file numberLineInFile += 1 numberLine += 1 if re.match(detectClass, eachLine): spacesClass = len(eachLine) - len(eachLine.lstrip()) if numberLine == numberLineInFile: search = re.search(classDefined, eachLine) if search: emptyClassInfo[search.group(1)] = file else: counterToCheckIndent += 1 search = re.search(classDefined, eachLine) bar.next(1) bar.finish() if emptyClassInfoCheck == {}: for key, value in emptyClassInfo.items(): print("\n-> File : {0}".format(value)) print("-> Padding added in : {0} ( empty class )".format(key)) return EXIT_SUCCESS else: if verboseArg: print("\n[!] No padding added to empty class(es)... :\n") for key, value in emptyClassInfoCheck.items(): print("\n-> File : {0}".format(value)) print("-> Class : {0}".format(key)) return EXIT_FAILURE else: print("[!] 
No empty class found in {0}".format(outputArg)) return EXIT_SUCCESS def EmptyFunctions(self, outputArg, mixerLengthArg, mixerLevelArg, verboseArg): countRecursFiles = 0 counterToCheckIndent = 0 numberLine = 0 numberLineInFile = 0 emptyFuncInfo = {} emptyFuncInfoCheck = {} detectFunction = r"^def\s+\w+|\s+def\s\w+" functionDefined = r"def\s+(\w+)" recursFiles = self.utils.CheckFileDir( output=outputArg, detectFiles="py", blockDir="__pycache__", blockFile=False, dirOnly=False ) for number in recursFiles: countRecursFiles += 1 with Bar("Correction ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: numberLineInFile = 0 numberLine = 0 # -- Count all line(s) in file -- # with open(file, "r") as readFile: readF = readFile.readlines() for eachLine in readF: numberLineInFile += 1 # -- Find and put empty function(s) in dict -- # with open(file, "r") as readFile: readF = readFile.readlines() for eachLine in readF: numberLine += 1 if counterToCheckIndent == 1: spacesAfterFunc = len(eachLine) - len(eachLine.lstrip()) counterToCheckIndent = 0 if spacesAfterFunc == spacesFunc: if search: emptyFuncInfo[search.group(1)] = file numberLineInFile += 1 # Adding one line because padding will be added numberLine += 1 # Adding one line because padding will be added if re.match(detectFunction, eachLine): spacesFunc = len(eachLine) - len(eachLine.lstrip()) if numberLine == numberLineInFile: search = re.search(functionDefined, eachLine) if search: emptyFuncInfo[search.group(1)] = file else: counterToCheckIndent += 1 search = re.search(functionDefined, eachLine) # -- Add padding in empty function(s) -- # numberLine = 0 with fileinput.input(file, inplace=True) as inputFile: for eachLine in inputFile: numberLine += 1 if counterToCheckIndent == 1: spacesAfterFunc = len(eachLine) - len(eachLine.lstrip()) counterToCheckIndent = 0 if spacesAfterFunc == spacesFunc: paddingVar1 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, 
mixerLevelArgDefined=mixerLevelArg ) paddingVar2 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) finalVarPadding = "{0} = '{1}'\n".format(paddingVar1, paddingVar2) if spacesFunc == 0: sys.stdout.write(textwrap.indent(finalVarPadding, self.space4)) elif spacesFunc == 4: sys.stdout.write(textwrap.indent(finalVarPadding, self.space8)) elif spacesFunc == 8: sys.stdout.write(textwrap.indent(finalVarPadding, self.space12)) elif spacesFunc == 12: sys.stdout.write(textwrap.indent(finalVarPadding, self.space16)) elif spacesFunc == 16: sys.stdout.write(textwrap.indent(finalVarPadding, self.space20)) elif spacesFunc == 20: sys.stdout.write(textwrap.indent(finalVarPadding, self.space24)) numberLine += 1 sys.stdout.write(eachLine) if re.match(detectFunction, eachLine): spacesFunc = len(eachLine) - len(eachLine.lstrip()) if numberLine == numberLineInFile: paddingVar1 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) paddingVar2 = self.mixer.GetStringMixer( mixerLengthArgDefined=mixerLengthArg, mixerLevelArgDefined=mixerLevelArg ) finalVarPadding = "{0} = '{1}'\n".format(paddingVar1, paddingVar2) if spacesFunc == 0: sys.stdout.write(textwrap.indent(finalVarPadding, self.space4)) elif spacesFunc == 4: sys.stdout.write(textwrap.indent(finalVarPadding, self.space8)) elif spacesFunc == 8: sys.stdout.write(textwrap.indent(finalVarPadding, self.space12)) elif spacesFunc == 12: sys.stdout.write(textwrap.indent(finalVarPadding, self.space16)) elif spacesFunc == 16: sys.stdout.write(textwrap.indent(finalVarPadding, self.space20)) elif spacesFunc == 20: sys.stdout.write(textwrap.indent(finalVarPadding, self.space24)) else: counterToCheckIndent += 1 bar.next(1) bar.finish() # -- Check if function(s) is still empty -- # if emptyFuncInfo != {}: with Bar("Check ", fill="=", max=countRecursFiles, suffix="%(percent)d%%") as bar: for file in recursFiles: numberLineInFile = 0 numberLine = 
0 with open(file, "r") as readFile: readF = readFile.readlines() for eachLine in readF: numberLine += 1 if counterToCheckIndent == 1: spacesAfterFunc = len(eachLine) - len(eachLine.lstrip()) counterToCheckIndent = 0 if spacesAfterFunc == spacesFunc: if search: emptyFuncInfoCheck[search.group(1)] = file numberLineInFile += 1 numberLine += 1 if re.match(detectFunction, eachLine): spacesFunc = len(eachLine) - len(eachLine.lstrip()) if numberLine == numberLineInFile: search = re.search(functionDefined, eachLine) if search: emptyFuncInfoCheck[search.group(1)] = file else: counterToCheckIndent += 1 search = re.search(functionDefined, eachLine) bar.next(1) bar.finish() if emptyFuncInfoCheck == {}: for key, value in emptyFuncInfo.items(): print("\n-> File : {0}".format(value)) print("-> Padding added in : {0} ( empty function )".format(key)) return EXIT_SUCCESS else: if verboseArg: print("\n[!] No padding added to empty function(s)... :\n") for key, value in emptyFuncInfoCheck.items(): print("\n-> File : {0}".format(value)) print("-> Function : {0}".format(key)) return EXIT_FAILURE else: print("[!] No empty function found in {0}".format(outputArg)) return EXIT_SUCCESS
StarcoderdataPython
353479
import unittest

import torch

from lightly.models.modules.heads import BarlowTwinsProjectionHead
from lightly.models.modules.heads import BYOLProjectionHead
from lightly.models.modules.heads import DINOProjectionHead
from lightly.models.modules.heads import MoCoProjectionHead
from lightly.models.modules.heads import NNCLRProjectionHead
from lightly.models.modules.heads import NNCLRPredictionHead
from lightly.models.modules.heads import SimCLRProjectionHead
from lightly.models.modules.heads import SimSiamProjectionHead
from lightly.models.modules.heads import SimSiamPredictionHead
from lightly.models.modules.heads import SwaVProjectionHead
from lightly.models.modules.heads import SwaVPrototypes


class TestProjectionHeads(unittest.TestCase):
    """Smoke tests: every projection/prediction head maps (batch, d_in) -> (batch, d_out)."""

    def setUp(self):
        # (input_dim, hidden_dim, output_dim) permutations exercised by every test.
        self.n_features = [
            (8, 16, 32),
            (8, 32, 16),
            (16, 8, 32),
            (16, 32, 8),
            (32, 8, 16),
            (32, 16, 8),
        ]
        self.heads = [
            BarlowTwinsProjectionHead,
            BYOLProjectionHead,
            MoCoProjectionHead,
            NNCLRProjectionHead,
            NNCLRPredictionHead,
            SimCLRProjectionHead,
            SimSiamProjectionHead,
            SimSiamPredictionHead,
            SwaVProjectionHead,
            DINOProjectionHead,
        ]

    def test_single_projection_head(self, device: str = 'cpu', seed=0):
        """Forward a random batch through every head and check the output shape."""
        for head_cls in self.heads:
            for in_features, hidden_features, out_features in self.n_features:
                torch.manual_seed(seed)
                if head_cls == DINOProjectionHead:
                    # NOTE(review): positional order here is
                    # (input, hidden, bottleneck, output); verify it matches the
                    # DINOProjectionHead signature of the pinned lightly version.
                    bottleneck_features = hidden_features
                    head = head_cls(in_features, hidden_features, bottleneck_features, out_features)
                else:
                    head = head_cls(in_features, hidden_features, out_features)
                head = head.eval()
                head = head.to(device)
                for batch_size in [1, 2]:
                    msg = f'head: {head_cls}' + \
                        f'd_in, d_h, d_out = ' + \
                        f'{in_features}x{hidden_features}x{out_features}'
                    with self.subTest(msg=msg):
                        # BUG FIX: 'torch.torch.rand' only worked because torch
                        # re-exports itself; use the canonical 'torch.rand'.
                        x = torch.rand((batch_size, in_features)).to(device)
                        with torch.no_grad():
                            y = head(x)
                        self.assertEqual(y.shape[0], batch_size)
                        self.assertEqual(y.shape[1], out_features)

    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_single_projection_head_cuda(self, seed=0):
        """Same shape check, on the GPU when one is available."""
        self.test_single_projection_head(device='cuda', seed=seed)

    def test_swav_prototypes(self, device: str = 'cpu', seed=0):
        """SwaVPrototypes maps (batch, d_in) -> (batch, n_prototypes)."""
        for in_features, _, n_prototypes in self.n_features:
            torch.manual_seed(seed)
            prototypes = SwaVPrototypes(in_features, n_prototypes)
            prototypes = prototypes.eval()
            prototypes = prototypes.to(device)
            for batch_size in [1, 2]:
                msg = 'prototypes d_in, n_prototypes = ' +\
                    f'{in_features} x {n_prototypes}'
                with self.subTest(msg=msg):
                    x = torch.rand((batch_size, in_features)).to(device)
                    with torch.no_grad():
                        y = prototypes(x)
                    self.assertEqual(y.shape[0], batch_size)
                    self.assertEqual(y.shape[1], n_prototypes)

    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_swav_prototypes_cuda(self, seed=0):
        self.test_swav_prototypes(device='cuda', seed=seed)

    def test_dino_projection_head(self, device="cpu", seed=0):
        """DINO head output shape for every bottleneck/batch-norm combination."""
        input_dim, hidden_dim, output_dim = self.n_features[0]
        for bottleneck_dim in [8, 16, 32]:
            for batch_norm in [False, True]:
                torch.manual_seed(seed)
                head = DINOProjectionHead(
                    input_dim=input_dim,
                    hidden_dim=hidden_dim,
                    output_dim=output_dim,
                    bottleneck_dim=bottleneck_dim,
                    batch_norm=batch_norm,
                )
                head = head.eval()
                head = head.to(device)
                for batch_size in [1, 2]:
                    msg = (
                        f"bottleneck_dim={bottleneck_dim}, "
                        f"batch_norm={batch_norm}"
                    )
                    with self.subTest(msg=msg):
                        x = torch.rand((batch_size, input_dim)).to(device)
                        with torch.no_grad():
                            y = head(x)
                        self.assertEqual(y.shape[0], batch_size)
                        self.assertEqual(y.shape[1], output_dim)

    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_dino_projection_head_cuda(self, seed=0):
        self.test_dino_projection_head(device="cuda", seed=seed)
StarcoderdataPython
6506483
import FWCore.ParameterSet.Config as cms

# CMSSW configuration fragment: enables particle decays in the FastSimulation
# trajectory manager.
ActivateDecaysBlock = cms.PSet(
    ActivateDecays = cms.PSet(
        ActivateDecays = cms.bool(True),
        # Maximum angle to associate a charged daughter to a charged mother
        # Mostly done to associate muons to decaying pions
        DistCut = cms.double(0.02)
    )
)
StarcoderdataPython
3269370
import json
import time
from unittest.mock import patch

from requests import Response

from raindropio import *


def test_refresh() -> None:
    """An expired access token is refreshed before the actual request is issued."""
    expired_token = {
        "access_token": "old",
        "refresh_token": "<PASSWORD>",
        "expires_at": time.time() - 100000,
    }
    api = API(expired_token)

    with patch("requests.Session.request") as request_mock:
        # Fake HTTP 200 response carrying a fresh token payload.
        fake_response = Response()
        fake_response.status_code = 200
        refreshed = {"access_token": "updated", "expires_at": time.time() + 100000}
        fake_response._content = json.dumps(refreshed).encode()
        request_mock.return_value = fake_response

        api.get("https://localhost", {})

        # First call must be the OAuth refresh, second the actual GET.
        refresh_call, api_call = request_mock.call_args_list
        assert refresh_call[0] == ("POST", "https://raindrop.io/oauth/access_token")
        assert api_call[0] == ("GET", "https://localhost")
        assert isinstance(api.token, dict)
        assert api.token["access_token"] == "updated"
StarcoderdataPython
9601806
"""Generate the ValaCEF bindings (VAPI, Vala, C glue) from the CEF C headers."""
import os
import sys

from valacefgen.cparser import Parser, Naming
from valacefgen.types import Repository, Function
from valacefgen.utils import TypeInfo

# Optional CLI arguments: CEF include dir, source top dir, output dir.
try:
    CEF_INCLUDE_DIR = sys.argv[1]
except IndexError:
    CEF_INCLUDE_DIR = "/app/include/cef/include"
try:
    TOP = sys.argv[2] or '.'
except IndexError:
    TOP = '.'
try:
    OUT = sys.argv[3] or '.'
except IndexError:
    OUT = 'build'

# Headers to parse.  Tuples are (path to parse, C include path to emit);
# plain strings are resolved relative to CEF_INCLUDE_DIR.
header_files = [
    ('%s/overrides/cef_primitives.h' % TOP, 'capi/cef_base_capi.h'),
    ('%s/overrides/cef_base.h' % TOP, 'capi/cef_base_capi.h'),
    ('%s/overrides/cef_string.h' % TOP, 'capi/cef_base_capi.h'),
    ('%s/overrides/cef_error_codes.h' % TOP, 'internal/cef_types.h'),
    'internal/cef_types_linux.h',
    'internal/cef_types.h',
    'internal/cef_string_list.h',
    'capi/cef_app_capi.h',
    'capi/cef_base_capi.h',
    'internal/cef_time.h',
    'capi/cef_audio_handler_capi.h',
    'capi/cef_command_line_capi.h',
    'capi/cef_browser_process_handler_capi.h',
    'capi/cef_render_process_handler_capi.h',
    'capi/cef_resource_bundle_handler_capi.h',
    'capi/cef_resource_handler_capi.h',
    'capi/cef_resource_request_handler_capi.h',
    'capi/cef_request_callback_capi.h',
    'capi/cef_urlrequest_capi.h',
    'capi/cef_scheme_capi.h',
    'capi/cef_request_capi.h',
    'capi/cef_browser_capi.h',
    'capi/cef_path_util_capi.h',
    'capi/cef_client_capi.h',
    'capi/cef_dialog_handler_capi.h',
    'capi/cef_keyboard_handler_capi.h',
    'capi/cef_process_message_capi.h',
    'capi/cef_life_span_handler_capi.h',
    'capi/cef_load_handler_capi.h',
    'capi/cef_drag_handler_capi.h',
    'capi/cef_focus_handler_capi.h',
    'capi/cef_context_menu_handler_capi.h',
    'capi/cef_render_handler_capi.h',
    'capi/cef_jsdialog_handler_capi.h',
    'capi/cef_request_handler_capi.h',
    'capi/cef_download_handler_capi.h',
    'capi/cef_find_handler_capi.h',
    'capi/cef_display_handler_capi.h',
    'capi/cef_frame_capi.h',
    'capi/cef_menu_model_capi.h',
    'capi/cef_menu_model_delegate_capi.h',
    'capi/cef_download_item_capi.h',
    'capi/cef_drag_data_capi.h',
    'capi/cef_image_capi.h',
    'capi/cef_string_visitor_capi.h',
    'capi/cef_dom_capi.h',
    'capi/cef_v8_capi.h',
    'capi/cef_stream_capi.h',
    'capi/cef_values_capi.h',
    'capi/cef_accessibility_handler_capi.h',
    'capi/cef_response_capi.h',
    'capi/cef_task_capi.h',
    'capi/cef_response_filter_capi.h',
    'capi/cef_ssl_info_capi.h',
    'capi/cef_auth_callback_capi.h',
    'capi/cef_x509_certificate_capi.h',
    'capi/cef_request_context_capi.h',
    'capi/cef_request_context_handler_capi.h',
    'capi/cef_cookie_capi.h',
    'capi/cef_web_plugin_capi.h',
    'capi/cef_callback_capi.h',
    'capi/cef_print_handler_capi.h',
    'capi/cef_print_settings_capi.h',
    'capi/cef_navigation_entry_capi.h',
    'capi/cef_ssl_status_capi.h',
    'capi/cef_extension_capi.h',
    'capi/cef_extension_handler_capi.h',
]

# Symbols to skip entirely (X11 types and an unused thread id constant).
ignore = {
    'XEvent',
    'XDisplay',
    'cef_get_xdisplay',
    'TID_FILE',
}

base_structs = {
    "cef_base_scoped_t",
}

base_classes = {
    "cef_base_ref_counted_t",
}


class Overrides:
    """Per-parameter tweaks applied by the parser to specific CEF functions."""

    def param__cef_string_utf8_to_utf16__src(self, info: TypeInfo):
        info.c_type = 'string'

    def param__cef_string_utf8_to_utf16__output(self, info: TypeInfo):
        info.ref = True

    def param__cef_string_utf16_to_utf8_output(self, info: TypeInfo):
        info.ref = True


parser = Parser(Naming('Cef'), Repository('Cef', Overrides()), ignore, base_structs, base_classes)
for entry in header_files:
    if isinstance(entry, str):
        c_include_path = entry
        path = os.path.join(CEF_INCLUDE_DIR, entry)
    else:
        path, c_include_path = entry
    parser.parse_header(path, c_include_path)

# Attach ref/unref methods to the ref-counted base struct.
repo = parser.repo
ref_func = Function('cef_base_ref_counted_ref', 'ref', "valacef_api.h")
unref_func = Function('cef_base_ref_counted_unref', 'unref', "valacef_api.h")
base_refcounted = repo.structs['cef_base_ref_counted_t']
base_refcounted.add_method(ref_func)
base_refcounted.add_method(unref_func)
base_refcounted.set_ref_counting(ref_func.c_name, unref_func.c_name)
# C glue implementations of ref/unref.
ref_func = Function('cef_base_ref_counted_ref', 'ref', "capi/cef_base_capi.h",
                    params=[("cef_base_ref_counted_t*", "self")],
                    body=['self->add_ref(self);', 'return self;'],
                    ret_type="cef_base_ref_counted_t*")
unref_func = Function('cef_base_ref_counted_unref', 'unref', "capi/cef_base_capi.h",
                      params=[("cef_base_ref_counted_t*", "self")],
                      body=['self->release(self);'])
parser.add_c_glue(ref_func, unref_func)

# Atomic ref-count glue: the counter lives in the last int-sized slot of the
# derived struct (self->size bytes from the start).
add_ref_func = Function(
    'cef_base_ref_counted_add_ref', 'base_ref_counted_add_ref', 'capi/cef_base_capi.h;stdio.h',
    params=[('void*', 'self_ptr')],
    body=[
        'cef_base_ref_counted_t* self = (cef_base_ref_counted_t*) self_ptr;',
        'char* pointer = (char*) self + (self->size - (sizeof(int) > sizeof(void*) ? sizeof(int) : sizeof(void*)));',
        'volatile int* ref_count = (volatile int*) pointer;',
        '// printf("%p++ (%d) size: %d\\n", self, *ref_count + 1, (int) self->size);',
        'g_atomic_int_inc(ref_count);',
    ])
release_ref_func = Function(
    'cef_base_ref_counted_release_ref', 'base_ref_counted_release_ref', 'stdlib.h;capi/cef_base_capi.h;stdio.h',
    'int',
    params=[('void*', 'self_ptr')],
    body=[
        # NOTE(review): missing comma after the first string -> implicit string
        # concatenation; the two C statements are emitted on one line (still
        # valid C, but likely unintended).
        'gboolean is_dead = FALSE;'
        'cef_base_ref_counted_t* self = (cef_base_ref_counted_t*) self_ptr;',
        'char* pointer = (char*) self + (self->size - (sizeof(int) > sizeof(void*) ? sizeof(int) : sizeof(void*)));',
        'volatile int* ref_count = (volatile int*) pointer;',
        '// printf("%p-- (%d) size: %d\\n", self, *ref_count - 1, (int) self->size);',
        'is_dead = g_atomic_int_dec_and_test(ref_count);',
        'if (is_dead) {',
        '    // printf("%p dealloc!\\n", self);',
        '    GData** priv_data = (GData**)(pointer - sizeof(void*));',
        '    g_datalist_clear(priv_data);',
        '    free(self_ptr);',
        '}',
        'return is_dead;'
    ])
has_one_ref_func = Function(
    'cef_base_ref_counted_has_one_ref', 'base_ref_counted_has_one_ref', 'capi/cef_base_capi.h;stdio.h',
    'int',
    params=[('void*', 'self_ptr')],
    body=[
        'cef_base_ref_counted_t* self = (cef_base_ref_counted_t*) self_ptr;',
        'char* pointer = (char*) self + (self->size - (sizeof(int) > sizeof(void*) ? sizeof(int) : sizeof(void*)));',
        'volatile int* ref_count = (volatile int*) pointer;',
        '// printf("%p?? %d size: %d\\n", self, *ref_count, (int) self->size);',
        'return g_atomic_int_get(ref_count) == 1;',
    ])
init_refcounting_func = Function(
    'cef_base_ref_counted_init_ref_counting', 'init_refcounting', 'capi/cef_base_capi.h;stdio.h',
    params=[('void*', 'self_ptr'), ('size_t', 'base_size'), ('size_t', 'derived_size')],
    body=[
        'cef_base_ref_counted_t* self = (cef_base_ref_counted_t*) self_ptr;',
        'self->size = derived_size;',
        'self->add_ref = %s;' % add_ref_func.c_name,
        'self->release = %s;' % release_ref_func.c_name,
        'self->has_one_ref = %s;' % has_one_ref_func.c_name,
        'g_assert(base_size + (sizeof(int) > sizeof(void*) ? sizeof(int) : sizeof(void*)) + sizeof(void*) == '
        'derived_size);',
        'char* pointer = (char*) self + (self->size - (sizeof(int) > sizeof(void*) ? sizeof(int) : sizeof(void*)));',
        'volatile int* ref_count = (volatile int*) pointer;',
        'g_atomic_int_set(ref_count, 1);',
        '// printf("%p=%d size: %d\\n", self, *ref_count, (int) self->size);',
    ])
parser.add_c_glue(add_ref_func, release_ref_func, has_one_ref_func, init_refcounting_func)

# UTF-16 <-> UTF-8 string helpers: C glue implementation plus the VAPI-visible
# declaration registered in the repository.
utf16_to_utf8_func = Function(
    'cef_utf16_string_to_vala_string', 'get_string', 'capi/cef_base_capi.h;stdio.h',
    'char*',
    params=[('cef_string_t*', 'utf16_str')],
    body=[
        'if (utf16_str == NULL) return NULL;',
        'cef_string_utf8_t utf8_str = {};',
        'cef_string_utf16_to_utf8(utf16_str->str, utf16_str->length, &utf8_str);',
        'return utf8_str.str;',
    ])
parser.add_c_glue(utf16_to_utf8_func)
utf16_to_utf8_func = Function(
    'cef_utf16_string_to_vala_string', 'get_string', 'valacef_api.h',
    'char*',
    params=[('cef_string_t*', 'utf16_str')])
repo.add_function(utf16_to_utf8_func)
utf8_to_utf16_func = Function(
    'cef_utf16_string_from_vala_string', 'set_string', 'string.h;capi/cef_base_capi.h;stdio.h',
    params=[('cef_string_t*', 'utf16_str'), ('char*', 'str')],
    body=[
        'cef_string_utf8_to_utf16(str, strlen(str), utf16_str);',
    ])
parser.add_c_glue(utf8_to_utf16_func)
utf8_to_utf16_func = Function(
    'cef_utf16_string_from_vala_string', 'set_string', 'valacef_api.h',
    params=[('cef_string_t*', 'utf16_str'), ('char*', 'str')])
repo.add_function(utf8_to_utf16_func)

# Emit the four generated files.
vapi, vala, c_header, c_glue = parser.finish()
os.makedirs(OUT, exist_ok=True)
with open(OUT + "/valacef_api.vapi", "wt") as f:
    f.write(vapi)
with open(OUT + "/cef.vala", "wt") as f:
    f.write(vala)
with open(OUT + "/valacef_api.c", "wt") as f:
    f.write(c_glue)
with open(OUT + "/valacef_api.h", "wt") as f:
    f.write(c_header)
StarcoderdataPython
3546740
import os
import tifffile as tif
import argparse
import glob


def parse_args():
    """Parse input arguments"""
    parser = argparse.ArgumentParser(
        description='Load CIDRE-processed images into structured folder.')
    parser.add_argument(
        '--in_path', dest='in_path', required=True,
        help='Processed images by Cidre')
    parser.add_argument(
        '--out_path', dest='out_path', required=True,
        help='Folder of all samples')
    return parser.parse_args()


def split(word):
    """Return the characters of *word* as a list."""
    return [char for char in word]


def main():
    """Copy CIDRE-corrected TIFFs into per-sample output folders.

    File names are expected to be '|'-separated
    (<sample>|<region>|<rest...>.tif — TODO confirm against upstream naming);
    each image is rewritten to
    <out_path>/<sample>/<region>/processed/cut/fluor_cidre/<rest...>.
    """
    args = parse_args()
    in_path = args.in_path
    out_path = args.out_path
    print ('in path: ' + in_path)
    print ('out path: ' + out_path)
    included_extensions = ['tif']
    filenames = [fn for fn in os.listdir(in_path)
                 if any(fn.endswith(ext) for ext in included_extensions)]
    for _f in filenames:
        print (_f)
        parts = _f.split('|')
        # Build the destination directory once instead of repeating the
        # concatenation. makedirs(exist_ok=True) also creates any missing
        # parent directories and avoids the exists()/mkdir race; the
        # original os.mkdir failed whenever a parent was absent.
        target_dir = os.path.join(
            out_path, parts[0], parts[1], 'processed', 'cut', 'fluor_cidre')
        os.makedirs(target_dir, exist_ok=True)
        tif.imsave(os.path.join(target_dir, '|'.join(parts[2:])),
                   tif.imread(os.path.join(in_path, _f)))


if __name__ == '__main__':
    main()
StarcoderdataPython
1626189
"""Calculator Locator Class""" # Created by <NAME>. # GitHub: https://github.com/ikostan # LinkedIn: https://www.linkedin.com/in/egor-kostan/ from appium.webdriver.common.mobileby import MobileBy class CalculatorPageLocator: """ Contains page locators for Calculator App Each locator is a tuple: locator method + locator """ DIGITS = { 0: (MobileBy.ID, 'com.android.calculator2:id/digit_0'), 1: (MobileBy.ID, 'com.android.calculator2:id/digit_1'), 2: (MobileBy.ID, 'com.android.calculator2:id/digit_2'), 3: (MobileBy.ID, 'com.android.calculator2:id/digit_3'), 4: (MobileBy.ID, 'com.android.calculator2:id/digit_4'), 5: (MobileBy.ID, 'com.android.calculator2:id/digit_5'), 6: (MobileBy.ID, 'com.android.calculator2:id/digit_6'), 7: (MobileBy.ID, 'com.android.calculator2:id/digit_7'), 8: (MobileBy.ID, 'com.android.calculator2:id/digit_8'), 9: (MobileBy.ID, 'com.android.calculator2:id/digit_9'), } POINT_BTN = (MobileBy.ID, 'com.android.calculator2:id/dec_point') PLUS_BTN = (MobileBy.ID, 'com.android.calculator2:id/op_add') MINUS_BTN = (MobileBy.ID, 'com.android.calculator2:id/op_sub') MULTIPLICATION_BTN = (MobileBy.ID, 'com.android.calculator2:id/op_mul') DIVISION_BTN = (MobileBy.ID, 'com.android.calculator2:id/op_div') DEL_BTN = (MobileBy.ID, 'com.android.calculator2:id/del') CLEAR_BTN = (MobileBy.ID, 'com.android.calculator2:id/clr') EQUAL_BTN = (MobileBy.ID, 'com.android.calculator2:id/eq') DISPLAY = (MobileBy.ID, 'com.android.calculator2:id/display') SCREEN_FORMULA = (MobileBy.ID, 'com.android.calculator2:id/formula') SCREEN_RESULT = (MobileBy.ID, 'com.android.calculator2:id/result') MORE_OPTIONS = (MobileBy.XPATH, '//android.widget.ImageButton[@content-desc="More options"]') OPEN_SOURCE_LICENSE = (MobileBy.ID, 'android:id/title')
StarcoderdataPython
5180949
#!/usr/bin/env python ''' Generates an AST_L1T-SO2 product ''' from __future__ import print_function import os import json import urllib3 import dateutil.parser import requests import numpy as np from hysds.celery import app import run_ratio urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) PROD_SHORT_NAME = 'AST_L1T-SO' VERSION = "v1.0" # determined globals PROD = "{}-{}-{}" # eg: AST_L1T-20190514T341405_20190514T341435-v1.0 INPUT_TYPE = 'AST_L1T' INDEX = 'grq_{}_{}'.format(VERSION, PROD_SHORT_NAME) def main(): '''Generates the ratio product if is not on GRQ''' # load parameters ctx = load_context() metadata = ctx.get("prod_metadata", False) prod_type = ctx.get("prod_type", False) input_prod_id = ctx.get("prod_id", False) if not prod_type == INPUT_TYPE: raise Exception("input needs to be {}. Input is of type: {}".format(INPUT_TYPE, prod_type)) starttime = ctx.get("starttime", False) endtime = ctx.get("endtime", False) location = ctx.get("location", False) #ingest the product generate_product(input_prod_id, starttime, endtime, location, metadata) def generate_product(input_prod_id, starttime, endtime, location, metadata): '''determines if the product has been generated. if not, generates the product''' # generate product id prod_id = gen_prod_id(starttime, endtime) #get the input product path input_product_path = False for afile in os.listdir(input_prod_id): if afile.endswith('hdf') or afile.endswith('HDF'): input_product_path = os.path.join(input_prod_id, afile) if not input_product_path: raise Exception('unable to find input hdf file in dir: {}'.format(input_prod_id)) # determine if product exists on grq if exists(prod_id): print('product with id: {} already exists. 
Exiting.'.format(prod_id)) return # make product dir if not os.path.exists(prod_id): os.mkdir(prod_id) output_filename = '{}.tif'.format(prod_id) output_product_path = os.path.join(prod_id, output_filename) print('attempting to generate product: {}'.format(output_filename)) # run product generation array = run_ratio.main(input_product_path, output_product_path) if not os.path.exists(output_product_path): raise Exception('Failed generating product') dst, met = gen_jsons(prod_id, starttime, endtime, location, metadata) met['max_val'] = np.ma.max(array) met['90_percentile'] = np.percentile(array, 90) # save the metadata fo;es save_product_met(prod_id, dst, met) # generate browse generate_browse(output_product_path, prod_id) def gen_prod_id(starttime, endtime): '''generates the product id from the input metadata & params''' start = dateutil.parser.parse(starttime).strftime('%Y%m%dT%H%M%S') end = dateutil.parser.parse(endtime).strftime('%Y%m%dT%H%M%S') time_str = '{}_{}'.format(start, end) return PROD.format(PROD_SHORT_NAME, time_str, VERSION) def exists(uid): '''queries grq to see if the input id exists. Returns True if it does, False if not''' grq_ip = app.conf['GRQ_ES_URL']#.replace(':9200', '').replace('http://', 'https://') grq_url = '{0}/{1}/_search'.format(grq_ip, INDEX) es_query = {"query": {"bool": {"must": [{"term": {"id.raw": uid}}]}}, "from": 0, "size": 1} return query_es(grq_url, es_query) def query_es(grq_url, es_query): '''simple single elasticsearch query, used for existence. 
returns count of result.''' print('querying: {} with {}'.format(grq_url, es_query)) response = requests.post(grq_url, data=json.dumps(es_query), verify=False) try: response.raise_for_status() except: # if there is an error (or 404,just publish return 0 results = json.loads(response.text, encoding='ascii') #results_list = results.get('hits', {}).get('hits', []) total_count = results.get('hits', {}).get('total', 0) return int(total_count) def generate_browse(product_path, prod_id): '''generates a browse from an input product path''' browse_path = os.path.join(prod_id, '{}.browse.png'.format(prod_id)) browse_small_path = os.path.join(prod_id, '{}.browse_small.png'.format(prod_id)) if os.path.exists(browse_path): return #conver to png os.system("convert {} -transparent black {}".format(product_path, browse_path)) #convert to small png os.system("convert {} -transparent black -resize 300x300 {}".format(product_path, browse_small_path)) def gen_jsons(prod_id, starttime, endtime, location, metadata): '''generates ds and met json blobs''' ds = {"label": prod_id, "starttime": starttime, "endtime": endtime, "location": location, "version": VERSION} met = metadata return ds, met def save_product_met(prod_id, ds_obj, met_obj): '''generates the appropriate product json files in the product directory''' if not os.path.exists(prod_id): os.mkdir(prod_id) outpath = os.path.join(prod_id, '{}.dataset.json'.format(prod_id)) with open(outpath, 'w') as outf: json.dump(ds_obj, outf) outpath = os.path.join(prod_id, '{}.met.json'.format(prod_id)) with open(outpath, 'w') as outf: json.dump(met_obj, outf) def load_context(): '''loads the context file into a dict''' try: context_file = '_context.json' with open(context_file, 'r') as fin: context = json.load(fin) return context except: raise Exception('unable to parse _context.json from work directory') if __name__ == '__main__': main()
StarcoderdataPython
11389665
""" :codeauthor: <NAME> (<EMAIL>) salt.config.schemas ~~~~~~~~~~~~~~~~~~~ Salt configuration related schemas for future validation """
StarcoderdataPython
3432356
# -*- coding: utf-8 -*-
# Copyright 2017-TODAY LasLabs Inc.
# License MIT (https://opensource.org/licenses/MIT).

import mock
import os
import unittest

from properties import HasProperties as BaseModel

from ..daily_med import DailyMed
from ..models import SPL

mock_path = 'daily_med.daily_med'


class TestDailyMed(unittest.TestCase):
    """Unit tests for the DailyMed API client."""

    def setUp(self):
        self.dm = DailyMed()

    def get_sample_xml(self):
        """Read and return the bundled sample SPL XML document."""
        tests_dir = os.path.abspath(os.path.dirname(__file__))
        xml_path = os.path.join(tests_dir, 'spl_doc_1.xml')
        with open(xml_path, 'r') as fh:
            return fh.read()

    @mock.patch('{}.RequestPaginator'.format(mock_path))
    def test_call(self, paginator):
        """ It should create a RequestPaginator w/ the proper args. """
        self.dm.API_BASE = 'base'
        call_params = {'params': 1}
        self.dm.call('endpoint', BaseModel, call_params)
        paginator.assert_called_once_with(
            'base/endpoint.json', call_params, output_type=BaseModel,
        )

    @mock.patch('{}.RequestPaginator'.format(mock_path))
    def test_call_return(self, paginator):
        """ It should return the paginator. """
        result = self.dm.call('endpoint', BaseModel)
        self.assertEqual(result, paginator())

    @mock.patch('{}.requests'.format(mock_path))
    def test_get_spl_request(self, requests):
        """ It should request the proper URI. """
        fake_response = mock.MagicMock()
        fake_response.text = self.get_sample_xml()
        requests.get.return_value = fake_response
        self.dm.API_BASE = 'base'
        self.dm.get_spl('set_id')
        requests.get.assert_called_once_with(
            url='base/spls/set_id.xml',
        )

    @mock.patch('{}.requests'.format(mock_path))
    def test_get_spl_return(self, requests):
        """ It should return an SPLDocument """
        fake_response = mock.MagicMock()
        fake_response.text = self.get_sample_xml()
        requests.get.return_value = fake_response
        result = self.dm.get_spl('set_id')
        self.assertIsInstance(result, SPL)

    def test_get_spls(self):
        """ It should make the proper call and return it. """
        with mock.patch.object(self.dm, 'call') as call:
            kwargs = {'kwargs': 'sZdsdfd'}
            result = self.dm.get_spls(**kwargs)
            call.assert_called_once_with(
                'spls', SPL, kwargs,
            )
            self.assertEqual(result, call())
StarcoderdataPython
373428
import numpy as np
from read_img import read_img
from time import time
import sys


def _matches(line, gray, yellow, green):
    """Return True if *line* (a newline-terminated word) satisfies every
    (char, index) constraint in the gray / green / yellow sets.

    Ordering mirrors the original checks: gray first, then green, then
    yellow. Gray only excludes the letter at its recorded position
    (duplicate-letter handling); yellow requires the letter elsewhere in
    the word but not at its recorded position.
    """
    for c, idx in gray:
        if line[idx] == c:
            return False
    for c, idx in green:
        if line[idx] != c:
            return False
    for c, idx in yellow:
        if c not in line:
            return False
        elif line[idx] == c:
            return False
    return True


def _filter_words(path, gray, yellow, green):
    """Return the words (newline stripped) in *path* matching all constraints.

    Factored out so the common and full word lists share one implementation
    instead of two duplicated ~30-line loops.
    """
    with open(path, 'rt') as f:
        return [line[: -1] for line in f if _matches(line, gray, yellow, green)]


def get_outputs(img_name = 'wordle.png'):
    '''
    Get outputs for the input image
    :param: img_name
    :type: str
    '''
    t1 = time()
    COLORS = [np.array([[120, 124, 126]]),
              np.array([[201, 182, 95]]),
              np.array([[106, 172, 105]])]
    gray, yellow, green = set(), set(), set()
    poss = [gray, yellow, green]
    # read_img fills the gray/yellow/green sets from the screenshot
    # (presumably in place, since the originals are reused below — TODO confirm)
    poss = read_img(poss, COLORS, img_name)
    print (poss)
    # check commonly used words
    res = _filter_words('words_five.txt', gray, yellow, green)
    t2 = time()
    # check all words if there are not enough words produced by reading the
    # most frequent word list
    res_full = []
    if len(res) <= 10:
        res_full = _filter_words('words_five_full.txt', gray, yellow, green)
    t3 = time()
    # print out results
    if len(res) > 10:
        print ('Some possible commonly used words are ' + ', '.join(res))
        print ('Got these words for you in just %.2f seconds' % (t2 - t1))
    elif len(res_full) < 1:
        print ('No words found based on your input!')
    elif len(res_full) == 1:
        print ('It got to be {}!'.format(res_full[0]))
        print ('Got this unique word for you in just %.2f seconds' % (t3 - t1))
    else:
        print ('Some possible words are ' + ', '.join(res_full))
        print ('Got these words for you in just %.2f seconds' % (t3 - t1))


if __name__ == '__main__':
    if len(sys.argv) > 1:
        get_outputs(sys.argv[1])
    else:
        get_outputs()
StarcoderdataPython
5160591
from time import time
import unicodedata
import datetime


def strip_accents(s):
    """Return *s* with all combining accent marks removed."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [c for c in decomposed if unicodedata.category(c) != 'Mn']
    return ''.join(kept)


def format_username(firstname, lastname):
    """Build the canonical '<first>.<last>.external' login name.

    Non-alphanumeric characters are dropped from each part, the whole name
    is lower-cased, and accents are stripped last.
    """
    first = ''.join(filter(str.isalnum, firstname))
    last = ''.join(filter(str.isalnum, lastname))
    raw = '.'.join([first, last, 'external'])
    return strip_accents(raw.lower())


def format_message(message):
    """Normalize a raw message dict for display.

    Adds a formatted UTC timestamp string and the message age in whole days.
    """
    created_at = message.get("created_at")
    epoch = int(float(created_at))
    created = datetime.datetime.utcfromtimestamp(epoch)
    elapsed_seconds = int(time() - epoch)
    return {
        "created_at": created_at,
        "ts_str": created.strftime('%d-%m-%Y %H:%M:%S'),
        "diff_to_today": int(elapsed_seconds / 60 / 60 / 24),
        "name": message.get("name"),
        "text": message.get("text"),
    }


def variable_to_question(string):
    """Turn a variable-style name ('some_field') into 'some field ?'."""
    question = "{} ?".format(string)
    return question.replace("_", " ")
StarcoderdataPython
5065620
#!/usr/bin/env python
#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019 Netflix, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#

import argparse
import scapy.all as sp
import socket
import sys


def main():
    """Send one IPv6 UDP packet and verify its Ethernet destination MAC
    got resolved rather than left as broadcast."""
    parser = argparse.ArgumentParser("scapyi386.py",
        description="IPv6 Ethernet Dest MAC test")
    parser.add_argument('--sendif', nargs=1,
        required=True,
        help='The interface through which the packet will be sent')
    parser.add_argument('--recvif', nargs=1,
        required=True,
        help='The interface on which to check for the packet')
    parser.add_argument('--src', nargs=1,
        required=True,
        help='The source IP address')
    parser.add_argument('--to', nargs=1,
        required=True,
        help='The destination IP address')
    parser.add_argument('--debug',
        required=False, action='store_true',
        help='Enable test debugging')
    args = parser.parse_args()

    ########################################################################
    #
    # Check that IPv6 packets go out with a proper (resolved) Ethernet
    # destination MAC instead of the broadcast one.  This was needed as
    # test cases did not work properly on i386 due to a scapy BPF parsing
    # bug (see PR 239380 and duplicates).
    #
    broadcast_mac = sp.Ether(dst="ff:ff:ff:ff:ff:ff").dst
    payload = "6" * 88
    packet = (sp.Ether() /
              sp.IPv6(src=args.src[0], dst=args.to[0]) /
              sp.UDP(dport=3456, sport=6543) /
              payload)
    sp.sendp(packet, iface=args.sendif[0], verbose=False)

    ether = packet.getlayer(sp.Ether)
    if ether is None:
        print("No Ether in packet")
        packet.display()
        sys.exit(1)
    if ether.dst == broadcast_mac:
        print("Broadcast dMAC on packet")
        ether.display()
        sys.exit(1)
    sys.exit(0)


if __name__ == '__main__':
    main()
StarcoderdataPython
3297369
<filename>SnakeNest/scripts/Common_unitigs.py<gh_stars>10-100 #!/usr/bin/env python # FIXME this one needs refactoring and factoring out the hardcoded paths # FIXME normalize names and spaces from __future__ import print_function import re import sys import glob import argparse from os.path import basename, join, dirname from collections import defaultdict def get_overlaping_bins(mags, dict_cogbin_unitigs, cog_threshold, overlap_threshold): set_bins_global = {key for dict_ in dict_cogbin_unitigs.values() for key in dict_} dict_bins_common_cogs = defaultdict(list) for Cog, dict__bin_unitig in dict_cogbin_unitigs.items(): for index, (bin1, set1) in enumerate(list(dict__bin_unitig.items())[:-1]): for bin2, set2 in list(dict__bin_unitig.items())[index+1:]: if set1 & set2: if max(len(set1 & set2)/float(len(set1)), len(set1 & set2)/float(len(set2))) >= overlap_threshold: # if bin1 in mags and bin2 in mags: dict_bins_common_cogs[tuple(sorted([bin1, bin2]))].append(Cog) # Summarize for each bin how many cogs are shared dict_bin_cogs = defaultdict(set) for (bin1, bin2), list_cogs in dict_bins_common_cogs.items(): dict_bin_cogs[bin1] |= set(list_cogs) dict_bin_cogs[bin2] |= set(list_cogs) # So which bins should be merged and which should just be flagged candidate_to_merge = {} dict_to_flag = {} for bins in set_bins_global: list_cog = dict_bin_cogs[bins] if len(list_cog) < cog_threshold: dict_to_flag[bins] = list_cog if len(list_cog) >= cog_threshold: candidate_to_merge[bins] = list_cog # list bins to merge list_sets_tomerge = [] for (bin1, bin2) in dict_bins_common_cogs.keys(): if bin1 in candidate_to_merge and bin2 in candidate_to_merge: if bin1 in mags and bin2 in mags: list_sets_tomerge.append({bin1, bin2}) # take into accounts bins with too many shared COGs but not going to be merged bins_going_to_merge={bins for set_bin in list_sets_tomerge for bins in set_bin} for bins in candidate_to_merge.keys(): if bins not in bins_going_to_merge: dict_to_flag[bins] = 
dict_bin_cogs[bins] # deal with the possibility that more than 2 bins must be merged together. Len = 0 while len(list_sets_tomerge) != Len: new_list_sets_tomerge = [] Len = len(list_sets_tomerge) for set_bins in list_sets_tomerge: intersect = 0 for index, element in enumerate(new_list_sets_tomerge): if set_bins & element: intersect = 1 new_list_sets_tomerge[index] |= set_bins if not intersect: new_list_sets_tomerge.append(set_bins) list_sets_tomerge = new_list_sets_tomerge # name bins from bin_to_merge, in a way that names don't collide if get_overlaping_bins is run multiple time dict_merge_bins={} for list_bins in list_sets_tomerge: index=1 new_name="Bin_merged_"+str(index) while new_name in set_bins_global: index+=1 new_name="Bin_merged_"+str(index) set_bins_global.add(new_name) dict_merge_bins[new_name] = list_bins return dict_to_flag, dict_merge_bins def update_cogbin_unitigs(dict_merge_bins,dict_cogbin_unitigs): # remove merged bins Set_bins_merged={bin for Tuple in dict_merge_bins.values() for bin in Tuple} dict_cogbin_unitigs_merged = {cog: {bin_: unitigs for bin_, unitigs in dict_bin_unitig.items() if bin_ not in Set_bins_merged} for cog, dict_bin_unitig in dict_cogbin_unitigs.items()} # add merged bins in the datastructure for cog in dict_cogbin_unitigs_merged: for name, list_bins in dict_merge_bins.items(): dict_cogbin_unitigs_merged[cog][name] = set.union(*[dict_cogbin_unitigs[cog][bins] for bins in list_bins]) return dict_cogbin_unitigs_merged def main(mag_list, cog_threshold, bins_to_merge, cogs_to_ignore, bins_to_process, rel_path, overlap_threshold): dict_cogbin_unitigs = defaultdict(lambda: defaultdict(set)) for bin_path in bins_to_process: bin_ = basename(bin_path) for cog_file in glob.glob(join(bin_path,rel_path)) : cog = basename(cog_file).replace(".gfa","") test = {line.split("\t")[2] for line in open(cog_file) if line[0] == "S"} dict_cogbin_unitigs[cog][bin_] = test mags = set(["Bin_%s" % line.rstrip() for line in open(mag_list)]) # find out 
which bin needs to be merged or have cogs to flags dict_to_flag, dict_merge_bins = get_overlaping_bins(mags, dict_cogbin_unitigs, cog_threshold, overlap_threshold) # add the merged bin in this datastructure before checking again if they share cogs or need to be merged flag=True dict_merg_bins_global={} dict_merg_bins_global.update(dict_merge_bins) while flag==True: dict_cogbin_unitigs_merged=update_cogbin_unitigs(dict_merge_bins,dict_cogbin_unitigs) dict_to_flag_new, dict_merge_bins_new = get_overlaping_bins(mags, dict_cogbin_unitigs_merged, cog_threshold, overlap_threshold) if dict_merge_bins_new!={}: # merge the dict_merge_bins to_forget=[] for bins,composition in dict_merge_bins_new.items(): real_composition=[] for comp_bin in composition : if comp_bin in dict_merg_bins_global: real_composition+=dict_merg_bins_global[comp_bin] to_forget.append(comp_bin) else: real_composition.append(comp_bin) dict_merg_bins_global[bins]=real_composition for bins in to_forget : del dict_merg_bins_global[bins] else : flag=False # output cog to be ignored with open(bins_to_merge, "w") as out: #TODO list(List) looks very weird! out.write("\n".join(["\t".join([key]+list(List))for key, List in dict_merg_bins_global.items()])) # output bins to be merged with open(cogs_to_ignore, "w") as out: #TODO list(List) looks very weird! 
out.write("\n".join(["\t".join([key]+list(List))for key, List in dict_to_flag_new.items()])) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-b", nargs='+', help="list of all bin considered",required=True) parser.add_argument("-g", help="relative path of graph files from the Bin directory, must end in COG*.gfa",required=True) parser.add_argument("mag_list", help="List of mags that can be merged") parser.add_argument("cog_threshold", help="number of cogs in common between bins to require merging") parser.add_argument("bins_to_merge", help="Output merge plan (.tsv)") parser.add_argument("cogs_to_ignore", help="Output bin cogs to ignore (.tsv)") parser.add_argument("-t", help="overlap treshold, percent of unitigs shared between graphs, to consider the graphs shared by multiple bins",default='0.1') args = parser.parse_args() #import ipdb; ipdb.set_trace() main(args.mag_list, int(args.cog_threshold), args.bins_to_merge, args.cogs_to_ignore, args.b, args.g, float(args.t))
StarcoderdataPython
11352287
from django.conf.urls import url

from . import views

# URL namespace for the 'leave' app.
app_name = 'leave'

urlpatterns = [
    # NOTE(review): the unanchored pattern r'^' matches EVERY path routed to
    # this app, not just its root — presumably r'^$' was intended; confirm
    # against the project's include() before changing.
    url(r'^', views.leave, name='leave'),
]
StarcoderdataPython
3243928
<gh_stars>0 import datetime import hashlib import io import json import logging import os import socket import getpass from base64 import b64encode try: from urlparse import urlunparse except ImportError: from urllib.parse import urlunparse from smb.SMBConnection import SMBConnection from smb.base import OperationFailure from smb.smb_constants import ATTR_DIRECTORY, ATTR_NORMAL from nmb.NetBIOS import NetBIOS from dtoolcore.storagebroker import BaseStorageBroker, DiskStorageBroker from dtoolcore.filehasher import FileHasher, md5sum_hexdigest, md5sum_digest from dtoolcore.storagebroker import StorageBrokerOSError from dtoolcore.utils import ( generate_identifier, get_config_value, generous_parse_uri, timestamp, DEFAULT_CACHE_PATH, ) from dtool_smb import __version__ logger = logging.getLogger(__name__) _STRUCTURE_PARAMETERS = { "data_directory": ["data"], "dataset_readme_relpath": ["README.yml"], "dtool_directory": ["_dtool"], "admin_metadata_relpath": ["_dtool", "dtool"], "structure_metadata_relpath": ["_dtool", "structure.json"], "dtool_readme_relpath": ["_dtool", "README.txt"], "manifest_relpath": ["_dtool", "manifest.json"], "overlays_directory": ["_dtool", "overlays"], "annotations_directory": ["_dtool", "annotations"], "tags_directory": ["_dtool", "tags"], "metadata_fragments_directory": ["_dtool", "tmp_fragments"], "storage_broker_version": __version__, } _DTOOL_README_TXT = """README ====== This is a Dtool dataset stored in an SMB share. Content provided during the dataset creation process ---------------------------------------------------- Directory named $UUID, where UUID is the unique identifier for the dataset. Dataset descriptive metadata: README.yml Dataset items. The keys for these blobs are item identifiers. An item identifier is the sha1sum hexdigest of the relative path used to represent the file on traditional file system disk. Administrative metadata describing the dataset is encoded as metadata on the container. 
Automatically generated blobs ----------------------------- This file: README.txt Structural metadata describing the dataset: structure.json Structural metadata describing the data items: manifest.json Per item descriptive metadata prefixed by: overlays/ Dataset key/value pairs metadata prefixed by: annotations/ Dataset tags metadata prefixed by: tags/ """ class SMBStorageBrokerValidationWarning(Warning): pass class SMBStorageBroker(BaseStorageBroker): #: Attribute used to define the type of storage broker. key = "smb" #: Attribute used by :class:`dtoolcore.ProtoDataSet` to write the hash #: function name to the manifest. hasher = FileHasher(md5sum_hexdigest) # Attribute used to define the structure of the dataset. _structure_parameters = _STRUCTURE_PARAMETERS # Attribute used to document the structure of the dataset. _dtool_readme_txt = _DTOOL_README_TXT # Encoding _encoding = 'utf-8' def __init__(self, uri, config_path=None): parse_result = generous_parse_uri(uri) self.config_name = parse_result.netloc uuid = parse_result.path[1:] self.uuid = uuid # Connect to SMB server. self.conn, self.service_name, self.path = \ SMBStorageBroker._connect(uri, config_path) # Define some other more abspaths. self._data_path = self._generate_path("data_directory") self._overlays_path = self._generate_path("overlays_directory") self._annotations_path = self._generate_path( "annotations_directory" ) self._tags_path = self._generate_path( "tags_directory" ) self._metadata_fragments_path = self._generate_path( "metadata_fragments_directory" ) # Define some essential directories to be created. 
self._essential_subdirectories = [ self._generate_path("dtool_directory"), self._data_path, self._overlays_path, self._annotations_path, self._tags_path, ] # Cache for file hashes computed on upload self._hash_cache = {} def _count_calls(func): def wrapper(*args, **kwargs): wrapper.num_calls += 1 return func(*args, **kwargs) wrapper.num_calls = 0 return wrapper @classmethod @_count_calls def _connect(cls, uri, config_path): parse_result = generous_parse_uri(uri) config_name = parse_result.netloc username = get_config_value( "DTOOL_SMB_USERNAME_{}".format(config_name), config_path=config_path ) server_name = get_config_value( "DTOOL_SMB_SERVER_NAME_{}".format(config_name), config_path=config_path ) server_port = get_config_value( "DTOOL_SMB_SERVER_PORT_{}".format(config_name), config_path=config_path ) domain = get_config_value( "DTOOL_SMB_DOMAIN_{}".format(config_name), config_path=config_path ) service_name = get_config_value( "DTOOL_SMB_SERVICE_NAME_{}".format(config_name), config_path=config_path ) path = get_config_value( "DTOOL_SMB_PATH_{}".format(config_name), config_path=config_path ) if not username: raise RuntimeError("No username specified for service '{name}', " "please set DTOOL_SMB_USERNAME_{name}." .format(name=config_name)) if not server_name: raise RuntimeError("No server name specified for service '{name}', " "please set DTOOL_SMB_SERVER_NAME_{name}." .format(name=config_name)) if not server_port: raise RuntimeError("No server port specified for service '{name}', " "please set DTOOL_SMB_SERVER_PORT_{name}." .format(name=config_name)) if not domain: raise RuntimeError("No domain specified for service '{name}', " "please set DTOOL_SMB_DOMAIN_{name}." .format(name=config_name)) if not service_name: raise RuntimeError("No service name specified for service '{name}', " "please set DTOOL_SMB_SERVICE_NAME_{name}. 
" "(The service name is the name of the 'share'.)" .format(name=config_name)) if not path: raise RuntimeError("No path specified for service '{name}', " "please set DTOOL_SMB_PATH_{name}." .format(name=config_name)) # server_port might be string, i.e. if specified via env vars if not isinstance(server_port, int): server_port = int(server_port) server_ip = socket.gethostbyname(server_name) host_name = socket.gethostname() password = get_config_value( "DTOOL_SMB_PASSWORD_{}".format(config_name), config_path=config_path ) if password is None: if cls._connect.num_calls == 1: password = <PASSWORD>() cls.password = password else: password = cls.password conn = SMBConnection(username, password, host_name, server_name, domain=domain, use_ntlm_v2=True, is_direct_tcp=True) logger.info( ( "Connecting from '{host:s}' to " "'smb://{user:s}@{ip:s}({server:s}):{port:d}', " "DOMAIN '{domain:s}'").format(user=username, ip=server_ip, server=server_name, port=server_port, host=host_name, domain=domain) ) # for testing, see types of arguments logger.debug( ( "Types HOST '{host:s}', USER '{user:s}', IP '{ip:s}', " "SERVER '{server:s}', PORT '{port:s}', DOMAIN '{domain:s}'").format( user=type(username).__name__, ip=type(server_ip).__name__, server=type(server_name).__name__, port=type(server_port).__name__, host=type(host_name).__name__, domain=type(domain).__name__)) conn.connect(server_ip, port=server_port) return conn, service_name, path # Generic helper functions. 
def _generate_path(self, structure_dict_key): logger.debug("_generate_path, structure_dict_key='{}'" .format(structure_dict_key)) logger.debug("_generate_path, self.path='{}', self.uuid='{}', {}" .format(self.path, self.uuid, self._structure_parameters[structure_dict_key])) return os.path.join(self.path, self.uuid, *self._structure_parameters[structure_dict_key]) def _fpath_from_handle(self, handle): return os.path.join(self._data_path, handle) def _handle_to_fragment_prefixpath(self, handle): stem = generate_identifier(handle) logger.debug("_handle_to_fragment_prefixpath, handle='{}', stem='{}'" .format(handle, stem)) return os.path.join(self._metadata_fragments_path, stem) def _path_exists(self, path): try: self.conn.getAttributes(self.service_name, path) except OperationFailure: return False return True def _create_directory(self, path): paths = [] while not self._path_exists(path): paths += [path] path = os.path.dirname(path) while len(paths) > 0: path = paths.pop() logger.debug("_create_directory, path = '{}'".format(path)) self.conn.createDirectory(self.service_name, path) # Class methods to override. @classmethod def generate_uri(cls, name, uuid, base_uri): scheme, netloc, path, _, _, _ = generous_parse_uri(base_uri) assert scheme == 'smb' # Force path (third component of tuple) to be the dataset UUID uri = urlunparse((scheme, netloc, uuid, _, _, _)) return uri @classmethod def list_dataset_uris(cls, base_uri, config_path): """Return list containing URIs with base URI.""" conn, service_name, path = \ SMBStorageBroker._connect(base_uri, config_path) files = conn.listPath(service_name, path) uri_list = [] for f in files: if f.filename != '.' and f.filename != '..': if f.file_attributes & ATTR_DIRECTORY: uuid = f.filename uri = cls.generate_uri(None, uuid, base_uri) uri_list.append(uri) return uri_list # Methods to override. 
def get_admin_metadata_key(self):
    """Return the path to the admin metadata file."""
    return self._generate_path("admin_metadata_relpath")

def get_readme_key(self):
    """Return the path to the readme file."""
    return self._generate_path("dataset_readme_relpath")

def get_manifest_key(self):
    """Return the path to the manifest file."""
    # Fixed docstring: previously claimed this was the readme file.
    return self._generate_path("manifest_relpath")

def get_structure_key(self):
    """Return the path to the structure parameter file."""
    return self._generate_path("structure_metadata_relpath")

def get_dtool_readme_key(self):
    """Return the path to the dtool readme file."""
    return self._generate_path("dtool_readme_relpath")

def get_overlay_key(self, overlay_name):
    """Return the path to the overlay file."""
    return os.path.join(self._overlays_path, overlay_name + '.json')

def get_annotation_key(self, annotation_name):
    """Return the path to the annotation file."""
    return os.path.join(
        self._annotations_path,
        annotation_name + '.json'
    )

def get_tag_key(self, tag):
    """Return the path to the tag file."""
    return os.path.join(
        self._tags_path,
        tag
    )

def get_text(self, key):
    """Return the text associated with the key."""
    logger.debug("get_text, key='{}'".format(key))
    f = io.BytesIO()
    self.conn.retrieveFile(self.service_name, key, f)
    return f.getvalue().decode(self._encoding)

def put_text(self, key, text):
    """Put the text into the storage associated with the key."""
    logger.debug("put_text, key='{}', text='{}'".format(key, text))
    # Parent directories are created on demand.
    parent_directory = os.path.dirname(key)
    self._create_directory(parent_directory)
    f = io.BytesIO(text.encode(self._encoding))
    self.conn.storeFile(self.service_name, key, f)

def delete_key(self, key):
    """Delete the file/object associated with the key."""
    self.conn.deleteFile(self.service_name, key)

def get_size_in_bytes(self, handle):
    """Return the size in bytes."""
    fpath = self._fpath_from_handle(handle)
    return self.conn.getAttributes(self.service_name, fpath).file_size

def get_utc_timestamp(self, handle):
    """Return the UTC timestamp."""
    fpath = self._fpath_from_handle(handle)
    # last_write_time is seconds since the epoch; interpret it as UTC.
    datetime_obj = datetime.datetime.utcfromtimestamp(
        self.conn.getAttributes(self.service_name, fpath).last_write_time
    )
    return timestamp(datetime_obj)

def get_hash(self, handle):
    """Return the hash (md5 hexdigest of the item content, cached)."""
    logger.debug("get_hash, handle='{}'".format(handle))
    logger.debug("get_hash, hash_cache={}".format(self._hash_cache))
    fpath = self._fpath_from_handle(handle)
    logger.debug("get_hash, fpath='{}'".format(fpath))
    try:
        return self._hash_cache[fpath]
    except KeyError:
        logger.debug("get_hash, fpath not found in cache")
        # Cache miss: download the item content and hash it locally.
        f = io.BytesIO()
        self.conn.retrieveFile(self.service_name, fpath, f)
        hasher = hashlib.md5()
        hasher.update(f.getvalue())
        h = hasher.hexdigest()
        self._hash_cache[fpath] = h
        return h

def has_admin_metadata(self):
    """Return True if the administrative metadata exists.

    This is the definition of being a "dataset".
    """
    return self._path_exists(self.get_admin_metadata_key())

def _list_names(self, path):
    """Return the extension-less names of the plain files in *path*."""
    names = []
    for shf in self.conn.listPath(self.service_name, path):
        if shf.file_attributes & ATTR_NORMAL:
            name, ext = os.path.splitext(shf.filename)
            names.append(name)
    return names

def list_overlay_names(self):
    """Return list of overlay names."""
    return self._list_names(self._overlays_path)

def list_annotation_names(self):
    """Return list of annotation names."""
    # Fixed: previously read self._annotation_path, an attribute that is
    # never defined; get_annotation_key uses self._annotations_path.
    return self._list_names(self._annotations_path)

def list_tags(self):
    """Return list of tags."""
    return self._list_names(self._tags_path)

def get_item_path(self, identifier):
    """Return absolute path at which item content can be accessed.

    :param identifier: item identifier
    :returns: absolute path from which the item content can be accessed
    """
    manifest = self.get_manifest()
    # Fixed: 'hitem' was an undefined name (NameError); look the item up
    # in the manifest by its identifier instead.
    relpath = manifest["items"][identifier]["relpath"]
    item_path = os.path.join(self._data_path, relpath)
    return item_path

def _create_structure(self):
    """Create necessary structure to hold a dataset."""
    uuid_path = os.path.join(self.path, self.uuid)

    # Ensure that the specified path does not exist and create it.
    if self._path_exists(uuid_path):
        raise StorageBrokerOSError(
            "Path '{}' already exists on share '{}'.".format(
                uuid_path, self.service_name))
    logger.debug(
        "_create_structure, creating directory '{}' on share '{}'."
        .format(os.path.join(self.path, self.uuid), self.service_name))
    self._create_directory(uuid_path)

    # Create more essential subdirectories.
    for abspath in self._essential_subdirectories:
        logger.debug(
            "_create_structure, creating directory '{}' on share '{}'."
            .format(abspath, self.service_name))
        self._create_directory(abspath)

def put_item(self, fpath, relpath):
    """Put item with content from fpath at relpath in dataset.

    Missing directories in relpath are created on the fly.

    :param fpath: path to the item on disk
    :param relpath: relative path name given to the item in the dataset
        as a handle, i.e. a Unix-like relpath
    :returns: the handle given to the item
    """
    logger.debug("put_item, fpath='{}', relpath='{}'".format(fpath, relpath))

    # Define the destination path and make any missing parent directories.
    dest_path = os.path.join(self._data_path, relpath)
    dirname = os.path.dirname(dest_path)
    self._create_directory(dirname)

    # Copy the file across.  Fixed: the source file handle was previously
    # never closed.
    with open(fpath, 'rb') as fh:
        self.conn.storeFile(self.service_name, dest_path, fh)

    # Compute hash and store to cache.
    self._hash_cache[dest_path] = SMBStorageBroker.hasher(fpath)

    return relpath

def iter_item_handles(self, path=None):
    """Return iterator over item handles."""
    if path is None:
        path = self._data_path
    # Iterative depth-first walk; None marks the top-level directory so
    # that handles come out relative to the data directory.
    relpaths = [None]
    while len(relpaths) > 0:
        relpath = relpaths.pop()
        logger.debug("iter_item_handles, path='{}', relpath='{}'"
                     .format(path, relpath))
        if relpath is None:
            fullpath = path
        else:
            fullpath = os.path.join(path, relpath)
        for shf in self.conn.listPath(self.service_name, fullpath):
            logger.debug("iter_item_handles, shf.filename='{}', DIRECTORY={}"
                         .format(shf.filename,
                                 shf.file_attributes & ATTR_DIRECTORY))
            if shf.filename != '.' and shf.filename != '..':
                if relpath is None:
                    new_relpath = shf.filename
                else:
                    new_relpath = os.path.join(relpath, shf.filename)
                if shf.file_attributes & ATTR_DIRECTORY:
                    relpaths.append(new_relpath)
                else:
                    yield new_relpath

def add_item_metadata(self, handle, key, value):
    """Store the given key:value pair for the item associated with handle.

    :param handle: handle for accessing an item before the dataset is
        frozen
    :param key: metadata key
    :param value: metadata value
    """
    self._create_directory(self._metadata_fragments_path)
    prefix = self._handle_to_fragment_prefixpath(handle)
    logger.debug("add_item_metadata, prefix='{}'".format(prefix))
    fpath = prefix + '.{}.json'.format(key)
    # Fixed: json.dump() writes str, which a BytesIO rejects, and the
    # store call referenced the undefined name 'path'; serialise to
    # bytes explicitly and store at 'fpath'.
    f = io.BytesIO(json.dumps(value).encode(self._encoding))
    self.conn.storeFile(self.service_name, fpath, f)

def get_item_metadata(self, handle):
    """Return dictionary containing all metadata associated with handle.

    In other words all the metadata added using the ``add_item_metadata``
    method.

    :param handle: handle for accessing an item before the dataset is
        frozen
    :returns: dictionary containing item metadata
    """
    # No fragment directory means no metadata was ever added.
    try:
        if not self.conn.getAttributes(
                self.service_name,
                self._metadata_fragments_path).file_attributes \
                & ATTR_DIRECTORY:
            return {}
    except OperationFailure:
        return {}

    prefix = self._handle_to_fragment_prefixpath(handle)
    logger.debug("get_item_metadata, prefix='{}'".format(prefix))

    def list_paths(dirname):
        for shf in self.conn.listPath(self.service_name, dirname):
            if shf.file_attributes & ATTR_NORMAL:
                yield os.path.join(dirname, shf.filename)

    files = [f for f in list_paths(self._metadata_fragments_path)
             if f.startswith(prefix)]

    metadata = {}
    for filename in files:
        key = filename.split('.')[-2]  # filename: identifier.key.json
        # Fixed: pysmb writes *bytes* into the file object, so the
        # previous StringIO raised TypeError; retrieve into a BytesIO
        # and decode before parsing.
        f = io.BytesIO()
        self.conn.retrieveFile(self.service_name, filename, f)
        value = json.loads(f.getvalue().decode(self._encoding))
        metadata[key] = value

    return metadata

def pre_freeze_hook(self):
    """Pre :meth:`dtoolcore.ProtoDataSet.freeze` actions.

    This method is called at the beginning of the
    :meth:`dtoolcore.ProtoDataSet.freeze` method.

    It may be useful for remote storage backends to generate
    caches to remove repetitive time consuming calls
    """
    # Only files named in _STRUCTURE_PARAMETERS may live in the base of
    # the dataset directory; anything else aborts the freeze.
    allowed = set([v[0] for v in _STRUCTURE_PARAMETERS.values()])
    logger.debug('pre_freeze_hook, allowed = {}'.format(allowed))
    for d in self.conn.listPath(self.service_name,
                                os.path.join(self.path, self.uuid)):
        logger.debug("pre_freeze_hook, d.filename='{}'".format(d.filename))
        if d.file_attributes & ATTR_NORMAL and d.filename not in allowed:
            raise SMBStorageBrokerValidationWarning(
                "Rogue content in base of dataset: {}".format(d.filename))

def post_freeze_hook(self):
    """Post :meth:`dtoolcore.ProtoDataSet.freeze` cleanup actions.

    This method is called at the end of the
    :meth:`dtoolcore.ProtoDataSet.freeze` method.

    In the :class:`dtoolcore.storage_broker.DiskStorageBroker` it removes
    the temporary directory for storing item metadata fragment files.
    """
    if self._path_exists(self._metadata_fragments_path):
        # NOTE(review): pysmb's deleteFiles() expects a file *pattern*;
        # confirm this actually removes the fragment directory (a
        # '<dir>/*' pattern followed by deleteDirectory() may be needed).
        self.conn.deleteFiles(self.service_name,
                              self._metadata_fragments_path)

def _list_historical_readme_keys(self):
    """Return the keys of archived ``README.yml-*`` files."""
    # NOTE(review): this scans self.path rather than the dataset
    # directory (self.path/self.uuid) where README.yml lives — verify
    # against the other dtool storage brokers.
    historical_readme_keys = []
    for shf in self.conn.listPath(self.service_name, self.path):
        if shf.filename.startswith("README.yml-"):
            key = os.path.join(self.path, shf.filename)
            historical_readme_keys.append(key)
    return historical_readme_keys
StarcoderdataPython
3501191
#!/usr/bin/python3
"""
Example script for interacting with the dhammer API

Usage: autotune.py --tune-stat-name OfferReceived --tune-stat-compare-name DiscoverSent
"""
import argparse
import signal
import sys

import AutoTuner


##### Install a signal handler for CTRL+C #####
def signal_handler(signal, frame, tuner):
    """Stop the tuner gracefully on CTRL+C, then exit the process."""
    try:
        # Try to shut things down gracefully.
        print('Stopping...')
        tuner.stop()
    except BaseException as e:
        # Report but keep going; we are exiting anyway.
        print(str(e))
    print('Shutting down...')
    sys.exit(0)  # Raise a SystemExit exception


def main():
    """Parse command-line arguments, wire up CTRL+C handling and run the
    auto tuner until it finishes or is interrupted."""
    ##### Get command-line arguments
    parser = argparse.ArgumentParser(description='Find the max request rate.')
    parser.add_argument('--api-address', '-a', dest='api_address',
                        default='localhost',
                        help='Address for stats API.')
    parser.add_argument('--api-port', '-p', dest='api_port',
                        default=8080,
                        help='Port for stats API')
    parser.add_argument('--tune-stat-name', '-t', dest='tune_stat_name',
                        default=None, required=True,
                        help='Stat field used to decide if ramp up or down is necessary.')
    parser.add_argument('--tune-stat-compare-name', '-c',
                        dest='tune_stat_compare_name',
                        default=None, required=True,
                        help='Stat used for comparison to determine if goal is being reached.')
    # Fixed: declare type=float so a user-supplied value arrives as a
    # number, not a string.
    parser.add_argument('--tune-compare-min-percentage', '-cp',
                        dest='tune_compare_min_percentage',
                        default=0.95, type=float,
                        help='The maximum percentage difference between the tuning stat and the comparison stat.')
    parser.add_argument('--ramp-up-factor', '-ru', dest='ramp_up_factor',
                        default=2, type=int,
                        help='Factor by which to ramp up the target RPS.')
    # Fixed: was type=int, which contradicted the fractional default of
    # 0.9 (int('0.9') raises ValueError and any integral input destroys
    # the intended ramp-down behaviour).
    parser.add_argument('--ramp-down-factor', '-rd', dest='ramp_down_factor',
                        default=0.9, type=float,
                        help='Factor by which to reduce the ramp-up factor.')
    # NOTE(review): the mixed dash/underscore in this flag name looks like
    # a typo, but it is kept for command-line compatibility.
    parser.add_argument('--refresh-rate_seconds', '-rr',
                        dest='refresh_rate_seconds',
                        default=6, type=int,
                        help='Rate to check stats. Should be slightly longer than the dhammer refresh rate.')

    args = parser.parse_args()

    ##### Prep the result handler
    tuner = AutoTuner.AutoTuner(args)

    # Register our signal handler.
    signal.signal(signal.SIGINT,
                  lambda signal, frame: signal_handler(signal, frame, tuner))

    try:
        tuner.prepare()
        tuner.start()
    except SystemExit:
        pass
    except BaseException as e:
        print("Tuner broke down: %s" % str(e))
        try:
            tuner.stop()
        except:
            pass

    return (0)


if __name__ == "__main__":
    main()
StarcoderdataPython
8196806
"""DGX Remote Shell is a program that allows remote access to the DGX shell for debugging and information gathering. The MIT License (MIT) Copyright (c) 2015 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" import wx import telnetlib import dgx_rs_gui from threading import Thread import requests from bs4 import BeautifulSoup from distutils.version import StrictVersion import os import sys class DGXRSFrame(dgx_rs_gui.DGXRSFrame): def __init__(self, parent): dgx_rs_gui.DGXRSFrame.__init__(self, parent) self.parent = parent self.version = 'v0.1.2' icon_bundle = wx.IconBundle() icon_bundle.AddIconFromFile(r"icon/dgx_rs.ico", wx.BITMAP_TYPE_ANY) self.SetIcons(icon_bundle) self.SetTitle("DGX Remote Shell " + self.version) self.display_txt.SetValue( 'Please connect Netlinx Studio to the DGX you would like to ' + 'query. \r\rTurn on device notifications for device 5002, ' + 'port 3, and the DGX\'s system number. \rOnly enable \'Commands ' + 'From Device\'. 
\r\rEnsure the DGX IP above is correct and then ' + 'select one of the predefined commands or enter your own.') self.display_txt.Enable(False) self.cert_path = self.resource_path('cacert.pem') Thread(target=self.update_check).start() def establish_telnet(self, ip_address, tel_port=23): """Creates the telnet instance""" telnet_session = telnetlib.Telnet(ip_address, tel_port, 5) telnet_session.set_option_negotiation_callback(self.call_back) return telnet_session def call_back(self, sock, cmd, opt): """ Turns on server side echoing""" if opt == telnetlib.ECHO and cmd in (telnetlib.WILL, telnetlib.WONT): sock.sendall(telnetlib.IAC + telnetlib.DO + telnetlib.ECHO) def send_command(self, command, cmd_type='DGX Shell>'): """Sends a command to the master""" try: feedback = '' telnet_session = self.establish_telnet(self.dgx_ip_txt.GetValue()) feedback = feedback + telnet_session.read_until('>') if cmd_type == 'DGX Shell>': telnet_session.write('send_command 5002:3:0, \"$03, \'' + str(command) + '\',13,10\"\r') else: telnet_session.write('send_command 5002:3:0, \"\'' + str(command) + '\'\"\r') feedback = feedback + telnet_session.read_until('>') telnet_session.close() except Exception as error: self.display_txt.SetValue( 'Error: ' + str(error)) return self.show_process_directions(feedback) def show_process_directions(self, feedback): """Tell them how to do it""" self.display_txt.SetValue( feedback + '\r' + 'In about 3 seconds, when the notifications show up in ' + 'Netlinx Studio notifications. 
\rPlease copy these ' + 'notifications and paste them here.\r\rPress Clear to paste\r' + 'Then click Process.\r') self.display_txt.Enable(False) def on_clear(self, event): """Clears the window""" self.display_txt.SetValue('') self.display_txt.Enable(True) event.Skip() def on_command_button(self, event): """Send command from button event""" command = event.GetEventObject().GetLabel() self.send_command(command.lower(), cmd_type='DGX Shell>') def on_bcs_button(self, event): """Send bcs from button event""" command = event.GetEventObject().GetLabel() self.send_command(command.lower(), cmd_type='BCS>') def on_submit(self, event): """Get the DGX command """ command = self.dgx_command_txt.GetValue() self.dgx_command_txt.SetValue('') self.send_command(command, self.type_cmb.GetValue()) def on_apply(self, event): """process text""" text = self.display_txt.GetValue() self.display_txt.SetValue(self.process_text(text)) def process_text(self, text): """remove and clean up text""" lines = text.split('\n') unwanted = [('^13', ''), ('$03', ''), ('1G', ''), ('$1B1;1H$1B2J', ''), ('$0D$0A', '\r'), ('$0D $0A', '\r'), ('$09', '\t'), ('$1BDGX_SHELL>$1B1', 'DGX_SHELL>')] output = '' for line in lines: try: my_line = line.split('-', 1)[1] except: break my_line = my_line.replace('[', '') my_line = my_line.replace(']', '') output = output + my_line for item in unwanted: output = output.replace(item[0], item[1]) return output def on_save(self, event): """Save the results""" dlg = wx.FileDialog( self, message='Select file to save', defaultFile="", wildcard="TXT files (*.txt)|*.txt", style=wx.SAVE) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() with open(path, 'w') as f: f.write(self.display_txt.GetValue()) def update_check(self): """Checks on line for updates""" # print 'in update' try: webpage = requests.get( 'https://github.com/AMXAUNZ/DGX-Remote-Shell/releases', verify=self.cert_path) # Scrape page for latest version soup = BeautifulSoup(webpage.text) # Get the <div> sections in 
lable-latest # print 'divs' divs = soup.find_all("div", class_="release label-latest") # Get the 'href' of the release url_path = divs[0].find_all('a')[-3].get('href') # Get the 'verison' number online_version = url_path.split('/')[-2][1:] if StrictVersion(online_version) > StrictVersion(self.version[1:]): # Try update # print 'try update' self.do_update(url_path, online_version) else: # All up to date pass # print 'up to date' return except Exception as error: # print 'error'error # we have had a problem, maybe update will work next time. # print 'error ', error pass def do_update(self, url_path, online_version): """download and install""" # ask if they want to update dlg = wx.MessageDialog( parent=self, message='A new DGX Remote Shell is available v' + str(StrictVersion(online_version)) + '\r' + 'Do you want to download and update?', caption='Do you want to update?', style=wx.OK | wx.CANCEL) if dlg.ShowModal() == wx.ID_OK: response = requests.get('https://github.com' + url_path, verify=self.cert_path, stream=True) # print response if not response.ok: return total_length = response.headers.get('content-length') if total_length is None: # no content length header pass else: total_length = int(total_length) dlg2 = wx.ProgressDialog( "Download Progress", "Downloading update now", maximum=total_length, parent=self, style=wx.PD_APP_MODAL | wx.PD_AUTO_HIDE | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME) temp_folder = os.environ.get('temp') with open(temp_folder + '\DGX_Remote_Shell_Setup_' + str(StrictVersion(online_version)) + '.exe', 'wb') as handle: count = 0 for data in response.iter_content(1024): count += len(data) handle.write(data) (cancel, skip) = dlg2.Update(count) if not cancel: break dlg2.Destroy() if not cancel: return self.install_update(online_version, temp_folder) def install_update(self, online_version, temp_folder): """Installs the downloaded update""" dlg = wx.MessageDialog( parent=self, message='Do you want to update to v' + 
str(StrictVersion(online_version)) + ' now?', caption='Update program', style=wx.OK | wx.CANCEL) if dlg.ShowModal() == wx.ID_OK: os.startfile(temp_folder + '\DGX_Remote_Shell_Setup_' + str(StrictVersion(online_version)) + '.exe') self.Destroy() def resource_path(self, relative): return os.path.join(getattr(sys, '_MEIPASS', os.path.abspath(".")), relative) def main(): """Launch the main program""" dgx_rx_interface = wx.App() # redirect=True, filename="log.txt") main_window = DGXRSFrame(None) main_window.Show() dgx_rx_interface.MainLoop() if __name__ == '__main__': main()
StarcoderdataPython
5092585
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################
##ask user to specify a mode to execute
## 0: visit rooms on a given map (default)
## 1: step by step control
#############################################

import rospy
from state_machine.srv import State, StateResponse


def stateCallback(req):
    # Service callback: reply with the currently selected mode.
    return StateResponse(state)


def state_server():
    # Start the node, ask the user which mode to run, then serve the
    # answer on the /state service until shutdown.
    global state
    rospy.init_node('state_server')

    # Any answer that is not exactly one character, or that contains a
    # '0', selects the default mode 0; otherwise mode 1.
    choice = raw_input('Do you want to choose mode 0 visit rooms on a given map (default) \n or mode 1 step by step control ? ')
    state = 0 if len(choice) != 1 or '0' in choice else 1

    # Create a server named /state, register the callback stateCallback.
    s = rospy.Service('/state', State, stateCallback)

    # Keep the node alive so clients can query the chosen mode.
    rospy.spin()


if __name__ == "__main__":
    state_server()
StarcoderdataPython
1873368
import json
import os
import requests
import mimetypes
import requests.cookies
from requests_toolbelt import MultipartEncoder
from bs4 import BeautifulSoup
from py3pin.BookmarkManager import BookmarkManager
from py3pin.Registry import Registry
from py3pin.RequestBuilder import RequestBuilder
from requests.structures import CaseInsensitiveDict
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.proxy import Proxy, ProxyType

# Default browser identity sent with every request.
AGENT_STRING = "Mozilla/5.0 (Windows NT 6.1; Win64; x64) " \
               "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36"

# Pinterest endpoints (reverse-engineered internal REST resources).
HOME_PAGE = 'https://www.pinterest.com/'
LOGIN_PAGE = 'https://www.pinterest.com/login/?referrer=home_page'
CREATE_USER_SESSION = 'https://www.pinterest.com/resource/UserSessionResource/create/'
DELETE_USER_SESSION = 'https://www.pinterest.com/resource/UserSessionResource/delete/'
USER_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/UserResource/get/'
BOARD_PICKER_RESOURCE = 'https://www.pinterest.com/resource/BoardPickerBoardsResource/get/'
BOARDS_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/BoardsResource/get/'
CREATE_BOARD_RESOURCE = 'https://www.pinterest.com/resource/BoardResource/create/'
FOLLOW_BOARD_RESOURCE = 'https://www.pinterest.com/resource/BoardFollowResource/create/'
UNFOLLOW_BOARD_RESOURCE = 'https://www.pinterest.com/resource/BoardFollowResource/delete/'
FOLLOW_USER_RESOURCE = 'https://www.pinterest.com/resource/UserFollowResource/create/'
UNFOLLOW_USER_RESOURCE = 'https://www.pinterest.com/resource/UserFollowResource/delete/'
USER_FOLLOWING_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/UserFollowingResource/get/'
USER_FOLLOWERS_RESOURCE = 'https://www.pinterest.com/resource/UserFollowersResource/get/'
PIN_RESOURCE_CREATE = 'https://www.pinterest.com/resource/PinResource/create/'
REPIN_RESOURCE_CREATE = 'https://www.pinterest.com/resource/RepinResource/create/'
PIN_LIKE_RESOURCE = 'https://www.pinterest.com/resource/PinLikeResource/create/'
PIN_UNLIKE_RESOURCE = 'https://www.pinterest.com/resource/PinLikeResource/delete/'
DELETE_PIN_RESOURCE = 'https://www.pinterest.com/resource/PinResource/delete/'
PIN_COMMENT_RESOURCE = 'https://www.pinterest.com/resource/PinCommentResource/create/'
BOARD_INVITE_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/BoardInviteResource/create/'
BOARD_DELETE_INVITE_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/BoardCollaboratorResource/delete/'
VISUAL_LIVE_SEARCH_RESOURCE = 'https://www.pinterest.com/resource/VisualLiveSearchResource/get/'
SEARCH_RESOURCE = 'https://www.pinterest.com/resource/SearchResource/get/'
TYPE_AHEAD_RESOURCE = "https://www.pinterest.com/resource/AdvancedTypeaheadResource/get/"
BOARD_RECOMMEND_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/BoardContentRecommendationResource/get/'
PINNABLE_IMAGES_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/FindPinImagesResource/get/'
BOARD_FEED_RESOURCE = 'https://www.pinterest.com/resource/BoardFeedResource/get/'
USER_HOME_FEED_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/UserHomefeedResource/get/'
BASE_SEARCH_RESOURCE = 'https://www.pinterest.com/resource/BaseSearchResource/get/'
BOARD_INVITES_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/BoardInvitesResource/get/'
CREATE_COMMENT_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/AggregatedCommentResource/create/'
GET_PIN_COMMENTS_RESOURCE = 'https://www.pinterest.com/_ngjs/resource/AggregatedCommentFeedResource/get/'
LOAD_PIN_URL_FORMAT = 'https://www.pinterest.com/pin/{}/'
DELETE_COMMENT = 'https://www.pinterest.com/_ngjs/resource/AggregatedCommentResource/delete/'
CONVERSATION_RESOURCE = 'https://www.pinterest.com/resource/ConversationsResource/get/'
CONVERSATION_RESOURCE_CREATE = 'https://www.pinterest.com/resource/ConversationsResource/create/'
LOAD_CONVERSATION = 'https://www.pinterest.com/resource/ConversationMessagesResource/get/'
SEND_MESSAGE = 'https://www.pinterest.com/resource/ConversationMessagesResource/create/'
BOARD_SECTION_RESOURCE = 'https://www.pinterest.com/resource/BoardSectionResource/create/'
GET_BOARD_SECTIONS = 'https://www.pinterest.com/resource/BoardSectionsResource/get/'
BOARD_SECTION_EDIT_RESOURCE = 'https://www.pinterest.com/resource/BoardSectionEditResource/delete/'
GET_BOARD_SECTION_PINS = 'https://www.pinterest.com/resource/BoardSectionPinsResource/get/'
UPLOAD_IMAGE = 'https://www.pinterest.com/upload-image/'


class Pinterest:
    """Unofficial Pinterest client.

    Wraps Pinterest's internal web resources behind a requests session.
    Session cookies are persisted under ``cred_root`` (via Registry) and
    reused across runs, so ``login()`` only needs to be called when the
    stored session expires.  Paginated methods keep their position in a
    BookmarkManager and must be called repeatedly until they return an
    empty list.
    """

    def __init__(self, password='', proxies=None, username='', email='', cred_root='data', user_agent=None):
        # Credentials are only used by login(); everything else rides on cookies.
        self.email = email
        self.username = username
        self.password = password
        self.req_builder = RequestBuilder()
        self.bookmark_manager = BookmarkManager()
        self.http = requests.session()
        self.proxies = proxies
        self.user_agent = user_agent
        self.registry = Registry(cred_root, email)
        # Restore any cookies persisted by a previous login.
        cookies = self.registry.get_all()
        for key in cookies.keys():
            self.http.cookies.set(key, cookies[key])
        if self.user_agent is None:
            self.user_agent = AGENT_STRING

    def request(self, method, url, data=None, files=None, extra_headers=None):
        """Issue an HTTP request with Pinterest's expected AJAX headers.

        Adds the CSRF token from the cookie jar when present and raises
        ``requests.HTTPError`` on non-2xx responses.
        """
        headers = CaseInsensitiveDict([
            ('Referer', HOME_PAGE),
            ('X-Requested-With', 'XMLHttpRequest'),
            ('Accept', 'application/json'),
            ('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8'),
            ('User-Agent', self.user_agent)])

        # Pinterest rejects state-changing calls without the CSRF header.
        csrftoken = self.http.cookies.get('csrftoken')
        if csrftoken:
            headers.update([('X-CSRFToken', csrftoken)])

        if extra_headers is not None:
            for h in extra_headers:
                headers.update([(h, extra_headers[h])])

        response = self.http.request(method, url, data=data, headers=headers,
                                     files=files, proxies=self.proxies)
        response.raise_for_status()
        return response

    def get(self, url):
        # Convenience wrapper for GET requests.
        return self.request('GET', url=url)

    def post(self, url, data=None, files=None, headers=None):
        # Convenience wrapper for POST requests.
        return self.request('POST', url=url, data=data, files=files, extra_headers=headers)

    def login(self, headless=True, wait_time=15, proxy=None, lang="en"):
        """
        Logs user in with the provided credentials via a Selenium browser.

        User session is stored in the 'cred_root' folder and reused so there is no need
        to login every time. Pinterest sessions last for about 15 days.
        Ideally you need to call this method 3-4 times a month at most.

        :param headless: run Chrome without a visible window
        :param wait_time: max seconds to wait for the login form / redirect
        :param proxy: optional proxy address applied to the browser
        :param lang: browser UI language
        """
        chrome_options = Options()
        chrome_options.add_argument("--lang=%s" % lang)
        if headless:
            chrome_options.add_argument("--headless")
        if proxy is not None:
            http_proxy = Proxy()
            http_proxy.proxy_type = ProxyType.MANUAL
            http_proxy.http_proxy = proxy
            http_proxy.socks_proxy = proxy
            http_proxy.ssl_proxy = proxy
            http_proxy.add_to_capabilities(chrome_options)

        driver = webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)
        driver.get("https://pinterest.com/login")
        try:
            WebDriverWait(driver, wait_time).until(EC.element_to_be_clickable((By.ID, 'email')))
            driver.find_element_by_id("email").send_keys(self.email)
            driver.find_element_by_id("password").send_keys(self.password)
            logins = driver.find_elements_by_xpath("//*[contains(text(), 'Log in')]")
            for login in logins:
                login.click()
            # Login form disappearing is used as the success signal.
            WebDriverWait(driver, wait_time).until(EC.invisibility_of_element((By.ID, 'email')))
            # Copy the browser's cookies into the requests session and persist them.
            cookies = driver.get_cookies()
            self.http.cookies.clear()
            for cookie in cookies:
                self.http.cookies.set(cookie['name'], cookie['value'])
            self.registry.update_all(self.http.cookies.get_dict())
        except Exception as e:
            print("Failed to login", e)
        # NOTE(review): this success message prints even when the except branch
        # above ran — the two prints can contradict each other.
        print("Successfully logged in with account " + self.email)
        driver.close()

    def logout(self):
        """
        Logs current user out.
        Takes a few seconds for the session to be invalidated on Pinterest's side.
        """
        options = {
            'disable_auth_failure_redirect': True
        }
        data = self.req_builder.buildPost(options=options)
        return self.post(url=DELETE_USER_SESSION, data=data)

    def get_user_overview(self, username=None):
        """
        :param username: target username, if left blank current user is assumed
        :return: python dict describing the pinterest user profile response
        """
        if username is None:
            username = self.username
        options = {
            "isPrefetch": 'false',
            "username": username,
            "field_set_key": "profile"
        }
        url = self.req_builder.buildGet(url=USER_RESOURCE, options=options)
        result = self.get(url=url).json()
        return result['resource_response']['data']

    def boards(self, username=None, page_size=50):
        """
        The data returned is chunked; this comes from Pinterest's REST API.
        Some users might have a huge number of boards, which is why it makes
        sense to chunk the data. In order to obtain all boards this method
        needs to be called until it returns an empty list.
        :param username: target username, if left blank current user is assumed
        :param page_size: controls the batch size for each request
        :return: python dict describing one batch of the user's boards
        """
        if username is None:
            username = self.username

        next_bookmark = self.bookmark_manager.get_bookmark(primary='boards', secondary=username)

        options = {
            "page_size": page_size,
            "privacy_filter": "all",
            "sort": "custom",
            "username": username,
            "isPrefetch": False,
            "include_archived": True,
            "field_set_key": "profile_grid_item",
            "group_by": "visibility",
            "redux_normalize_feed": True,
            "bookmarks": [next_bookmark]
        }

        source_url = '/{}/boards/'.format(username)
        url = self.req_builder.buildGet(url=BOARDS_RESOURCE, options=options, source_url=source_url)

        result = self.get(url=url).json()
        # Remember the pagination cursor for the next call.
        bookmark = result['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='boards', secondary=username, bookmark=bookmark)

        return result['resource_response']['data']

    def boards_all(self, username=None):
        """
        Obtains all boards of a user.
        NOTE: some users might have a huge number of boards. In such cases the
        'boards' method (which is batched) should be used in order to avoid
        memory issues.
        :param username: target user, if left blank current user is assumed
        :return: all boards of a user
        """
        boards = []
        board_batch = self.boards(username=username)
        while len(board_batch) > 0:
            boards += board_batch
            board_batch = self.boards(username=username)

        return boards

    def create_board(self, name, description='', category='other', privacy='public', layout='default'):
        """
        Creates a new board and returns the response from Pinterest.
        :param name: board name (should be unique per user)
        :param description: board description
        :param category: if you have defined categories (it is not visible to external users)
        :param privacy: can be public or private
        :param layout: looks like a legacy parameter but it is mandatory (can be left as default)
        """
        options = {
            "name": name,
            "description": description,
            "category": category,
            "privacy": privacy,
            "layout": layout,
            "collab_board_email": 'true',
            "collaborator_invites_enabled": 'true'
        }
        source_url = '/{}/boards/'.format(self.email)
        data = self.req_builder.buildPost(options=options, source_url=source_url)
        return self.post(url=CREATE_BOARD_RESOURCE, data=data)

    def follow_board(self, board_id):
        """
        Follows a board with current user.
        :param board_id: the id of the board to follow
        :return: python dict with the pinterest response
        """
        options = {"board_id": board_id}
        data = self.req_builder.buildPost(options=options)
        return self.post(url=FOLLOW_BOARD_RESOURCE, data=data)

    def unfollow_board(self, board_id):
        """
        Unfollows a board with current user.
        :param board_id: the id of the board to unfollow
        :return: python dict with the pinterest response
        """
        options = {"board_id": board_id}
        data = self.req_builder.buildPost(options=options)
        return self.post(url=UNFOLLOW_BOARD_RESOURCE, data=data)

    def follow_user(self, user_id):
        """
        Follows a user with current user.
        :param user_id: the id of the user to follow
        :return: python dict with the pinterest response
        """
        options = {"user_id": user_id}
        data = self.req_builder.buildPost(options=options)
        return self.post(url=FOLLOW_USER_RESOURCE, data=data)

    def unfollow_user(self, user_id):
        """
        Unfollows a user with current user.
        :param user_id: the id of the user to unfollow
        :return: python dict with the pinterest response
        """
        options = {"user_id": user_id}
        data = self.req_builder.buildPost(options=options)
        return self.post(url=UNFOLLOW_USER_RESOURCE, data=data)

    def get_following(self, username=None, page_size=250):
        """
        Get all users this particular user follows.
        The response of this method is batched, meaning it needs to be called
        until an empty list is returned.
        :param username: target user, if left blank current user is assumed
        :param page_size: batch size
        :return: python dict describing one batch of the 'following' list
        """
        if username is None:
            username = self.username

        next_bookmark = self.bookmark_manager.get_bookmark(primary='following', secondary=username)
        if next_bookmark == '-end-':
            return []

        source_url = '/{}/_following/'.format(self.email)
        options = {
            'isPrefetch': 'false',
            'hide_find_friends_rep': 'false',
            'username': username,
            'page_size': page_size,
            'bookmarks': [next_bookmark]
        }
        url = self.req_builder.buildGet(url=USER_FOLLOWING_RESOURCE, options=options, source_url=source_url)
        result = self.get(url=url).json()
        result = result['resource_response']
        # A missing 'bookmark' key signals the last page.
        bookmark = '-end-'
        if 'bookmark' in result:
            bookmark = result['bookmark']
        self.bookmark_manager.add_bookmark(primary='following', secondary=username, bookmark=bookmark)
        return result['data']

    def get_following_all(self, username=None):
        """
        Obtains the list of all users that the specified user follows.
        NOTE: Some users might have huge following lists. In such cases using
        'get_following' (which is batched) is preferred.
        :param username: target username
        :return: python dict containing all following
        """
        following = []
        following_batch = self.get_following(username=username)
        while len(following_batch) > 0:
            following += following_batch
            following_batch = self.get_following(username=username)

        return following

    def get_user_followers(self, username=None, page_size=250):
        """
        Obtains a list of the user's followers.
        The response from this method is batched, meaning it needs to be called
        until an empty list is returned.
        :param username: target username, if left blank current user is assumed
        :param page_size: batch size
        :return: python dict describing one batch of user followers
        """
        if username is None:
            username = self.username

        next_bookmark = self.bookmark_manager.get_bookmark(primary='followers', secondary=username)
        if next_bookmark == '-end-':
            return []

        options = {
            'isPrefetch': False,
            'hide_find_friends_rep': True,
            'username': username,
            'page_size': page_size,
            'bookmarks': [next_bookmark]
        }
        source_url = '/{}/_followers/'.format(self.username)
        url = self.req_builder.buildGet(url=USER_FOLLOWERS_RESOURCE, options=options, source_url=source_url)
        result = self.get(url=url).json()
        result = result['resource_response']
        bookmark = '-end-'
        if 'bookmark' in result:
            bookmark = result['bookmark']
        self.bookmark_manager.add_bookmark(primary='followers', secondary=username, bookmark=bookmark)
        return result['data']

    def get_user_followers_all(self, username=None):
        """
        Obtains a list of all the followers a user has.
        NOTE: Some users might have huge followers lists. In such cases
        'get_user_followers' should be used to avoid memory errors.
        :param username: target user, if left blank current user is assumed
        :return: list of follower objects
        """
        followers = []
        followers_batch = self.get_user_followers(username=username)
        while len(followers_batch) > 0:
            followers += followers_batch
            followers_batch = self.get_user_followers(username=username)

        return followers

    def pin(self, board_id, image_url, description='', link='', title='', section_id=None):
        """
        Performs a pin operation.
        If you want to upload a local image use 'upload_pin'.
        :param board_id: id of the target board (current user should have rights to pin to it)
        :param image_url: web url of an image (not local one)
        :param description: pin description (can be blank)
        :param link: link to include (can be blank; defaults to image_url)
        :param title: title (can be blank)
        :param section_id: board section, should be previously defined; optional
        :return: python dict describing the pinterest response
        """
        options = {
            "board_id": board_id,
            "image_url": image_url,
            "description": description,
            "link": link if link else image_url,
            "scrape_metric": {"source": "www_url_scrape"},
            "method": "scraped",
            "title": title,
            "section": section_id
        }
        source_url = '/pin/find/?url={}'.format(self.req_builder.url_encode(image_url))
        data = self.req_builder.buildPost(options=options, source_url=source_url)
        return self.post(url=PIN_RESOURCE_CREATE, data=data)

    def upload_pin(self, board_id, image_file, description='', link='', title='', section_id=None):
        """
        This method is similar to 'pin' except the image for the pin is a local file.
        The image is first uploaded to Pinterest, then pinned via its hosted URL.
        """
        image_url = self._upload_image(image_file=image_file).json()['image_url']
        return self.pin(board_id=board_id, description=description, image_url=image_url,
                        link=link, title=title, section_id=section_id)

    def repin(self, board_id, pin_id, section_id=None):
        """
        Repin/Save action.
        :param board_id: board id, current user should have rights to pin to this board
        :param pin_id: pin id to repin
        :param section_id: board section, should be previously defined; optional
        :return: python dict describing the pinterest response
        """
        options = {
            "board_id": board_id,
            "pin_id": pin_id,
            "section": section_id,
            "is_buyable_pin": False
        }
        source_url = '/pin/{}/'.format(pin_id)
        data = self.req_builder.buildPost(options=options, source_url=source_url)
        return self.post(url=REPIN_RESOURCE_CREATE, data=data)

    def _upload_image(self, image_file):
        # Upload a local image as multipart form data; Pinterest returns the
        # hosted image URL in the JSON response.
        file_name = os.path.basename(image_file)
        mime_type = mimetypes.guess_type(image_file)[0]

        form_data = MultipartEncoder(fields={
            'img': ('%s' % file_name, open(image_file, 'rb'), mime_type)
        })
        headers = {
            'Content-Length': '%s' % form_data.len,
            'Content-Type': form_data.content_type,
            'X-UPLOAD-SOURCE': 'pinner_uploader'
        }
        return self.post(url=UPLOAD_IMAGE, data=form_data, headers=headers)

    def delete_pin(self, pin_id):
        """
        Deletes a pin the user owns.
        :param pin_id: pin id to delete
        :return: python dict describing the pinterest response
        """
        options = {"id": pin_id}
        source_url = '/{}/'.format(self.username)
        data = self.req_builder.buildPost(options=options, source_url=source_url)
        return self.post(url=DELETE_PIN_RESOURCE, data=data)

    def comment(self, pin_id, text):
        """
        Puts a comment on a pin.
        :param pin_id: pin id to comment on
        :param text: text of the comment
        :return: python dict describing the pinterest response
        """
        # The comment API is keyed on the aggregated pin id, so the pin must
        # be loaded first.
        pin_data = self.load_pin(pin_id=pin_id)
        options = {
            "objectId": pin_data['aggregated_pin_data']['id'],
            "pinId": pin_id,
            "tags": "[]",
            "text": text
        }
        data = self.req_builder.buildPost(options=options, source_url=pin_id)
        return self.post(url=CREATE_COMMENT_RESOURCE, data=data)

    def load_pin(self, pin_id):
        """
        Loads full information about a pin by scraping its page's
        'initial-state' script tag.
        :param pin_id: pin id to load
        :return: python dict describing the pinterest response
        """
        resp = self.get(url=LOAD_PIN_URL_FORMAT.format(pin_id))
        soup = BeautifulSoup(resp.text, 'html.parser')
        scripts = soup.findAll('script')
        pin_data = {}
        for s in scripts:
            if 'id' in s.attrs and s.attrs['id'] == 'initial-state':
                pinJsonData = json.loads(s.contents[0])['resources']['data']['PinResource']
                # The PinResource dict has a single opaque key; unwrap it.
                pinJsonData = pinJsonData[list(pinJsonData.keys())[0]]['data']
                return pinJsonData

        raise Exception("Pin data not found. Probably pintereset chagned their API")

    def get_comments(self, pin_id, page_size=50):
        """
        Gets comments on a pin.
        The response is batched, meaning this method should be called until an
        empty list is returned.
        :param pin_id: target pin id
        :param page_size: batch size
        :return: list of comment objects
        """
        pin_data = self.load_pin(pin_id=pin_id)

        next_bookmark = self.bookmark_manager.get_bookmark(primary='pin_comments', secondary=pin_id)
        if next_bookmark == '-end-':
            return []

        options = {
            "isPrefetch": False,
            "objectId": pin_data['aggregated_pin_data']['id'],
            "page_size": page_size,
            "redux_normalize_feed": True,
            "bookmarks": [next_bookmark]
        }
        source_url = '/pin/{}/'.format(pin_id)
        url = self.req_builder.buildGet(url=GET_PIN_COMMENTS_RESOURCE, options=options, source_url=source_url)
        resp = self.get(url=url).json()
        resp = resp['resource_response']
        bookmark = '-end-'
        if 'bookmark' in resp:
            bookmark = resp['bookmark']
        self.bookmark_manager.add_bookmark(primary='pin_comments', secondary=pin_id, bookmark=bookmark)
        return resp['data']

    def get_comments_all(self, pin_id):
        """
        Obtains all comments of a pin.
        NOTE: If the pin has too many comments this might cause memory issues.
        In such cases use 'get_comments' which is batched.
        :param pin_id: target pin id
        :return: list of comment objects
        """
        results = []
        search_batch = self.get_comments(pin_id=pin_id)
        while len(search_batch) > 0:
            results += search_batch
            search_batch = self.get_comments(pin_id=pin_id)
        return results

    def delete_comment(self, pin_id, comment_id):
        """
        Deletes a comment.
        :param pin_id: pin id to search the comment in
        :param comment_id: comment id
        :return: python dict describing the pinterest response
        """
        options = {"commentId": comment_id}
        source_url = "/pin/{}/".format(pin_id)
        data = self.req_builder.buildPost(options=options, source_url=source_url)
        return self.post(url=DELETE_COMMENT, data=data)

    def invite(self, board_id, user_id):
        """
        Invites a user to one of the current user's boards.
        :param board_id: board to invite to
        :param user_id: user to invite
        :return: python dict describing the pinterest response
        """
        options = {"board_id": board_id, "invited_user_ids": [user_id]}
        data = self.req_builder.buildPost(options=options)
        return self.post(url=BOARD_INVITE_RESOURCE, data=data)

    def get_board_invites(self, board_id, page_size=100):
        """
        Returns a list of users invited to the specified board.
        :param board_id: id of target board
        :param page_size: batch size
        :return: list of invite objects
        """
        # NOTE(review): unlike the other list endpoints, no bookmark is used
        # here, so repeated calls return the same data.
        options = {
            "isPrefetch": False,
            "board_id": board_id,
            "sort": "viewer_first",
            "field_set_key": "boardEdit",
            "status_filters": "new,accepted,contact_request_not_approved,pending_approval",
            "include_inactive": True,
            "page_size": page_size
        }
        url = self.req_builder.buildGet(url=BOARD_INVITES_RESOURCE, options=options)
        resp = self.get(url=url).json()
        return resp['resource_response']['data']

    def get_board_invites_all(self, board_id):
        """
        Obtains all invites of a board.
        NOTE: If the board has too many invites this might cause memory issues.
        In such cases use 'get_board_invites' which is batched.
        :param board_id: id of target board
        :return: list of board invite objects
        """
        # NOTE(review): get_board_invites is not actually paginated (no
        # bookmark), so this loop will not terminate while invites exist —
        # verify against the live API before relying on this method.
        results = []
        search_batch = self.get_board_invites(board_id=board_id)
        while len(search_batch) > 0:
            results += search_batch
            search_batch = self.get_board_invites(board_id=board_id)
        return results

    def delete_invite(self, board_id, invited_user_id, also_block=False):
        """
        Deletes an invite for a board.
        :param board_id: board id
        :param invited_user_id: invited user id
        :param also_block: you can also block the user (default False)
        :return: python dict describing the pinterest response
        """
        options = {
            "ban": also_block,
            "board_id": board_id,
            "field_set_key": "boardEdit",
            "invited_user_id": invited_user_id
        }
        data = self.req_builder.buildPost(options=options)
        return self.post(url=BOARD_DELETE_INVITE_RESOURCE, data=data)

    def visual_search(self, pin_data, x=None, y=None, w=None, h=None, padding=10):
        """
        Gives access to Pinterest's visual search API.
        This method is batched, meaning it needs to be called until an empty
        list is returned.
        :param pin_data: pin data (as returned by load_pin)
        :param x: x position of the cropped part of the image used for searching
        :param y: y position of the cropped part of the image used for searching
        :param w: width of the cropped part of the image used for searching
        :param h: height of the cropped part of the image used for searching
        :param padding: default padding for the cropped image
        :return: python dict describing the pinterest response
        """
        orig = pin_data['images']['orig']
        width = orig['width']
        height = orig['height']
        image_signature = pin_data['image_signature']
        pin_id = pin_data['id']

        # Default crop: whole image minus `padding` pixels on every side.
        x = padding if x is None else x
        y = padding if y is None else y
        w = width - padding * 2 if w is None else w
        h = height - padding * 2 if h is None else h

        source_url = '/pin/{}/visual-search/?x={}&y={}&w={}&h={}'.format(pin_id, x, y, w, h)

        next_bookmark = self.bookmark_manager.get_bookmark(primary='visual_search', secondary=source_url)
        if next_bookmark == '-end-':
            return []

        options = {
            "isPrefetch": False,
            "pin_id": pin_id,
            "image_signature": image_signature,
            # Crop coordinates are sent as fractions of the image size.
            "crop": {
                "x": x / width,
                "y": y / height,
                "w": w / width,
                "h": h / height
            },
            "bookmarks": [next_bookmark],
            "no_fetch_context_on_resource": False
        }
        url = self.req_builder.buildGet(url=VISUAL_LIVE_SEARCH_RESOURCE, options=options, source_url=source_url)
        resp = self.get(url=url).json()
        bookmark = resp['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='visual_search', secondary=source_url, bookmark=bookmark)
        return resp['resource_response']['data']['results']

    def search(self, scope, query, page_size=250):
        """
        Gives access to the Pinterest search API.
        This method is batched, meaning it needs to be called until an empty
        list is returned.
        NOTE: there is a max number of results set by Pinterest -> 1000
        :param scope: can be pins, buyable_pins, my_pins, videos, boards
        :param query: search phrase
        :param page_size: batch size
        :return: list of search results
        """
        next_bookmark = self.bookmark_manager.get_bookmark(primary='search', secondary=query)
        if next_bookmark == '-end-':
            return []

        # Build the URL-encoded query and the repeated term_meta[] parameters
        # Pinterest's web client sends.
        terms = query.split(' ')
        escaped_query = "%20".join(terms)
        term_meta_arr = []
        for t in terms:
            term_meta_arr.append('term_meta[]=' + t)
        term_arg = "%7Ctyped&".join(term_meta_arr)
        source_url = '/search/{}/?q={}&rs=typed&{}%7Ctyped'.format(scope, escaped_query, term_arg)

        options = {
            "isPrefetch": False,
            "auto_correction_disabled": False,
            "query": query,
            "redux_normalize_feed": True,
            "rs": "typed",
            "scope": scope,
            "page_size": page_size,
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=BASE_SEARCH_RESOURCE, options=options, source_url=source_url)
        resp = self.get(url=url).json()
        bookmark = resp['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='search', secondary=query, bookmark=bookmark)
        return resp['resource_response']['data']['results']

    def board_recommendations(self, board_id='', page_size=50):
        """
        This gives the list of pins you see when you open a board and click
        on 'More Ideas'.
        This method is batched and needs to be called until an empty list is
        returned in order to obtain all of the results.
        :param board_id: target board id
        :param page_size: batch size
        :return: list of recommended pins
        """
        # NOTE(review): bookmark namespace 'boards' is shared with boards();
        # the secondary keys differ (board_id vs username) so they should not
        # normally collide — confirm against BookmarkManager semantics.
        next_bookmark = self.bookmark_manager.get_bookmark(primary='boards', secondary=board_id)
        if next_bookmark == '-end-':
            return []

        options = {
            "isPrefetch": False,
            "type": "board",
            "id": board_id,
            "page_size": page_size,
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=BOARD_RECOMMEND_RESOURCE, options=options)
        response = self.get(url=url).json()
        bookmark = response['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='boards', secondary=board_id, bookmark=bookmark)
        return response['resource_response']['data']

    def get_pinnable_images(self, url):
        """
        Simple API Pinterest uses to suggest pinnable images from a site.
        :param url: page to scan for images
        :return: list of image URLs
        """
        options = {"isPrefetch": 'false',
                   "url": url,
                   "source": "pin_create",
                   "appendItems": 'false',
                   "followRedirects": 'true'
                   }
        url = self.req_builder.buildGet(url=PINNABLE_IMAGES_RESOURCE, source_url='/pin-builder/', options=options)
        res = self.get(url=url).json()
        res = res['resource_response']['data']['items']
        urls = []
        for item in res:
            if 'url' in item:
                urls.append(item['url'])
        return urls

    def home_feed(self, page_size=100):
        """
        This gives the list of pins you see when you open the Pinterest home
        page.
        This method is batched; in order to obtain all home feed items it
        needs to be called until an empty list is returned.
        :param page_size: batch size
        :return: list of home feed pins
        """
        next_bookmark = self.bookmark_manager.get_bookmark(primary='home_feed')
        if next_bookmark == '-end-':
            return []

        options = {
            "bookmarks": [next_bookmark],
            "isPrefetch": False,
            "field_set_key": "hf_grid_partner",
            "in_nux": False,
            "prependPartner": True,
            "prependUserNews": False,
            "static_feed": False,
            "page_size": page_size
        }
        url = self.req_builder.buildGet(url=USER_HOME_FEED_RESOURCE, options=options)
        response = self.get(url=url).json()
        bookmark = '-end-'
        if 'bookmark' in response['resource_response']:
            bookmark = response['resource_response']['bookmark']
        self.bookmark_manager.add_bookmark(primary='home_feed', bookmark=bookmark)
        return response['resource_response']['data']

    def board_feed(self, board_id='', page_size=250):
        """
        Gives a list of all pins in a board.
        This method is batched, meaning in order to obtain all pins in a
        board you need to call it until an empty list is returned.
        :param board_id: target board id
        :param page_size: batch size
        :return: list of pins in the board
        """
        next_bookmark = self.bookmark_manager.get_bookmark(primary='board_feed', secondary=board_id)
        if next_bookmark == '-end-':
            return []

        options = {
            "isPrefetch": False,
            "board_id": board_id,
            "field_set_key": "partner_react_grid_pin",
            "filter_section_pins": True,
            "layout": "default",
            "page_size": page_size,
            "redux_normalize_feed": True,
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=BOARD_FEED_RESOURCE, options=options)
        response = self.get(url=url).json()
        bookmark = response['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='board_feed', secondary=board_id, bookmark=bookmark)
        return response['resource_response']['data']

    def initiate_conversation(self, user_ids, message='hi'):
        """
        Initiates a new conversation with one or more users.
        :param user_ids: ids of the users to message
        :param message: first message of the conversation
        :return: python dict object describing the pinterest response
        """
        options = {
            "user_ids": user_ids,
            "text": message
        }
        data = self.req_builder.buildPost(options=options)
        return self.post(url=CONVERSATION_RESOURCE_CREATE, data=data)

    def send_message(self, message='', conversation_id='', pin_id=''):
        """
        Sends a new message to an already initiated conversation.
        :param message: text to send
        :param conversation_id: target conversation
        :param pin_id: optional pin to attach
        """
        options = {
            "conversation_id": conversation_id,
            "text": message,
            "pin": pin_id
        }
        data = self.req_builder.buildPost(options=options)
        return self.post(url=SEND_MESSAGE, data=data)

    def load_conversation(self, conversation_id=''):
        """
        Loads a list of all messages in a conversation by draining the
        batched endpoint.
        """
        messages = []
        message_batch = self._load_conversation_batch(conversation_id=conversation_id)
        while len(message_batch) > 0:
            messages += message_batch
            message_batch = self._load_conversation_batch(conversation_id=conversation_id)
        return messages

    def _load_conversation_batch(self, conversation_id='', page_size=25):
        # One page of messages for a conversation; bookmark keyed per
        # conversation id.
        next_bookmark = self.bookmark_manager.get_bookmark(primary='conversations', secondary=conversation_id)
        if next_bookmark == '-end-':
            return []

        options = {
            "isPrefetch": False,
            "page_size": page_size,
            "conversation_id": conversation_id,
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=LOAD_CONVERSATION, options=options)
        response = self.get(url=url).json()
        bookmark = response['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='conversations', secondary=conversation_id, bookmark=bookmark)
        return response['resource_response']['data']

    def get_conversations(self):
        """
        Loads a list of all conversations the current user has by draining
        the batched endpoint.
        """
        conversations = []
        conv_batch = self._get_conversation_batch()
        while len(conv_batch) > 0:
            conversations += conv_batch
            conv_batch = self._get_conversation_batch()
        return conversations

    def _get_conversation_batch(self):
        # One page of the current user's conversation list.
        next_bookmark = self.bookmark_manager.get_bookmark(primary='conversations')
        if next_bookmark == '-end-':
            return []

        options = {
            "isPrefetch": False,
            "field_set_key": "default",
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=CONVERSATION_RESOURCE, options=options)
        response = self.get(url=url).json()
        next_bookmark = response['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='conversations', bookmark=next_bookmark)
        return response['resource_response']['data']

    def create_board_section(self, board_id='', section_name=''):
        """
        Creates a new section in a board the current user owns.
        :param board_id: target board id
        :param section_name: name of the new section
        """
        options = {
            "board_id": board_id,
            "initial_pins": [],
            "name": section_name,
            "name_source": 0
        }
        data = self.req_builder.buildPost(options=options)
        return self.post(url=BOARD_SECTION_RESOURCE, data=data)

    def get_board_sections(self, board_id='', reset_bookmark=False):
        """
        Obtains a list of all sections of a board.
        Batched: call until an empty list is returned. Pass
        ``reset_bookmark=True`` on the terminating call to allow re-listing
        from the start later.
        """
        next_bookmark = self.bookmark_manager.get_bookmark(primary='board_sections', secondary=board_id)
        if next_bookmark == '-end-':
            if reset_bookmark:
                self.bookmark_manager.reset_bookmark(primary='board_sections', secondary=board_id)
            return []

        options = {
            "isPrefetch": False,
            "board_id": board_id,
            "redux_normalize_feed": True,
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=GET_BOARD_SECTIONS, options=options)
        response = self.get(url=url).json()
        bookmark = response['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='board_sections', secondary=board_id, bookmark=bookmark)
        return response['resource_response']['data']

    def get_section_pins(self, section_id='', page_size=250, reset_bookmark=False):
        """
        Returns a list of all pins in a board section.
        This method is batched, meaning in order to obtain all pins in the
        section you need to call it until an empty list is returned.
        """
        next_bookmark = self.bookmark_manager.get_bookmark(primary='section_pins', secondary=section_id)
        if next_bookmark == '-end-':
            if reset_bookmark:
                self.bookmark_manager.reset_bookmark(primary='section_pins', secondary=section_id)
            return []

        options = {
            "isPrefetch": False,
            "field_set_key": "react_grid_pin",
            "is_own_profile_pins": True,
            "page_size": page_size,
            "redux_normalize_feed": True,
            "section_id": section_id,
            "bookmarks": [next_bookmark]
        }
        url = self.req_builder.buildGet(url=GET_BOARD_SECTION_PINS, options=options)
        response = self.get(url=url).json()
        bookmark = response['resource']['options']['bookmarks'][0]
        self.bookmark_manager.add_bookmark(primary='section_pins', secondary=section_id, bookmark=bookmark)
        # Entries without a 'pinner' key are layout/ad items, not real pins.
        pins = [d for d in response['resource_response']['data'] if 'pinner' in d]
        return pins

    def delete_board_section(self, section_id=''):
        """
        Deletes a board section by id.
        """
        options = {
            "section_id": section_id
        }
        data = self.req_builder.buildPost(options=options)
        return self.post(url=BOARD_SECTION_EDIT_RESOURCE, data=data)

    def type_ahead(self, scope="pins", count=5, term=""):
        """
        Returns Pinterest predictions for a given term. Response may include
        user profiles.
        Example: term "dada" gives ["dadaism", "dada art"] etc.
        :param scope: always "pins"
        :param count: max guess number
        :param term: word to be typed ahead
        :return: response items
        """
        source_url = "/"
        options = {"pin_scope": scope,
                   "count": count,
                   "term": term,
                   "no_fetch_context_on_resource": False}
        url = self.req_builder.buildGet(TYPE_AHEAD_RESOURCE, options, source_url)
        resp = self.get(url=url).json()
        return resp["resource_response"]["data"]["items"]
StarcoderdataPython
1684344
import numpy as np


class BinaryTree:
    """A binary tree of valued nodes addressable by integer index.

    Mirrors the structure used by sentiment-treebank style corpora: every
    node carries a sentiment `value`; leaves additionally carry a `word`
    (internal nodes keep the placeholder word '_').
    """

    def __init__(self, index, value):
        # Keep a flat {index: node} map alongside the tree so any node can be
        # addressed in O(1) without traversal.
        self.root = BinaryTreeNode(index, value)
        self.node_index = {index: self.root}

    def add_left_descendant(self, index, value, parent_index):
        """Create a node (index, value) and attach it as the left child of the
        node registered under `parent_index`."""
        parent = self.node_index[parent_index]
        new_node = BinaryTreeNode(index, value, parent)
        self.node_index[index] = new_node
        parent.add_left_descendant(new_node)

    def has_left_descendant_at_node(self, index):
        return self.node_index[index].has_left_descendant()

    def add_right_descendant(self, index, value, parent_index):
        """Create a node (index, value) and attach it as the right child of the
        node registered under `parent_index`."""
        parent = self.node_index[parent_index]
        new_node = BinaryTreeNode(index, value, parent)
        self.node_index[index] = new_node
        parent.add_right_descendant(new_node)

    def has_right_descendant_at_node(self, index):
        return self.node_index[index].has_right_descendant()

    def set_word(self, index, word):
        self.node_index[index].set_word(word)

    def print_tree(self):
        self.root.recursive_print()

    def get_sentence(self):
        """Join the words of all nodes (skipping '_' placeholders) into a
        sentence, in node-insertion order."""
        sentence = ' '.join([n.word for n in self.node_index.values() if n.word != '_'])
        return sentence

    def get_words(self):
        return [n.word for n in self.node_index.values()]

    def convert_to_ptb_format(self):
        """Serialize the whole tree as a Penn-Treebank style bracketed string."""
        return self.root.convert_to_ptb()

    def get_all_sequences_and_masks(self, root_only=False):
        """
        Get the sequences, masks, and values associated with all subtrees in this tree
        :param root_only: if True, only return the values for the whole tree, not the subtrees
        :return: a list of (words, left_mask, right_mask, value) tuples
        """
        seqs_and_masks = []
        if root_only:
            words, left_mask, right_mask, value = self.convert_to_sequence_and_masks(self.root)
            seqs_and_masks.append((words, left_mask, right_mask, value))
        else:
            # One entry per node: each node is treated as the root of its subtree.
            nodes = self.node_index.values()
            for node in nodes:
                words, left_mask, right_mask, value = self.convert_to_sequence_and_masks(node)
                seqs_and_masks.append((words, left_mask, right_mask, value))
        return seqs_and_masks

    def convert_to_sequence_and_masks(self, head_node):
        """
        Convert a subtree into a sequence of words, corresponding masks,
        and the value of the root
        :param head_node: the node to treat as the root of the (sub)tree
        :return words: list of words in tree order
        :return left_mask, right_mask: masks denoting the tree structure
        :return value: the sentiment value of the root of this (sub)tree
        """
        sequence = head_node.get_children_in_sequence()
        # Reverse so descendants precede their parent (bottom-up processing
        # order); the subtree root therefore ends up last in the sequence.
        sequence.reverse()
        sequence_map = {s: s_i for s_i, s in enumerate(sequence)}
        n_elements = len(sequence)
        left_mask = np.zeros([n_elements, n_elements], dtype=np.int32)
        right_mask = np.zeros([n_elements, n_elements], dtype=np.int32)
        for s_i, n_i in enumerate(sequence):
            node = self.node_index[n_i]
            if node.has_left_descendant():
                left_mask[s_i, sequence_map[node.left_descendant.index]] = 1
            if node.has_right_descendant():
                right_mask[s_i, sequence_map[node.right_descendant.index]] = 1
        words = [self.node_index[n_i].word for n_i in sequence]
        value = int(self.node_index[sequence[-1]].value)
        return words, left_mask, right_mask, value


class BinaryTreeNode:
    """A single tree node: sentiment `value`, integer `index`, optional `word`."""

    def __init__(self, index, value, parent=None):
        self.value = value
        self.index = index
        self.word = '_'  # placeholder until set_word() is called
        self.parent = parent
        self.left_descendant = None
        self.right_descendant = None

    def __str__(self):
        return '(%s %s %s)' % (self.value, self.word, self.index)

    def set_word(self, word):
        self.word = word

    def add_left_descendant(self, new_node):
        self.left_descendant = new_node

    def has_left_descendant(self):
        return self.left_descendant is not None

    def add_right_descendant(self, new_node):
        self.right_descendant = new_node

    def has_right_descendant(self):
        return self.right_descendant is not None

    def recursive_print(self, depth=0):
        # Bug fix: converted the original Python 2 print statement to the
        # print() function (the statement form is a SyntaxError under
        # Python 3); the space-separated output is unchanged.
        print(' ' * depth, self.value, self.word, self.index)
        if self.left_descendant is not None:
            self.left_descendant.recursive_print(depth + 1)
        if self.right_descendant is not None:
            self.right_descendant.recursive_print(depth + 1)

    def convert_to_ptb(self):
        """Serialize this subtree as a PTB-style bracketed string."""
        ptb_string = '(' + self.value
        if self.word != '_':
            ptb_string += ' ' + self.word
        if self.left_descendant is not None:
            ptb_string += ' ' + self.left_descendant.convert_to_ptb()
        if self.right_descendant is not None:
            ptb_string += ' ' + self.right_descendant.convert_to_ptb()
        ptb_string += ')'
        return ptb_string

    def get_leaf_nodes(self):
        """Return the words of all leaves under this node, left to right."""
        leaves = []
        if self.left_descendant is None and self.right_descendant is None:
            leaves.append(self.word)
        else:
            if self.left_descendant is not None:
                leaves.extend(self.left_descendant.get_leaf_nodes())
            if self.right_descendant is not None:
                leaves.extend(self.right_descendant.get_leaf_nodes())
        return leaves

    def get_children_in_sequence(self):
        """Return node indices with each parent listed before its descendants."""
        sequence = []
        if self.left_descendant is None and self.right_descendant is None:
            sequence.append(self.index)
        else:
            if self.left_descendant is not None:
                sequence.extend(self.left_descendant.get_children_in_sequence())
            if self.right_descendant is not None:
                sequence.extend(self.right_descendant.get_children_in_sequence())
            sequence = [self.index] + sequence
        return sequence
StarcoderdataPython
83717
"""A collection of simple bandit algorithms for comparison purposes.""" import math from collections import defaultdict from typing import Any, Dict, Tuple, Sequence, Optional, cast, Hashable from coba.simulations import Context, Action from coba.statistics import OnlineVariance from coba.learners.core import Learner, Key class RandomLearner(Learner): """A Learner implementation that selects an action at random and learns nothing.""" @property def family(self) -> str: """The family of the learner. See the base class for more information """ return "random" @property def params(self) -> Dict[str, Any]: """The parameters of the learner. See the base class for more information """ return {} def predict(self, key: Key, context: Context, actions: Sequence[Action]) -> Sequence[float]: """Choose a random action from the action set. Args: key: The key identifying the interaction we are choosing for. context: The context we're currently in. See the base class for more information. actions: The actions to choose from. See the base class for more information. Returns: The probability of taking each action. See the base class for more information. """ return [1/len(actions)] * len(actions) def learn(self, key: Key, context: Context, action: Action, reward: float, probability: float) -> None: """Learns nothing. Args: key: The key identifying the interaction this observed reward came from. context: The context we're learning about. See the base class for more information. action: The action that was selected in the context. See the base class for more information. reward: The reward that was gained from the action. See the base class for more information. probability: The probability that the given action was taken. """ pass class EpsilonBanditLearner(Learner): """A lookup table bandit learner with epsilon-greedy exploration.""" def __init__(self, epsilon: float) -> None: """Instantiate an EpsilonBanditLearner. Args: epsilon: A value between 0 and 1. 
We explore with probability epsilon and exploit otherwise. include_context: If true lookups are a function of context-action otherwise they are a function of action. """ self._epsilon = epsilon self._N: Dict[Hashable, int ] = defaultdict(int) self._Q: Dict[Hashable, Optional[float]] = defaultdict(int) @property def family(self) -> str: """The family of the learner. See the base class for more information """ return "bandit_epsilongreedy" @property def params(self) -> Dict[str, Any]: """The parameters of the learner. See the base class for more information """ return {"epsilon": self._epsilon } def predict(self, key: Key, context: Context, actions: Sequence[Action]) -> Sequence[float]: """Determine a PMF with which to select the given actions. Args: key: The key identifying the interaction we are choosing for. context: The context we're currently in. See the base class for more information. actions: The actions to choose from. See the base class for more information. Returns: The probability of taking each action. See the base class for more information. """ keys = [ self._key(action) for action in actions ] values = [ self._Q[key] for key in keys ] max_value = None if set(values) == {None} else max(v for v in values if v is not None) max_indexes = [i for i in range(len(values)) if values[i]==max_value] prob_selected_randomly = [1/len(actions) * self._epsilon] * len(actions) prob_selected_greedily = [ int(i in max_indexes)/len(max_indexes) * (1-self._epsilon) for i in range(len(actions))] return [p1+p2 for p1,p2 in zip(prob_selected_randomly,prob_selected_greedily)] def learn(self, key: Key, context: Context, action: Action, reward: float, probability: float) -> None: """Learn from the given interaction. Args: key: The key identifying the interaction this observed reward came from. context: The context we're learning about. See the base class for more information. action: The action that was selected in the context. See the base class for more information. 
reward: The reward that was gained from the action. See the base class for more information. probability: The probability that the given action was taken. """ a_key = self._key(action) alpha = 1/(self._N[a_key]+1) old_Q = cast(float, 0 if self._Q[a_key] is None else self._Q[a_key]) self._Q[a_key] = (1-alpha) * old_Q + alpha * reward self._N[a_key] = self._N[a_key] + 1 def _key(self, action: Action) -> Hashable: return tuple(action.items()) if isinstance(action,dict) else action class UcbBanditLearner(Learner): """This is an implementation of Auer et al. (2002) UCB1-Tuned algorithm. This algorithm assumes that the reward distribution has support in [0,1]. References: Auer, Peter, <NAME>, and <NAME>. "Finite-time analysis of the multiarmed bandit problem." Machine learning 47.2-3 (2002): 235-256. """ def __init__(self): """Instantiate a UcbBanditLearner.""" #these variable names were selected for easier comparison with the original paper self._init_a: int = 0 self._t : int = 0 self._s : Dict[Action, int ] = defaultdict(int) self._m : Dict[Action, float ] = {} self._v : Dict[Action, OnlineVariance] = defaultdict(OnlineVariance) @property def family(self) -> str: """The family of the learner. See the base class for more information """ return "bandit_UCB" @property def params(self) -> Dict[str, Any]: """The parameters of the learner. See the base class for more information """ return { } def predict(self, key: Key, context: Context, actions: Sequence[Action]) -> Sequence[float]: """Determine a PMF with which to select the given actions. Args: key: The key identifying the interaction we are choosing for. context: The context we're currently in. See the base class for more information. actions: The actions to choose from. See the base class for more information. Returns: The probability of taking each action. See the base class for more information. 
""" actions = [ self._key(a) for a in actions ] #initialize by playing every action once if self._init_a < len(actions): self._init_a += 1 return [ int(i == (self._init_a-1)) for i in range(len(actions)) ] else: values = [ self._m[a] + self._Avg_R_UCB(a) if a in self._m else None for a in actions ] max_value = None if set(values) == {None} else max(v for v in values if v is not None) max_indexes = [i for i in range(len(values)) if values[i]==max_value] return [ int(i in max_indexes)/len(max_indexes) for i in range(len(actions)) ] def learn(self, key: Key, context: Context, action: Action, reward: float, probability: float) -> None: """Learn from the given interaction. Args: key: The key identifying the interaction this observed reward came from. context: The context we're learning about. See the base class for more information. action: The action that was selected in the context. See the base class for more information. reward: The reward that was gained from the action. See the base class for more information. probability: The probability that the given action was taken. """ assert 0 <= reward and reward <= 1, "This algorithm assumes that reward has support in [0,1]." action = self._key(action) if action not in self._m: self._m[action] = reward else: self._m[action] = (1-1/self._s[action]) * self._m[action] + 1/self._s[action] * reward self._t += 1 self._s[action] += 1 self._v[action].update(reward) def _key(self, action: Action) -> Hashable: return tuple(action.items()) if isinstance(action,dict) else action def _Avg_R_UCB(self, action: Action) -> float: """Produce the estimated upper confidence bound (UCB) for E[R|A]. Args: action: The action for which we want to retrieve UCB for E[R|A]. Returns: The estimated UCB for E[R|A]. Remarks: See the beginning of section 4 in the algorithm's paper for this equation. 
""" ln = math.log; n = self._t; n_j = self._s[action]; V_j = self._Var_R_UCB(action) return math.sqrt(ln(n)/n_j * min(1/4,V_j)) def _Var_R_UCB(self, action: Action) -> float: """Produce the upper confidence bound (UCB) for Var[R|A]. Args: action: The action for which we want to retrieve UCB for Var[R|A]. Returns: The estimated UCB for Var[R|A]. Remarks: See the beginning of section 4 in the algorithm's paper for this equation. """ ln = math.log; t = self._t; s = self._s[action]; var = self._v[action].variance return var + math.sqrt(2*ln(t)/s)
StarcoderdataPython
3558323
# -*- coding: utf-8 -*-
import click
from loguru import logger
from pathlib import Path
from dotenv import find_dotenv, load_dotenv


@click.command()
def make():
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).
    """
    logger.info("Nothing implemented yet")


if __name__ == '__main__':
    # Resolve the project root (two directories above this file).  Not used
    # by this stub yet, but handy for locating data files later.
    project_dir = Path(__file__).resolve().parents[2]

    # Walk up from the working directory until a .env file is found, then
    # export its entries as environment variables before running the command.
    load_dotenv(find_dotenv())

    make()
StarcoderdataPython
5169415
""" author: @endormi Small script to check computer memory (total, available, usage, used and free) """ import psutil file = 'file.txt' mem = psutil.virtual_memory() print(str(mem) + '\n') print('Total memory: ' + str(mem.total)) print('Available memory: ' + str(mem.available)) print('Memory usage: ' + str(mem.percent)) print('Used memory: ' + str(mem.used)) print('Free memory: ' + str(mem.free)) with open(file, 'a') as i: i.write('Total memory: ' + str(mem.total) + '\n') i.write('Available memory: ' + str(mem.available) + '\n') i.write('Memory usage: ' + str(mem.percent) + '\n') i.write('Used memory: ' + str(mem.used) + '\n') i.write('Free memory: ' + str(mem.free) + '\n') i.write('\n') print('\nWrote results to ' + file)
StarcoderdataPython
3254199
# repo: polkapolka/pybay
# Bug fix: the raw "<reponame>..." dataset artifact at the top of this file was
# not valid Python (SyntaxError on import); it is preserved here as a comment.
from django.db import models
from django.utils import timezone


class Countdown(models.Model):
    """A call-to-action banner that counts down to a target date."""

    title = models.TextField(help_text="Text above the countdown")
    date = models.DateTimeField(help_text="The date the countdown counts to")
    cta = models.TextField(help_text="Text on the button below the countdown",
                           verbose_name='Call to action')
    link = models.TextField(help_text="Target of the button below the countdown")

    def context_for_template(self):
        """Return this row's fields as a template context dict.

        The target `date` is converted to epoch seconds and a `reference`
        "now" timestamp is added so the template can compute the remaining
        time client-side.
        """
        ret = dict(self.__dict__, reference=timezone.now().timestamp())
        ret['date'] = ret['date'].timestamp()
        return ret

    def __str__(self):
        return '{0.title}: {0.date}'.format(self)
StarcoderdataPython
8065678
# (dataset artifact "<gh_stars>10-100" removed -- it was invalid Python and
# made the module unimportable)
# src: https://github.com/OpenNMT/OpenNMT-py/blob/master/onmt/decoders/decoder.py
import torch
import torch.nn as nn

from neuroir.decoders.decoder import RNNDecoderBase
from neuroir.utils.misc import aeq


class RNNDecoder(RNNDecoderBase):
    """
    Standard fully batched RNN decoder with attention.
    Faster implementation, uses CuDNN for implementation.
    See :obj:`RNNDecoderBase` for options.
    Based around the approach from
    "Neural Machine Translation By Jointly Learning To Align and Translate"
    :cite:`Bahdanau2015`
    """

    def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):
        """
        Private helper for running the specific RNN forward pass.
        Must be overriden by all subclasses.
        Args:
            tgt (LongTensor): a sequence of input tokens tensors
                [batch x len x nfeats].
            memory_bank (FloatTensor): output(tensor sequence) from the
                encoder RNN of size (batch x src_len x hidden_size).
            state (FloatTensor): hidden state from the encoder RNN for
                initializing the decoder.
            memory_lengths (LongTensor): the source memory_bank lengths.
        Returns:
            decoder_final (Tensor): final hidden state from the decoder.
            decoder_outputs (Tensor): output from the decoder (after attn)
                `[batch x tgt_len x hidden]`.
            attns (Tensor): distribution over src at each tgt
                `[batch x tgt_len x src_len]`.
        """
        # Initialize local and return variables.
        attns = {}
        emb = tgt
        assert emb.dim() == 3

        coverage = state.coverage

        # GRU keeps a single hidden tensor; LSTM state is an (h, c) tuple.
        if isinstance(self.rnn, nn.GRU):
            rnn_output, decoder_final = self.rnn(emb, state.hidden[0])
        else:
            rnn_output, decoder_final = self.rnn(emb, state.hidden)

        # Check: target and RNN output must agree on batch and length dims.
        tgt_batch, tgt_len, _ = tgt.size()
        output_batch, output_len, _ = rnn_output.size()
        aeq(tgt_len, output_len)
        aeq(tgt_batch, output_batch)
        # END

        # Calculate the attention.
        if self.attn is not None:
            decoder_outputs, p_attn, coverage_v = self.attn(
                rnn_output.contiguous(),
                memory_bank,
                memory_lengths=memory_lengths,
                coverage=coverage
            )
            attns["std"] = p_attn
        else:
            decoder_outputs = rnn_output.contiguous()

        # Update the coverage attention.
        # NOTE(review): this branch reads coverage_v / p_attn, which are only
        # bound when self.attn is not None -- presumably self._coverage implies
        # an attention module is configured; confirm against RNNDecoderBase.
        if self._coverage:
            if coverage_v is None:
                coverage = coverage + p_attn \
                    if coverage is not None else p_attn
            else:
                coverage = coverage + coverage_v \
                    if coverage is not None else coverage_v
            attns["coverage"] = coverage

        decoder_outputs = self.dropout(decoder_outputs)

        # Run the forward pass of the copy attention layer.
        if self._copy and not self._reuse_copy_attn:
            _, copy_attn, _ = self.copy_attn(decoder_outputs,
                                             memory_bank,
                                             memory_lengths=memory_lengths)
            attns["copy"] = copy_attn
        elif self._copy:
            attns["copy"] = attns["std"]

        return decoder_final, decoder_outputs, attns
StarcoderdataPython
11397578
#!/usr/bin/env python3
# -*- coding: utf-8 -*-


def circle(rad):
    """Return the area of a circle with radius ``rad`` (pi approximated as 3.14)."""
    p_circle = rad * rad * 3.14
    return p_circle


def cylinder():
    """Prompt for a cylinder's radius and height, then print either its
    lateral surface area (choice 1) or its total surface area (choice 2)."""
    radius = float(input("Радиус цилиндра: "))
    height = float(input("Высота цилиндра: "))
    mes = input("Для вывода площади боковой поверхности"
                " введиите 1\nДля вывода полной площади"
                " цилиндра введите 2\n"
                " >>>> ")
    if mes.lower() == '1':
        bok = 2 * 3.14 * height * radius
        print(f'Площадь боковой поверхности = {bok}')
    elif mes.lower() == '2':
        # Bug fix: total surface area = lateral area PLUS two base circles
        # (2*pi*r*h + 2*pi*r^2).  The original multiplied the lateral area by
        # twice the base area, which is dimensionally wrong.
        all_p = 2 * 3.14 * height * radius + 2 * circle(radius)
        print(f'Площадь всей поверхности = {all_p}')
    else:
        print("Команда не опознана")


if __name__ == '__main__':
    cylinder()
3499377
# examples/basic_auth/app_oldap.py
# Bug fix: the raw "<filename>..." dataset artifact at the top of this file
# was not valid Python (SyntaxError); it is preserved here as a comment.
from flask import Flask, g, request, session, redirect, url_for
from flask_simpleldap import LDAP

app = Flask(__name__)
app.secret_key = 'dev key'
app.debug = True

# OpenLDAP-flavoured configuration for flask-simpleldap.
app.config['LDAP_OPENLDAP'] = True
app.config['LDAP_OBJECTS_DN'] = 'dn'
app.config['LDAP_REALM_NAME'] = 'OpenLDAP Authentication'
app.config['LDAP_HOST'] = 'openldap.example.org'
app.config['LDAP_BASE_DN'] = 'dc=users,dc=openldap,dc=org'
app.config['LDAP_USERNAME'] = 'cn=user,ou=servauth-users,dc=users,dc=openldap,dc=org'
app.config['LDAP_PASSWORD'] = 'password'
app.config['LDAP_USER_OBJECT_FILTER'] = '(&(objectclass=inetOrgPerson)(uid=%s))'

ldap = LDAP(app)


@app.route('/')
@ldap.basic_auth_required
def index():
    # g.ldap_username is populated by the basic_auth_required decorator.
    return 'Welcome, {0}!'.format(g.ldap_username)


if __name__ == '__main__':
    app.run()
StarcoderdataPython
11317089
<filename>webstruct/webannotator.py """ :mod:`webstruct.webannotator` provides functions for working with HTML pages annotated with WebAnnotator_ Firefox extension. .. _WebAnnotator: https://github.com/xtannier/WebAnnotator """ from __future__ import absolute_import import re import warnings import random import itertools from copy import deepcopy from collections import defaultdict, OrderedDict, namedtuple from lxml import etree from lxml.etree import Element, LXML_VERSION import six from webstruct.utils import html_document_fromstring DEFAULT_COLORS = [ # foreground, background ("#000000", "#33CCFF"), ("#000000", "#FF0000"), ("#000000", "#33FF33"), ("#000000", "#CC66CC"), ("#000000", "#FF9900"), ("#000000", "#99FFFF"), ("#000000", "#FF6666"), ("#000000", "#66FF99"), ("#FFFFFF", "#3333FF"), ("#FFFFFF", "#660000"), ("#FFFFFF", "#006600"), ("#FFFFFF", "#663366"), ("#FFFFFF", "#993300"), ("#FFFFFF", "#336666"), ("#FFFFFF", "#666600"), ("#FFFFFF", "#009900"), ] def _get_colors(index): try: return DEFAULT_COLORS[index] except IndexError: fg = random.choice(["#000000", "#FFFFFF"]) bg = "#" + "".join(random.choice("01234567890ABCDEF") for x in range(6)) return fg, bg class EntityColors(defaultdict): """ ``{"entity_name": ("fg_color", "bg_color", entity_index)}`` mapping that generates entries for new entities on first access. 
""" def __init__(self, **kwargs): self.next_index = len(kwargs) super(EntityColors, self).__init__(self._new_item_factory, **kwargs) def _new_item_factory(self): fg, bg = _get_colors(self.next_index) self.next_index += 1 return fg, bg, self.next_index-1 @classmethod def from_htmlfile(cls, path, encoding=None): """ Load the color mapping from WebAnnotator-annotated HTML file """ with open(path, 'rb') as f: return cls.from_htmlbytes(f.read(), encoding=encoding) @classmethod def from_htmlbytes(cls, html_bytes, encoding=None): colors = cls() tree = html_document_fromstring(html_bytes, encoding=encoding) for wa_color in tree.xpath('//wa-color'): assert wa_color.get('id').lower().startswith('wa-color-') idx = int(wa_color.get('id')[len("WA-color-"):]) fg = wa_color.get('fg') bg = wa_color.get('bg') typ = wa_color.get('type') colors[typ] = (fg, bg, idx) return colors def apply_wa_title(tree): """ Replace page's ``<title>`` contents with a contents of ``<wa-title>`` element and remove ``<wa-title>`` tag. WebAnnotator > 1.14 allows annotation of ``<title>`` contents; it is stored after body in ``<wa-title>`` elements. 
""" for wa_title in tree.xpath('//wa-title'): titles = tree.xpath('//title') if not titles: wa_title.drop_tree() return title = titles[0] head = title.getparent() head.insert(head.index(title), wa_title) title.drop_tree() wa_title.tag = 'title' for attr in wa_title.attrib: wa_title.attrib.pop(attr) return def _fix_sax_attributes(attrs): """ Fix sax startElement attributes for lxml < 3.1.2 """ if LXML_VERSION >= (3, 1, 2): return attrs items = [((None, key), value) for key, value in attrs.items()] return OrderedDict(items) def _add_wacolor_elements(tree, entity_colors): """ Add <wa-color> elements after <body>:: <wa-color id="WA-color-0" bg="#33CCFF" fg="#000000" class="WebAnnotator_ORG" type="ORG"> """ body = tree.find('.//body') if body is None: warnings.warn("html has no <body>, <wa-color> elements are not added") return for wa_color in tree.xpath('//wa-color'): wa_color.drop_tree() items = sorted(entity_colors.items(), key=lambda it: -it[1][2]) for ent, (fg, bg, idx) in items: attrs = OrderedDict([ ('id', "WA-color-%s" % idx), ('bg', bg), ('fg', fg), ('class', "WebAnnotator_%s" % ent), ('type', ent), ]) wa_color = Element("wa-color", attrs) body.addnext(wa_color) def _copy_title(tree): # <wa-title style="box-shadow:0 0 1em black;border:2px solid blue;padding:0.5em;">Contact</wa-title> title = tree.find('.//title') if title is None: return body = tree.find('.//body') if body is None: warnings.warn("html has no <body>, <wa-title> element is not added") return for wa_title in tree.xpath('//wa-title'): wa_title.drop_tree() wa_title = deepcopy(title) wa_title.tag = 'wa-title' wa_title.set('style', 'box-shadow:0 0 1em black;border:2px solid blue;padding:0.5em;') body.addnext(wa_title) text = title.xpath('string()') title.clear() title.text = text def _ensure_head(tree): """ Insert <head> element if it is missing. 
""" heads = tree.xpath('//head') if heads: return heads[0] htmls = tree.xpath('//html') root = htmls[0] if htmls else tree.root head = Element("head") root.insert(0, head) return head def _set_base(tree, baseurl): """ Add <base> tag to the tree. If <base> tag already exists do nothing. """ if tree.xpath('//base'): return head = _ensure_head(tree) head.insert(0, Element("base", href=baseurl)) _TagPosition = namedtuple('_TagPosition', ['element', 'tag', 'position', 'length', 'is_tail', 'dfs_number']) def _translate_to_dfs(positions, ordered): for position in positions: number = ordered[(position.element, position.is_tail)] yield _TagPosition(element=position.element, tag=position.tag, position=position.position, length=position.length, is_tail=position.is_tail, dfs_number=number) def _enclose(to_enclosure, entity_colors): if not to_enclosure: return first = to_enclosure[0][0] element = first.element is_tail = first.is_tail source = element.text if is_tail: source = element.tail if not source or not source.strip(): return remainder = source[:first.position] nodes = list() for idx, (start, end, _id) in enumerate(to_enclosure): limit = len(source) is_last = idx == len(to_enclosure) - 1 if not is_last: limit = to_enclosure[idx + 1][0].position tag = start.tag text = source[start.position + start.length:end.position] tail = source[end.position + end.length:limit] fg, bg, _ = entity_colors[tag] attrs = OrderedDict([ ('wa-id', str(_id)), ('wa-type', str(tag)), ('wa-subtypes', ''), ('style', 'color:%s; background-color:%s;' % (fg, bg)), ('class', 'WebAnnotator_%s' % tag), ]) node = Element('span', _fix_sax_attributes(attrs)) node.text = text node.tail = tail nodes.append(node) if is_tail: element.tail = remainder else: element.text = remainder if is_tail: parent = element.getparent() shift = parent.index(element) + 1 else: parent = element shift = 0 for idx, node in enumerate(nodes): parent.insert(idx + shift, node) def _fabricate_start(element, is_tail, tag): return 
_TagPosition(element=element, tag=tag, position=0, length=0, is_tail=is_tail, dfs_number=0) def _fabricate_end(element, is_tail, tag): target = element.text if is_tail: target = element.tail length = 0 if target: length = len(target) return _TagPosition(element=element, tag=tag, position=length, length=0, is_tail=is_tail, dfs_number=0) def _find_enclosures(starts, ends, dfs_order): for _id, (start, end) in enumerate(zip(starts, ends)): start_number = start.dfs_number end_number = end.dfs_number if start_number == end_number: yield start, end, _id continue for text_node in dfs_order[start_number + 1:end_number]: if text_node is None: continue element, is_tail = text_node if not isinstance(element.tag, six.string_types): continue if element.tag in ['script', 'style']: continue fictive_start = _fabricate_start(element, is_tail, start.tag) fictive_end = _fabricate_end(element, is_tail, start.tag) yield fictive_start, fictive_end, _id fictive_end = _fabricate_end(start.element, start.is_tail, start.tag) yield start, fictive_end, _id fictive_start = _fabricate_start(end.element, end.is_tail, end.tag) yield fictive_start, end, _id def _enumerate_nodes_in_dfs_order(root): ordered = dict() number = 0 for action, element in etree.iterwalk(root, events=('start', 'end')): if action == 'end': is_tail = True ordered[(element, is_tail)] = number number = number + 1 # for tail if action == 'start': is_tail = False ordered[(element, is_tail)] = number number = number + 1 # for text number = number + 1 # for element return ordered def _find_tag_limits(root): START_RE = re.compile(r' __START_(\w+)__ ') END_RE = re.compile(r' __END_(\w+)__ ') starts = list() ends = list() for _, element in etree.iterwalk(root, events=('start',)): tasks = [(element.text, START_RE, starts, False), (element.text, END_RE, ends, False), (element.tail, START_RE, starts, True), (element.tail, END_RE, ends, True)] for text, regexp, storage, is_tail in tasks: if not text: continue for match in 
regexp.finditer(text): if not match: continue storage.append(_TagPosition(element=element, tag=match.group(1), position=match.start(), length=match.end() - match.start(), is_tail=is_tail, dfs_number=-1)) return starts, ends def to_webannotator(tree, entity_colors=None, url=None): """ Convert a tree loaded by one of WebStruct loaders to WebAnnotator format. If you want a predictable colors assignment use ``entity_colors`` argument; it should be a mapping ``{'entity_name': (fg, bg, entity_idx)}``; entity names should be lowercased. You can use :class:`EntityColors` to generate this mapping automatically: >>> from webstruct.webannotator import EntityColors, to_webannotator >>> # trees = ... >>> entity_colors = EntityColors() >>> wa_trees = [to_webannotator(tree, entity_colors) for tree in trees] # doctest: +SKIP """ if not entity_colors: entity_colors = EntityColors() root = deepcopy(tree) # We walk the DOM tree in depth first order and number all nodes. # Also we number each text node as first child and tail node as last child. # So when we have start tag in node with number n # and stop tag in node with number n+m, # we should annotate all nodes with numbers n+i, where i in range [1,m). # All these nodes are located on the rigth and below the start # and on the left and below the end. 
starts, ends = _find_tag_limits(root) if len(ends) != len(starts): raise ValueError('len(ends) != len(starts)') ordered = _enumerate_nodes_in_dfs_order(root) starts = [s for s in _translate_to_dfs(starts, ordered)] ends = [e for e in _translate_to_dfs(ends, ordered)] starts.sort(key=lambda t: (t.dfs_number, t.position)) ends.sort(key=lambda t: (t.dfs_number, t.position)) dfs_order = (max(ordered.values()) + 1) * [None] for text_node, dfs_number in ordered.items(): dfs_order[dfs_number] = text_node to_enclosure = [e for e in _find_enclosures(starts, ends, dfs_order)] def byelement(rec): return (rec[0].element, rec[0].is_tail) to_enclosure.sort(key=lambda rec: (ordered[byelement(rec)], rec[0].position)) for _, enclosures in itertools.groupby(to_enclosure, byelement): enclosures = [e for e in enclosures] _enclose(enclosures, entity_colors) _copy_title(root) _add_wacolor_elements(root, entity_colors) if url is not None: _set_base(root, url) return root
StarcoderdataPython
11381767
"""Morse potential dataset tests. Scientific Machine Learning Benchmark: A benchmark of regression models in chem- and materials informatics. (c) <NAME> 2019, Citrine Informatics. """ def test_morse_potential_examples(): """Tests instantiating Morse potential datasets.""" from smlb.datasets.synthetic.morse_potential.morse_potential import MorsePotentialData (D, r0, a) = (1, 2, 3) mp = MorsePotentialData(D=D, r0=r0, a=a) assert mp.labels([[r0]]) == -1 * D assert (mp.D, mp.r0, mp.a) == (D, r0, a)
StarcoderdataPython
6443942
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for entity linking layer.""" import itertools from absl.testing import absltest import jax import jax.numpy as jnp from language.mentionmemory.modules import entity_attention_layer import numpy as np import scipy.spatial class EntityAttentionLayerTest(absltest.TestCase): """Entity attention layer tests.""" entity_vocab_size = 1000 hidden_size = 32 entity_dim = 16 dtype = jnp.float32 bsz = 4 seq_len = 20 n_mentions = 10 def setUp(self): super().setUp() self.model = entity_attention_layer.EntityAttentionLayer( entity_dim=self.entity_dim, hidden_size=self.hidden_size, dtype=self.dtype, ) entity_embeddings = np.random.rand(self.entity_vocab_size, self.entity_dim) self.entity_embeddings = jnp.asarray(entity_embeddings) self.encoded_input = jnp.ones( shape=(self.bsz, self.seq_len, self.hidden_size), dtype=self.dtype) # input transformed by layer norm, neededed for comparison self.normed_input = jnp.zeros( shape=(self.bsz, self.seq_len, self.hidden_size), dtype=self.dtype) self.rng = jax.random.PRNGKey(0) self.mention_batch_positions = np.random.randint( self.bsz, size=(self.n_mentions)) self.mention_start_positions = np.random.randint( self.seq_len - 1, size=(self.n_mentions)) self.mention_end_positions = self.mention_start_positions + 1 def test_attention_layer(self): """Testing entity attention layer.""" mention_mask = jnp.ones(shape=(self.n_mentions)) output, 
params = self.model.init_with_output( self.rng, encoded_input=self.encoded_input, mention_batch_positions=self.mention_batch_positions, mention_start_positions=self.mention_start_positions, mention_end_positions=self.mention_end_positions, mention_mask=mention_mask, entity_embeddings=self.entity_embeddings, ) params = params['params'] # Check input was changed self.assertFalse(jnp.allclose(output['encoded_output'], self.normed_input)) # Check input was not changed where it should not be all_indices = set( itertools.product(jnp.arange(self.bsz), jnp.arange(self.seq_len))) start_indices = set( zip(self.mention_batch_positions, self.mention_start_positions)) non_start_indices = all_indices.difference(start_indices) non_start_indices_1, non_start_indices_2 = zip(*non_start_indices) non_start_indices_1 = jnp.asarray(non_start_indices_1) non_start_indices_2 = jnp.asarray(non_start_indices_2) non_start_outputs = output['encoded_output'][non_start_indices_1, non_start_indices_2] non_start_inputs = self.normed_input[non_start_indices_1, non_start_indices_2] self.assertTrue(jnp.allclose(non_start_outputs, non_start_inputs)) self.assertSequenceEqual(output['encoded_output'].shape, (self.bsz, self.seq_len, self.hidden_size)) for i in range(self.n_mentions): mention_start_encodings = self.encoded_input[ self.mention_batch_positions[i], self.mention_start_positions[i]] mention_end_encodings = self.encoded_input[ self.mention_batch_positions[i], self.mention_end_positions[i]] mention_encodings = jnp.concatenate( [mention_start_encodings, mention_end_encodings], axis=-1) mention_encodings = jnp.matmul( mention_encodings, params['mention_query_projector']['kernel']) mention_encodings = mention_encodings + params['mention_query_projector'][ 'bias'] self.assertSequenceAlmostEqual( mention_encodings, output['mention_encodings'][i], places=4) self.assertSequenceEqual(output['cosine_similarity'].shape, (self.n_mentions, self.entity_vocab_size)) for i in range(self.n_mentions): for j in 
range(self.entity_vocab_size): self.assertAlmostEqual( output['cosine_similarity'][i, j], 1 - scipy.spatial.distance.cosine(output['mention_encodings'][i], self.entity_embeddings[j]), places=2) self.assertSequenceEqual(output['attention_weights'].shape, (self.n_mentions, self.entity_vocab_size)) def test_masking(self): """Check masked positions not contributing to input.""" mention_mask = jnp.zeros(shape=(self.n_mentions)) output, _ = self.model.init_with_output( self.rng, encoded_input=self.encoded_input, mention_batch_positions=self.mention_batch_positions, mention_start_positions=self.mention_start_positions, mention_end_positions=self.mention_end_positions, mention_mask=mention_mask, entity_embeddings=self.entity_embeddings, ) self.assertTrue(jnp.allclose(output['encoded_output'], self.normed_input)) if __name__ == '__main__': absltest.main()
StarcoderdataPython
240033
from mx.Proxy import WeakProxy

# Exercise WeakProxy behavior after the referent has been garbage collected.
o = []
p = q = WeakProxy(o)
p = q = WeakProxy(o)  # NOTE(review): duplicate of the line above; kept as-is — confirm whether the re-wrap is intentional for this test.
del o
# BUG FIX: `print p` is Python-2-only syntax; the parenthesized form is valid
# in both Python 2 and Python 3 and prints the same single object.
print(p)
StarcoderdataPython
43990
"""Elliptic Curve Method using Montgomery Curves.
"""
import random
import time
from math import gcd

import numpy as np

from wheel_sieve.common import (
    PRIME_GEN,
    InverseNotFound,
    CurveInitFail,
    inv,
    init_wheel,
)


def get_curve_suyama(sigma, n):
    """Given parameter sigma, generate an Elliptic Curve (mod n) and a point on it
    using Suyama's parametrization.

    The constructed curve's group order is a multiple of 12, compared to 4 guaranteed
    for Montgomery Curves.

    Args:
        sigma (int): The sigma parameter.
        n (int): Modulus.

    Raises:
        CurveInitFail: Thrown when the curve generated by the given parameters fails
            the necessary conditions.

    Returns:
        tuple(tuple(int, int), tuple(int, int, int)): (Point, Curve), where
            - Point = (x0, z0) in projective coordinates ignoring y.
            - Curve = (A, s, n), representing
              B * (y/z) ** 2 == (x/z) ** 3 + A * (x/z) ** 2 + (x/z) (mod n),
              ignoring B and y.
            - s = (A+2)/4 % n is precomputed for point doubling.
    """
    # Reject sigma values that would make u, v or v-u degenerate below.
    if sigma % n in (n - 5, n - 3, n - 1, 0, 1, 3, 5) or sigma * 3 % n in (n - 5, 5):
        raise CurveInitFail()
    # NOTE(review): `%` binds tighter than `-`, so this is sigma**2 - (5 % n),
    # i.e. u is not reduced mod n here. Harmless because u is only ever used
    # modulo n below, but (sigma ** 2 - 5) % n was presumably intended — confirm.
    u = sigma ** 2 - 5 % n
    v = 4 * sigma % n
    x0 = u ** 3 % n
    z0 = v ** 3 % n
    A = ((v - u) ** 3 * (3 * u + v) * inv(4 * u ** 3 * v, n) - 2) % n
    # A == +/-2 would make s*t below degenerate (singular curve).
    if A in (n - 2, 2):
        raise CurveInitFail()
    s = (A + 2) * inv(4, n) % n
    # For completeness...
    # B = u * inv(z0, n) % n
    # y = (sigma ** 2 - 1) * (sigma ** 2 - 25) * (sigma ** 4 - 25) % n
    # x0_norm = (x0 * inv(z0, n)) % n
    # y0_norm = (y * inv(z0, n)) % n
    # assert B * y0_norm ** 2 % n == (x0_norm ** 3 + A * x0_norm ** 2 + x0_norm) % n
    return (x0, z0), (A, s, n)


def get_curve_a(x, A, n):
    """Given parameters x and A, generate an Elliptic Curve (mod n) and a point on it.

    Args:
        x (int): Desired x coordinate of the point.
        A (int): Parameter A of Montgomery Curve.
        n (int): Modulus.

    Raises:
        CurveInitFail: Thrown when the curve generated by the given parameters fails
            the necessary conditions.

    Returns:
        tuple(tuple(int, int), tuple(int, int, int)): (Point, Curve), where
            - Point = (x0, z0) in projective coordinates ignoring y.
            - Curve = (A, s, n), representing
              B * (y/z) ** 2 == (x/z) ** 3 + A * (x/z) ** 2 + (x/z) (mod n),
              ignoring B and y.
            - s = (A+2)/4 % n is precomputed for point doubling.
    """
    if A % n in (n - 2, 2):
        raise CurveInitFail()
    x0 = x % n
    z0 = 1
    s = (A + 2) * inv(4, n) % n
    # For completeness...
    # x0_norm = x0
    # y0_norm = 2
    # B = (x0_norm ** 3 + A * x0_norm ** 2 + x0_norm) * inv(y0_norm ** 2, n) % n
    # assert B * y0_norm ** 2 % n == (x0_norm ** 3 + A * x0_norm ** 2 + x0_norm) % n
    return (x0, z0), (A, s, n)


def add_pt(ptp, ptq, pt_, curve):
    """Computes point P+Q given points P, Q and P-Q, and curve.

    Does not return correct result when P == Q, use dbl_pt instead.

    Args:
        ptp (tuple(int, int)): Point P.
        ptq (tuple(int, int)): Point Q.
        pt_ (tuple(int, int)): Point P-Q.
        curve (tuple(int, int, int)): Curve.

    Returns:
        tuple(int, int): Point P+Q.
    """
    xp, zp = ptp
    xq, zq = ptq
    x_, z_ = pt_
    _A, _s, n = curve
    # Standard XZ-only Montgomery differential addition.
    u = (xp - zp) * (xq + zq) % n
    v = (xp + zp) * (xq - zq) % n
    xr = z_ * ((u + v) ** 2 % n) % n
    zr = x_ * ((u - v) ** 2 % n) % n
    return (xr, zr)


def to_weierstrass(pt, curve):
    """Given a point P and an Montgomery Curve it is on, computes the equivalent point
    and curve in weierstrass form.

    Note: Multiple calls for same curve with different P will produce different output
    curves. This is due to y-coordinates being omitted in the representation. Without
    the ability to square-root y (mod n) by fixing B, the natural thing to do is to fix
    y and calculate B. So different point P produces different B.

    Args:
        pt (tuple(int, int)): Point P in XZ form.
        curve (tuple(int, int, int)): Curve in Montgomery form.

    Returns:
        tuple(tuple(int, int), tuple(int, int, int)): (Point, Curve), where
            - Point = (t, v) in XY form.
            - Curve = (a, b, n) representing the Elliptic Curve
              y**2 = x**3 + a*x + b (mod n).
    """
    x, z = pt
    A, _s, n = curve
    # Fix y = 1 and solve for B (see docstring note above).
    y_norm = 1
    x_norm = x * inv(z, n)
    B = (x_norm ** 3 + A * x_norm ** 2 + x_norm) % n
    assert B * y_norm ** 2 % n == (x_norm ** 3 + A * x_norm ** 2 + x_norm) % n
    B_inv = inv(B, n)
    three_inv = inv(3, n)
    t = (x_norm * B_inv + A * three_inv * B_inv) % n
    v = (y_norm * B_inv) % n
    a = (3 - A ** 2) * three_inv * B_inv * B_inv % n
    b = (2 * A ** 3 - 9 * A) * (three_inv * B_inv % n) ** 3 % n
    assert v ** 2 % n == (t ** 3 + a * t + b) % n
    return (t, v), (a, b, n)


def add_pt_exn(ptp, ptq, pt_, curve):
    """Computes point P+Q given points P, Q and P-Q, and curve.

    Does not return correct result when P == Q, use dbl_pt instead.

    Args:
        ptp (tuple(int, int)): Point P.
        ptq (tuple(int, int)): Point Q.
        pt_ (tuple(int, int)): Point P-Q.
        curve (tuple(int, int, int)): Curve.

    Raises:
        InverseNotFound: Thrown when point P+Q is the point at infinity.

    Returns:
        tuple(int, int): Point P+Q.
    """
    return check(add_pt(ptp, ptq, pt_, curve), curve)


def dbl_pt(pt, curve):
    """Computes point 2P given point P and curve.

    Args:
        pt (tuple(int, int)): Point P.
        curve (tuple(int, int, int)): Curve.

    Returns:
        tuple(int, int): Point 2P.
    """
    x, z = pt
    _A, s, n = curve
    # a = (x+z)^2, b = (x-z)^2, t = 4xz; uses the precomputed s = (A+2)/4.
    a = (x + z) ** 2 % n
    b = (x - z) ** 2 % n
    t = a - b
    xr = a * b % n
    zr = t * ((b + s * t) % n) % n
    return (xr, zr)


def mul_pt_exn(pt, curve, k):
    """Computes point kP given point P, curve and k using Montgomery Ladder.

    Args:
        pt (tuple(int, int)): Point P.
        curve (tuple(int, int, int)): Curve.
        k (int): Multiplier.

    Raises:
        InverseNotFound: Thrown when point kP is the point at infinity.

    Returns:
        tuple(int, int): Point kP.
    """
    if k <= 2:
        if k < 0:
            # x and z coordinates are the same for P and -P.
            return mul_pt_exn(pt, curve, -k)
        if k == 0:
            # InverseNotFound will be thrown
            return check((0, 0), curve)
        if k == 1:
            return check(pt, curve)
        return check(dbl_pt(pt, curve), curve)
    # Montgomery ladder: invariant res1 - res0 == P, so add_pt may use pt as
    # the known difference at every step.
    res0 = pt
    res1 = dbl_pt(pt, curve)
    j = k.bit_length() - 2
    while j >= 1:
        if (k >> j) % 2 == 1:
            res0 = add_pt(res1, res0, pt, curve)
            res1 = dbl_pt(res1, curve)
        else:
            res1 = add_pt(res1, res0, pt, curve)
            res0 = dbl_pt(res0, curve)
        j -= 1
    if k % 2 == 1:
        res0 = add_pt(res1, res0, pt, curve)
    else:
        res0 = dbl_pt(res0, curve)
    return check(res0, curve)


def check(pt, curve):
    """Given point P (x, z), check that P is not the point at infinity,
    i.e. gcd(z, n) == 1, and return P.

    Args:
        pt (tuple(int, int)): Point P.
        curve (tuple(int, int, int)): Curve.

    Raises:
        InverseNotFound: Thrown when point P is the point at infinity.

    Returns:
        tuple(int, int): Point P.
    """
    _x, z = pt
    _A, _s, n = curve
    if gcd(z, n) > 1:
        raise InverseNotFound(z, n)
    return pt


def ecm(n, rounds, b1, b2):
    """Elliptic Curve Factorization Method. In each round, the following steps are
    performed:
      0. Generate random point and curve.
      1. Repeatedly multiply the current point by small primes raised to some power,
         determined by b1.
      2. Repeatedly try to multiply the point from step 1 by primes (with wheel of
         2310) between b1 and b2.
    Returns when a non-trivial factor is found.

    Args:
        n (int): Number to be factorized. n >= 12.
        rounds (int): Number of random curves to try.
        b1 (int): Bound for primes used in step 1.
        b2 (int): Bound for primes searched for in step 2. b1 < b2.

    Raises:
        ValueError: Thrown when n < 12.

    Returns:
        int: Non-trivial factor if found, otherwise returns None.
    """
    if n < 12:
        raise ValueError
    wheel = 2310
    st = time.time()
    j_list, prime_array = init_wheel(b1, b2, wheel)
    print("Init time: {:.2f}".format(time.time() - st))
    for round_i in range(rounds):
        st = time.time()
        print("Round {}...".format(round_i))
        count = 0
        success = False
        # Retry curve generation a bounded number of times; a lucky
        # InverseNotFound during init can already reveal a factor.
        while not success and count < 20:
            try:
                count += 1
                sigma = random.randint(6, n - 6)
                pt, curve = get_curve_suyama(sigma, n)
                success = True
            except InverseNotFound as e:
                res = gcd(e.x, n)
                if 1 < res < n:
                    return res
            except CurveInitFail:
                pass
        if not success:
            print(" - Curve Init Failed.")
            break
        try:
            # Step 1
            print("{:>5.2f}: Step 1".format(time.time() - st))
            for p in PRIME_GEN(b1):
                # Raise each small prime to the largest power with p**e <= b1.
                for _ in range(int(np.log(b1) / np.log(p))):
                    pt = mul_pt_exn(pt, curve, p)
            # Step 2
            print("{:>5.2f}: Step 2".format(time.time() - st))
            q = pt
            mq = mul_pt_exn(q, curve, wheel)
            xj_list = []
            for j in j_list:
                xj, zj = mul_pt_exn(q, curve, j)
                xj_list.append(xj * inv(zj, n) % n)
            c1 = b1 // wheel
            c2 = b2 // wheel + 2
            c = 0
            cq = mul_pt_exn(q, curve, c1 * wheel)
            cq_ = mul_pt_exn(q, curve, (c1 - 1) * wheel)
            while c < c2 - c1:
                # Accumulate the product of all candidate differences so a
                # single gcd per wheel step suffices.
                s = 1
                for xj, is_prime in zip(
                    xj_list, np.unpackbits(prime_array[c, :], bitorder="little")
                ):
                    if is_prime:
                        t = (xj * cq[1] - cq[0]) % n
                        if t != 0:
                            s = s * t % n
                res = gcd(s, n)
                if 1 < res < n:
                    return res
                elif res == n:
                    # Product collapsed to 0 mod n; re-scan term by term.
                    for xj in xj_list:
                        res = gcd(xj * cq[1] - cq[0], n)
                        if 1 < res < n:
                            return res
                    # s is a multiple of n while each of {(xj * cq[1] - cq[0]) % n} is not.
                    # There must be at least 2 non-trivial factors. The function should have returned.
                    assert False
                c += 1
                cq, cq_ = add_pt_exn(cq, mq, cq_, curve), cq
            print("{:>5.2f}: End".format(time.time() - st))
        except InverseNotFound as e:
            res = gcd(e.x, n)
            if 1 < res < n:
                return res
    return None


if __name__ == "__main__":
    random.seed(2)
    # (406724252548875212358759885439 * 724413085648406196306771670711)
    num = 294636370796972331405770334382449402989049465216208991677129
    print(ecm(num, 430, 250_000, 40_000_000))
StarcoderdataPython
5111694
import requests
from Services.ApiAddressService import ApiAddressService
from Services.StorageCookieService import StorageCookieService


class FriendShipApiService(object):
    """Thin HTTP client for the backend's Friendship endpoints.

    Each method returns the raw ``requests`` response object; the stored
    session cookie is attached to every request.
    """

    def __init__(self):
        # Base address and cookie store are resolved through the two
        # service objects rather than hard-coded here.
        self.apiaddress = ApiAddressService()
        self.storagecookie = StorageCookieService()

    def Mine(self):
        """Fetch the current user's friendship list."""
        url = "{0}/Friendship/Mine".format(self.apiaddress.getaddress())
        return requests.get(url, cookies=self.storagecookie.get())

    def UserDetail(self, userid):
        """Fetch the detail view for the user with the given id."""
        url = "{0}/Friendship/UserDetail?id={1}".format(
            self.apiaddress.getaddress(), str(userid)
        )
        return requests.get(url, cookies=self.storagecookie.get())

    def DeleteFriend(self, userid):
        """Ask the backend to remove the given user from the friend list."""
        url = "{0}/Friendship/DeleteFriend".format(self.apiaddress.getaddress())
        payload = {"id": str(userid)}
        return requests.post(url, data=payload, cookies=self.storagecookie.get())
StarcoderdataPython
103573
-- Enter your code here  (translated from the original Korean comment)
-- Marks each cart as abused (1) when the summed product price does NOT exceed
-- the coupon's minimum_requirement, otherwise 0: IF(condition, 0, 1) returns
-- 0 when sum(price) > minimum_requirement.
-- NOTE(review): the JOIN has no ON clause; rows are paired only through the
-- WHERE condition (MySQL accepts this) — confirm this matches the intended join.
SELECT c_p.cart_id as CART_ID
      ,if(sum(price)>minimum_requirement,0,1) as abused
from Cart_products as c_p
join coupons as c
where c_p.cart_id = c.cart_id
group by c_p.cart_id;
StarcoderdataPython
9626554
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Wrapper for contextvars that can fall back to old os.environ hack."""

import os
import contextvars

from google.appengine.runtime.context import gae_headers
from google.appengine.runtime.context import oauth
from google.appengine.runtime.context import wsgi

# When true, every accessor below bypasses contextvars and reads/writes
# os.environ, preserving the legacy GAE behavior.
READ_FROM_OS_ENVIRON = os.environ.get('READ_GAE_CONTEXT_FROM_OS_ENVIRON',
                                      'true') == 'true'


def _get_ctxvar(key):
  """Return the ContextVar named *key* from oauth, gae_headers or wsgi."""
  ctxvar = vars(oauth).get(key, vars(gae_headers).get(key, vars(wsgi).get(key)))
  assert isinstance(ctxvar, contextvars.ContextVar)
  return ctxvar


def get(key, default=None):
  """Read context from os.environ if READ_GAE_CONTEXT_FROM_OS_ENVIRON else, from contextvars."""
  if READ_FROM_OS_ENVIRON:
    return os.environ.get(key, default)
  ctxvar = _get_ctxvar(key)
  val = ctxvar.get(default)
  # Boolean context values are exposed using os.environ's '0'/'1' convention.
  if isinstance(val, bool):
    return '1' if val else '0'
  return val


def put(key, value):
  """Write context to os.environ if READ_GAE_CONTEXT_FROM_OS_ENVIRON else, to contextvars."""
  if READ_FROM_OS_ENVIRON:
    os.environ[key] = value
    return
  ctxvar = _get_ctxvar(key)
  # BUG FIX: the original set the var twice in a row — first to the boolean
  # (value == '1'), then unconditionally to the raw value, so the first set
  # was dead code. Observable behavior (raw value wins) is kept.
  # NOTE(review): the dead line suggests '0'/'1' strings were meant to be
  # stored as bools to mirror get(); confirm intent before changing that.
  ctxvar.set(value)


def clear():
  """Clear the context store (os.environ branch only)."""
  if READ_FROM_OS_ENVIRON:
    os.environ.clear()
    return
  # BUG FIX: the original looped `del key` over a copied Context, which only
  # unbound the local loop variable — a no-op. ContextVars cannot be bulk
  # deleted, so the contextvars branch is a documented no-op.


def update(env):
  """Merge *env* (mapping or iterable of (key, value) pairs) into the context."""
  if READ_FROM_OS_ENVIRON:
    os.environ.update(env)
    return
  # BUG FIX: the original iterated `for key, value in env`, which unpacks
  # only keys and raises for dict inputs. Accept a mapping or an iterable of
  # pairs, matching os.environ.update.
  pairs = env.items() if hasattr(env, 'items') else env
  for key, value in pairs:
    put(key, value)


def pop(key):
  """Remove *key* from the context and return its previous value."""
  if READ_FROM_OS_ENVIRON:
    return os.environ.pop(key)
  # BUG FIX: the original executed `del contextvars.copy_context().items()[key]`,
  # which always raises TypeError. ContextVars cannot be deleted, so reset the
  # value to None and return the previous value, approximating dict.pop.
  ctxvar = _get_ctxvar(key)
  value = ctxvar.get(None)
  ctxvar.set(None)
  return value


def items():
  """Return the context as a mapping (os.environ itself in legacy mode)."""
  if READ_FROM_OS_ENVIRON:
    return os.environ
  return {x.name: y for x, y in contextvars.copy_context().items()}


def init_from_wsgi_environ(wsgi_env):
  """Populate the GAE-header and WSGI context variables from a WSGI environ."""
  gae_headers.init_from_wsgi_environ(wsgi_env)
  wsgi.init_from_wsgi_environ(wsgi_env)
StarcoderdataPython
11215906
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# -*- coding: utf-8 -*-
#
# Get-OTX-IOCs
# Retrieves IOCs from Open Threat Exchange
#
# Create an account and select your feeds
# https://otx.alienvault.com
#
# Changes:
# 16.12.2017 - Merged the changes by Scott with the code base
# 22.11.2017 - <NAME> <EMAIL>
# 13.02.2018 - Reworked the hash whitelist
#
# NOTE(review): this script uses Python-2 era string handling (see the
# `.encode('unicode-escape').replace(...)` call in write_iocs, which mixes
# bytes and str on Python 3) — confirm the target interpreter before porting.

from OTXv2 import OTXv2
import re
import os
import sys
import traceback
import argparse

OTX_KEY = '7607c7e15409381f7f8532b4d4caaeaa6c96e75cea7ef3169d9b6bad19290c43'

# Hashes that are often included in pulses but are false positives
HASH_WHITELIST = ['e617348b8947f28e2a280dd93c75a6ad',
                  '125da188e26bd119ce8cad7eeb1fc2dfa147ad47',
                  '06f7826c2862d184a49e3672c0aa6097b11e7771a4bf613ec37941236c1a8e20',
                  'd378bffb70923139d6a4f546864aa61c',
                  '8094af5ee310714caebccaeee7769ffb08048503ba478b879edfef5f1a24fefe',
                  '01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b',
                  'b6f9aa44c5f0565b5deb761b1926e9b6',
                  # Empty file
                  'd41d8cd98f00b204e9800998ecf8427e',
                  'da39a3ee5e6b4b0d3255bfef95601890afd80709',
                  'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
                  # One byte line break file (Unix) 0x0a
                  '68b329da9893e34099c7d8ad5cb9c940',
                  'adc83b19e793491b1c6ea0fd8b46cd9f32e592fc',
                  '01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b',
                  # One byte line break file (Windows) 0x0d0a
                  '81051bcc2cf1bedf378224b0a93e2877',
                  'ba8ab5a0280b953aa97435ff8946cbcbb2755a27',
                  # NOTE(review): '<KEY>' is a dataset redaction placeholder,
                  # not a real hash — restore the original value if available.
                  '<KEY>',
                  ]

FILENAMES_WHITELIST = ['wncry']

DOMAIN_WHITELIST = ['proofpoint.com']


class WhiteListedIOC(Exception):
    # Raised internally to skip a whitelisted indicator.
    pass


class OTXReceiver():
    """Downloads OTX pulses and writes hash / filename / C2 IOC files."""

    # IOC Strings (accumulated output buffers, one per output file)
    hash_iocs = ""
    filename_iocs = ""
    c2_iocs_ipv4 = ""
    c2_iocs_ipv6 = ""
    c2_iocs_domain = ""

    # Output format
    separator = ";"
    use_csv_header = False
    extension = "txt"
    hash_upper = True
    filename_regex_out = True

    def __init__(self, api_key, siem_mode, debug, proxy, csvheader, extension):
        self.debug = debug
        self.otx = OTXv2(api_key, proxy)
        # SIEM mode switches to CSV-style output and disables regex escaping.
        if siem_mode:
            self.separator = ","
            self.use_csv_header = csvheader
            self.extension = extension
            self.hash_upper = True
            self.filename_regex_out = False

    def get_iocs_last(self):
        """Download all events (pulses) from the OTX feed into self.events."""
        # mtime = (datetime.now() - timedelta(days=days_to_load)).isoformat()
        print("Starting OTX feed download ...")
        self.events = self.otx.getall()
        print("Download complete - %s events received" % len(self.events))
        # json_normalize(self.events)

    def write_iocs(self, ioc_folder):
        """Convert downloaded events into IOC lines and write the output files."""

        hash_ioc_file = os.path.join(ioc_folder, "otx-hash-iocs.{0}".format(self.extension))
        filename_ioc_file = os.path.join(ioc_folder, "otx-filename-iocs.{0}".format(self.extension))
        c2_ioc_ipv4_file = os.path.join(ioc_folder, "otx-c2-iocs-ipv4.{0}".format(self.extension))
        c2_ioc_ipv6_file = os.path.join(ioc_folder, "otx-c2-iocs-ipv6.{0}".format(self.extension))
        c2_ioc_domain_file = os.path.join(ioc_folder, "otx-c2-iocs.{0}".format(self.extension))

        print("Processing indicators ...")
        for event in self.events:
            try:
                for indicator in event["indicators"]:
                    try:
                        # Description
                        # NOTE(review): Python-2-only — on Python 3 encode()
                        # returns bytes and .replace(str, str) raises TypeError.
                        description = event["name"].encode('unicode-escape').replace(self.separator, " - ")

                        # Hash IOCs
                        if indicator["type"] in ('FileHash-MD5', 'FileHash-SHA1', 'FileHash-SHA256'):

                            # Whitelisting
                            if indicator["indicator"].lower() in HASH_WHITELIST:
                                raise WhiteListedIOC

                            # NOTE(review): `hash` shadows the builtin of the same name.
                            hash = indicator["indicator"]
                            if self.hash_upper:
                                hash = indicator["indicator"].upper()

                            self.hash_iocs += "{0}{3}{1} {2}\n".format(
                                hash,
                                description,
                                " / ".join(event["references"])[:80],
                                self.separator)

                        # Filename IOCs
                        if indicator["type"] == 'FilePath':

                            # Whitelisting
                            for w in FILENAMES_WHITELIST:
                                if w in indicator["indicator"]:
                                    raise WhiteListedIOC

                            filename = indicator["indicator"]
                            if self.filename_regex_out:
                                filename = my_escape(indicator["indicator"])

                            self.filename_iocs += "{0}{3}{1} {2}\n".format(
                                filename,
                                description,
                                " / ".join(event["references"])[:80],
                                self.separator)

                        # C2 IOCs
                        # Whitelisting
                        if indicator["type"] in ('IPv4', 'IPv6', 'domain', 'hostname', 'CIDR'):
                            for domain in DOMAIN_WHITELIST:
                                if domain in indicator["indicator"]:
                                    print(indicator["indicator"])
                                    raise WhiteListedIOC

                            if indicator["type"] == 'IPv4':
                                self.c2_iocs_ipv4 += "{0}{3}{1} {2}\n".format(
                                    indicator["indicator"],
                                    description,
                                    " / ".join(event["references"])[:80],
                                    self.separator)

                            if indicator["type"] == 'IPv6':
                                self.c2_iocs_ipv6 += "{0}{3}{1} {2}\n".format(
                                    indicator["indicator"],
                                    description,
                                    " / ".join(event["references"])[:80],
                                    self.separator)

                            if indicator["type"] in ('domain', 'hostname', 'CIDR'):
                                self.c2_iocs_domain += "{0}{3}{1} {2}\n".format(
                                    indicator["indicator"],
                                    description,
                                    " / ".join(event["references"])[:80],
                                    self.separator)

                    except WhiteListedIOC as e:
                        pass

            except Exception as e:
                traceback.print_exc()

        # Write to files
        with open(hash_ioc_file, "w") as hash_fh:
            if self.use_csv_header:
                hash_fh.write('hash{0}'.format(self.separator) + 'source\n')
            hash_fh.write(self.hash_iocs)
            print("{0} hash iocs written to {1}".format(self.hash_iocs.count('\n'), hash_ioc_file))
        with open(filename_ioc_file, "w") as fn_fh:
            if self.use_csv_header:
                fn_fh.write('filename{0}'.format(self.separator) + 'source\n')
            fn_fh.write(self.filename_iocs)
            print("{0} filename iocs written to {1}".format(self.filename_iocs.count('\n'), filename_ioc_file))
        with open(c2_ioc_ipv4_file, "w") as c24_fh:
            if self.use_csv_header:
                c24_fh.write('host{0}'.format(self.separator) + 'source\n')
            c24_fh.write(self.c2_iocs_ipv4)
            print("{0} c2 ipv4 iocs written to {1}".format(self.c2_iocs_ipv4.count('\n'), c2_ioc_ipv4_file))
        with open(c2_ioc_ipv6_file, "w") as c26_fh:
            if self.use_csv_header:
                c26_fh.write('host{0}'.format(self.separator) + 'source\n')
            c26_fh.write(self.c2_iocs_ipv6)
            print("{0} c2 ipv6 iocs written to {1}".format(self.c2_iocs_ipv6.count('\n'), c2_ioc_ipv6_file))
        with open(c2_ioc_domain_file, "w") as c2d_fh:
            if self.use_csv_header:
                c2d_fh.write('host{0}'.format(self.separator) + 'source\n')
            c2d_fh.write(self.c2_iocs_domain)
            print("{0} c2 domain iocs written to {1}".format(self.c2_iocs_domain.count('\n'), c2_ioc_domain_file))


def my_escape(string):
    # Backslash-escape regex metacharacters so filenames can be used as patterns.
    return re.sub(r'([\-\(\)\.\[\]\{\}\\\+])', r'\\\1', string)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='OTX IOC Receiver')
    parser.add_argument('-k', help='OTX API key', metavar='APIKEY', default=OTX_KEY)
    # parser.add_argument('-l', help='Time frame in days (default=1)', default=1)
    parser.add_argument('-o', metavar='dir', help='Output directory', default='../iocs')
    parser.add_argument('-p', metavar='proxy', help='Proxy server (e.g. http://proxy:8080 or '
                                                    'http://user:pass@proxy:8080', default=None)
    parser.add_argument('--verifycert', action='store_true', help='Verify the server certificate', default=False)
    parser.add_argument('--siem', action='store_true', default=False, help='CSV output for use in SIEM systems (e.g. Splunk)')
    parser.add_argument('--nocsvheader', action='store_true', default=False, help='Disable header in CSV output (e.g. McAfee SIEM)')
    parser.add_argument('-e', metavar='ext', help='File extension', default='txt')
    parser.add_argument('--debug', action='store_true', default=False, help='Debug output')

    args = parser.parse_args()

    if len(args.k) != 64:
        print("Set an API key in script or via -k APIKEY. Go to https://otx.alienvault.com create an account and get your own API key")
        sys.exit(0)

    # Create a receiver
    otx_receiver = OTXReceiver(api_key=args.k, siem_mode=args.siem, debug=args.debug, proxy=args.p,
                               csvheader=(not args.nocsvheader), extension=args.e)

    # Retrieve the events and store the IOCs
    # otx_receiver.get_iocs_last(int(args.l))
    otx_receiver.get_iocs_last()

    # Write IOC files
    otx_receiver.write_iocs(ioc_folder=args.o)
StarcoderdataPython
1846634
# -*- coding: utf-8 -*- from sqlalchemy import Column, ForeignKey from sqlalchemy import String from sqlalchemy.orm import relationship from app.model import Base class HoloMemberTwitterUrl(Base): __tablename__ = 'holo_member_twitter_url' holo_member_tweet_id = Column(String(50), ForeignKey('holo_member_tweet.tweet_id')) holo_member_tweet = relationship("HoloMemberTweet", backref="holo_member_twitter_url") url = Column(String(300), nullable=True) def __repr__(self): return "<HoloMemberTwitterUrl(url='%s')>" % ( self.url ) @classmethod def get_id(cls): return HoloMemberTwitterUrl.index FIELDS = {"url": str} FIELDS.update(Base.FIELDS)
StarcoderdataPython
246766
import pymath

# Read the three physical quantities from standard input, one per line.
radius = float(input())
time = float(input())
cycles = float(input())

# Delegate the actual physics to the pymath library and report the result.
speed = pymath.physics.tangental_speed(radius, time, cycles)
print("tangental speed = {}".format(speed))
StarcoderdataPython
11341297
import json
import subprocess
import os
import time

# When True, run everything on this machine with ./configs/local.json;
# otherwise drive the remote gcloud instances with ./configs/remote.json.
local = True

# Child processes (participants / RL server) launched for the current round.
pool = []

run_server = "sudo docker exec -i participant ./bin/prac-server -node=co -preload -addr="
run_rl_server_cmd = "python3 downserver/main.py "
protocols = ["rac", "3pc", "2pc"]

# BUG FIX: this file was opened twice in a row with identical arguments; the
# second open truncated the file again and leaked the first handle.
logf = open("./tmp/progress.log", "w")

if local:
    run_client_cmd = "./bin/prac-server -node=ca -local=true -addr="
else:
    run_client_cmd = "./bin/prac-server -node=ca -addr="


def get_server_cmd(addr, r, minlevel, env, nf):
    """Build the participant (server) command line for one node."""
    cmd = run_server + str(addr) + \
        " -r=" + str(r) + \
        " -tl=" + str(env) + \
        " -nf=" + str(nf) + \
        " -ml=" + str(minlevel)
    return cmd


def get_client_cmd(bench, protocol, clients, r, file, env=20, alg=1, nf=-1, ml = 1):
    """Build the coordinator (client) command line; `file` is a shell redirection."""
    return run_client_cmd + " -bench=" + str(bench) + \
        " -p=" + str(protocol) + \
        " -c=" + str(clients) + \
        " -d=" + str(alg) + \
        " -nf=" + str(nf) + \
        " -tl=" + str(env) + \
        " -ml=" + str(ml) + \
        " -r=" + str(r) + file


if local:
    with open("./configs/local.json") as f:
        config = json.load(f)
else:
    with open("./configs/remote.json") as f:
        config = json.load(f)
# Append every coordinator address to the client command.
for id_ in config["coordinators"]:
    run_client_cmd = run_client_cmd + config["coordinators"][id_]


# gcloud beta compute ssh --zone "asia-southeast1-a" "cohort1" -- '
def execute_cmd_in_gcloud(zone, instance, cmd):
    """Run `cmd` on a gcloud instance over ssh; returns the Popen handle."""
    cmd = "gcloud beta compute ssh --zone " + "%s %s -- \'" % (zone, instance) + " " + cmd + "\'"
    ssh = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return ssh


def run_task(cmd):
    """Log and spawn a local shell command; returns the Popen handle."""
    print(cmd, file=logf)
    logf.flush()
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
    return p


def start_participant(zone, instance, service, r, minlevel, env, nf):
    """Start one participant server on a remote gcloud instance."""
    cmd = get_server_cmd(service, r, minlevel, env, nf)
    return execute_cmd_in_gcloud(zone, instance, cmd)


def start_service_on_all(r, run_rl = False, time = 0, minlevel=1, env=25, nf=-1):
    """Start the RL server (optionally) and, in remote mode, all participants."""
    if run_rl:
        pool.append(run_task(run_rl_server_cmd + str(time) + ">./tmp/train.log"))
    if local:
        return
    for id_ in config["participants"]:
        pool.append(
            start_participant(config["zones"][id_], config["instances"][id_],
                              config["participants"][id_], r, minlevel, env, nf))


def terminate_service():
    """Wait for every spawned process, then reset the pool."""
    global pool
    for p in pool:
        p.wait()
    pool = []


# Number of repetitions per experimental configuration.
TestBatch = 5


def delete_extra_zero(n):
    """Render a number without trailing zeros (e.g. 4.50 -> '4.5', 3.0 -> '3')."""
    if isinstance(n, int):
        return str(n)
    if isinstance(n, float):
        n = str(n).rstrip('0')
        if n.endswith('.'):
            n = n.rstrip('.')
        return n
    return "nooo"


def run_exp_dense(bench, r=3, proto = "all"):
    """Dense-workload sweep over client counts 50..1000 for one or all protocols.

    The `filename` trick: it starts as '> path' (truncate) and is promoted to
    '>> path' (append) after the first run of each configuration.
    """
    upper = 1000
    l = [c for c in range(50, upper+1, 50)]
    for c in l:
        filename = ">./tmp/" + delete_extra_zero(r) + "/" + bench.upper() + str(c) + ".log"
        if proto == "all":
            for po in protocols:
                for each in range(TestBatch):
                    start_service_on_all(r)
                    time.sleep(1)
                    p = run_task(get_client_cmd(bench, po, c, r, filename))
                    p.wait()
                    terminate_service()
                    if filename[1] == '.':
                        filename = ">" + filename
            for each in range(TestBatch):
                start_service_on_all(r)
                time.sleep(1)
                p = run_task(get_client_cmd(bench, "rac", c, r, filename, ml=2))
                p.wait()
                terminate_service()
                if filename[1] == '.':
                    filename = ">" + filename
        else:
            for each in range(TestBatch):
                start_service_on_all(r)
                time.sleep(1)
                p = run_task(get_client_cmd(bench, proto, c, r, filename))
                p.wait()
                terminate_service()
                if filename[1] == '.':
                    filename = ">" + filename


def run_heu(alg, env, bench = "tpc", c = 700, r = 4.5, nf = -1):
    """Run one heuristic configuration; env<=0 selects the CF series, else NF."""
    if env <= 0:
        filename = ">./tmp/he/CF-" + str(-env) + "-" + str(alg) + ".log"
    else:
        filename = ">./tmp/he/NF-" + str(nf) + "-" + str(alg) + ".log"
    for each in range(TestBatch):
        start_service_on_all(r, run_rl= (alg == 0), time=3 * max(-env, nf) + 5 + 5, env=env, nf=nf)
        time.sleep(2)
        p = run_task(get_client_cmd(bench, "rac", c, r, filename, env, alg, nf))
        p.wait()
        terminate_service()
        if filename[1] == '.':
            filename = ">" + filename


def run_exp_loose(bench, r):
    """Loose-workload sweep over client counts 128 and 256 for four protocols."""
    l = [2**c for c in range(7, 9)]
    for c in l:
        filename = ">./tmp/loose/" + bench.upper() + str(c) + ".log"
        rnd = TestBatch
        for po, lv in [("3pc", 1), ("pac", 1), ("2pc", 1), ("rac", 1)]:
            for each in range(rnd):
                start_service_on_all(r)
                time.sleep(1)
                p = run_task(get_client_cmd(bench, po, c, r, filename, ml=lv))
                p.wait()
                terminate_service()
                if filename[1] == '.':
                    filename = ">" + filename


def run_all_heu():
    """Full heuristic sweep: CF series then NF series, t doubling each round."""
    t = 1
    for r in range(5):
        for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 24, 32, 48, 64, 96, 128]:
            run_heu(i, -t)
        t *= 2
    t = 1
    for r in range(5):
        for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 12, 16, 24, 32, 48, 64, 96, 128]:
            run_heu(i, 33, nf=t)
        t *= 2


# BUG FIX: `run_loose_heu` was defined twice; the first definition
# (t in [1, 4, 16]) was dead code, silently shadowed by this one.
def run_loose_heu():
    """Reduced heuristic sweep used for the loose workload."""
    for t in [1, 16]:
        for i in [1, 2, 4, 8, 16, 32, 64, 128]:  #, 3, 4, 5, 6, 7, 8, 12, 16, 24, 32, 48, 64, 96, 128]:
            run_heu(i, -t)
            run_heu(i, 33, nf=t)


if __name__ == '__main__':
    # run_exp_dense("tpc", 3)
    # run_exp_dense("ycsb", 3) needs to change constants
    # for r in range(1, 3):
    #     run_exp_dense("tpc", r, "rac")
    # for r in range(4, 8):
    #     run_exp_dense("tpc", r, "rac")
    run_exp_loose("ycsb", 1)
    # run_exp_loose("ycsb", 3, "tpc")
    # for r in [0.5, 1.3, 1.6, 2.5, 3.5, 4.5, 5.5, 7, 8]:
    #     run_exp_dense("tpc", r, "rac")
    # run_loose_heu()
    # logf.close()
StarcoderdataPython
8075110
import tensorflow as tf
from tensorflow import keras

### Coefficient of Determination (R-squared)
### Wikipedia (English): https://en.wikipedia.org/wiki/Coefficient_of_determination
### Wikipedia (Korean): https://ko.wikipedia.org/wiki/%EA%B2%B0%EC%A0%95%EA%B3%84%EC%88%98
### R-squared is normally defined in the range 0..1, but
### a value below 0 appears when the model's error exceeds the spread of
### y_data around its own mean
### (in other words, simply predicting the mean of y_data would do better —
### i.e. the model is very bad).
def coeff_determination(y_true, y_pred):
    # NOTE(review): imports the standalone `keras` backend although the rest
    # of the file uses `tensorflow.keras` — confirm both resolve to the same
    # backend in the target environment.
    from keras import backend as K
    SS_res = K.sum(K.square( y_true-y_pred ))
    SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
    r2 = 1 - SS_res/(SS_tot + K.epsilon())
    return r2

# Training data: y = 3x + 1.
x_data = [1,2,3,4,5]
y_data = [4,7,10,13,16]

model = keras.Sequential([keras.layers.Dense(1, input_shape=[1])])
optimizer = keras.optimizers.RMSprop(lr=0.2)

### Compile the prediction model with the optimizer chosen above.
model.compile(loss='mse', optimizer=optimizer, metrics=[coeff_determination])

### Fit the model for the given number of epochs.
model.fit(x_data, y_data, epochs=100)

### Feed x_data into the trained model and store the predictions in y_pred_data.
y_pred_data = model.predict(x_data)

### Print each x, y, predicted y, and the absolute error.
for idx, xi in enumerate(x_data):
    yi = y_data[idx]
    y_pred_i = y_pred_data[idx][0]
    error_i = abs(yi-y_pred_i)
    print('x: {0}, y: {1}, y_pred: {2:.4f}, Mean_squared_error: {3:.4f}'.format(xi, yi, y_pred_i, error_i))
    pass
StarcoderdataPython
8043121
# TODO Move to regex
REGEX_SUFFIX = r"\s*=[^\r\n]*"

# php.ini directives whose assignment lines should be matched.
_PHP_DIRECTIVES = (
    "file_uploads",
    "allow_url_fopen",
    "memory_limit",
    "upload_max_filesize",
    "cgi.fix_pathinfo",
    "max_execution_time",
    "date.timezone",
)

# One pattern per directive: the directive name followed by '= <rest of line>'.
PHP_ORIGIN = tuple(directive + REGEX_SUFFIX for directive in _PHP_DIRECTIVES)
StarcoderdataPython
199349
# coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XLM configuration """
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import logging
import sys
from io import open

from .configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-mlm-en-2048': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-en-2048-config.json",
    'xlm-mlm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-ende-1024-config.json",
    'xlm-mlm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enfr-1024-config.json",
    'xlm-mlm-enro-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-enro-1024-config.json",
    'xlm-mlm-tlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-tlm-xnli15-1024-config.json",
    'xlm-mlm-xnli15-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-xnli15-1024-config.json",
    'xlm-clm-enfr-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-enfr-1024-config.json",
    'xlm-clm-ende-1024': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-clm-ende-1024-config.json",
    'xlm-mlm-17-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-17-1280-config.json",
    'xlm-mlm-100-1280': "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-mlm-100-1280-config.json",
}


class XLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `XLMModel`.

    (The previous docstring documented XLNet-style parameters — d_model,
    n_layer, attn_type, mem_len, ... — that do not exist on this class;
    the list below matches the actual `__init__` signature.)

    Args:
        vocab_size: Vocabulary size of `inputs_ids` in `XLMModel`.
        emb_dim: Dimensionality of the encoder layers and the pooler layer.
        n_layers: Number of hidden layers in the Transformer encoder.
        n_heads: Number of attention heads for each attention layer.
        dropout: Dropout probability for the fully connected layers.
        attention_dropout: Dropout probability for the attention mechanism.
        gelu_activation: Use gelu (True) instead of relu (False) activations.
        sinusoidal_embeddings: Use sinusoidal instead of learned position embeddings.
        causal: Whether the model should behave causally (unidirectional attention).
        asm: Use an adaptive-softmax projection layer instead of a linear one.
        n_langs: Number of languages the model handles.
        use_lang_emb: Whether to use language embeddings (multilingual models).
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with.
        embed_init_std: Stddev of the normal initializer for embedding matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
        init_std: Stddev of the normal initializer for all non-embedding weights.
        bos_index: Vocabulary index of the beginning-of-sentence token.
        eos_index: Vocabulary index of the end-of-sentence token.
        pad_index: Vocabulary index of the padding token.
        unk_index: Vocabulary index of the unknown-word token.
        mask_index: Vocabulary index of the mask token.
        is_encoder: Whether the initialized model is an encoder.
        summary_type: How the sequence-summary module pools the hidden states
            (e.g. 'first').
        summary_use_proj: Whether the sequence summary uses a projection layer.
        summary_activation: Activation applied to the sequence-summary projection.
        summary_proj_to_labels: Whether the summary projection maps to num_labels.
        summary_first_dropout: Dropout applied before the summary projection.
        start_n_top: Beam size for the start positions of the QA head.
        end_n_top: Beam size for the end positions of the QA head.
    """
    pretrained_config_archive_map = XLM_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size=30145,
                 emb_dim=2048,
                 n_layers=12,
                 n_heads=16,
                 dropout=0.1,
                 attention_dropout=0.1,
                 gelu_activation=True,
                 sinusoidal_embeddings=False,
                 causal=False,
                 asm=False,
                 n_langs=1,
                 use_lang_emb=True,
                 max_position_embeddings=512,
                 embed_init_std=2048 ** -0.5,
                 layer_norm_eps=1e-12,
                 init_std=0.02,
                 bos_index=0,
                 eos_index=1,
                 pad_index=2,
                 unk_index=3,
                 mask_index=5,
                 is_encoder=True,
                 summary_type='first',
                 summary_use_proj=True,
                 summary_activation=None,
                 summary_proj_to_labels=True,
                 summary_first_dropout=0.1,
                 start_n_top=5,
                 end_n_top=5,
                 **kwargs):
        """Constructs XLMConfig.
        """
        super(XLMConfig, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

    @property
    def n_words(self):  # For backward compatibility
        return self.vocab_size

    @n_words.setter
    def n_words(self, value):  # For backward compatibility
        self.vocab_size = value

    @property
    def hidden_size(self):
        return self.emb_dim

    @property
    def num_attention_heads(self):
        return self.n_heads

    @property
    def num_hidden_layers(self):
        return self.n_layers
StarcoderdataPython
8082107
# -*- coding: UTF-8 -*-
"""Extract a mutually high-degree subgraph from the Flickr social/tag networks.

Iteratively keeps only nodes whose degree exceeds 8 in *both* networks until
the node set stops shrinking, then writes the resulting subgraphs (each
undirected edge emitted once per direction) and the surviving node list.
"""
import networkx as nx
import numpy as np
import pandas as pd
import os

os.chdir(os.getcwd())


def build_graph(edge_file_path):
    """Read a weighted edge list with networkx and return it undirected.

    Parameters:
        edge_file_path (str): path to a whitespace-separated edge list with
            integer node ids and a float weight column.
    Returns:
        networkx.Graph: the undirected view of the parsed graph.
    """
    G = nx.read_edgelist(edge_file_path, nodetype=int,
                         data=(('weight', float),),
                         create_using=nx.DiGraph())
    return G.to_undirected()


def write_edgelist(path, network):
    """Write every undirected edge of `network` to `path` in both directions.

    Each line is "<u> <v> <weight>"; the reverse "<v> <u> <weight>" line is
    also written so downstream tools can treat the list as directed.
    """
    edge_count = 0
    # `with` closes the file on every exit path; the explicit close() the
    # original called inside the with-block was redundant.
    with open(path, 'w') as f:
        for u, v, w in network.edges.data('weight'):
            f.write(str(u) + ' ' + str(v) + ' ' + str(w) + '\n')
            f.write(str(v) + ' ' + str(u) + ' ' + str(w) + '\n')
            edge_count += 2
    print('edge number is', edge_count)


social_network = build_graph("data/flickr/social.edgelist")
tag_network = build_graph("data/flickr/tag.edgelist")

# Initial node universe: first column of nodes.txt.
node_list = []
with open("data/flickr/nodes.txt", 'r') as f:
    for line in f:
        node_list.append(int(line.strip().split()[0]))

node_cnt = len(node_list)
# Fixed-point iteration: restrict both graphs to the shared node set, keep
# nodes with degree > 8 in each, intersect, and repeat until stable.
while True:
    social_network = nx.subgraph(social_network, node_list)
    tag_network = nx.subgraph(tag_network, node_list)
    social_node = [n for n in social_network.nodes()
                   if social_network.degree(n) > 8]
    tag_node = [n for n in tag_network.nodes()
                if tag_network.degree(n) > 8]
    node_list = list(set(social_node) & set(tag_node))
    if len(node_list) == node_cnt:
        break
    node_cnt = len(node_list)
    print('current node number is', node_cnt)

print('final node number is', len(node_list))

social_sub = "data/flickr/social_sub.edgelist"
tag_sub = "data/flickr/tag_sub.edgelist"
node_sub = "data/flickr/nodes_sub.txt"

if os.path.exists(social_sub):
    os.remove(social_sub)
if os.path.exists(tag_sub):
    os.remove(tag_sub)

write_edgelist(social_sub, social_network)
write_edgelist(tag_sub, tag_network)

with open(node_sub, 'w') as f:
    written = 0
    for n in node_list:
        f.write(str(n) + '\n')
        written += 1
    assert written == len(node_list)
StarcoderdataPython
354884
#!/usr/bin/env python3
"""
The main entry point. Invoke as `aws-role-play' or `python -m aws-role-play'.
"""
from .cli import cli


def main():
    """Run the command-line interface with no preset context."""
    cli(ctx=None)


if __name__ == "__main__":
    main()
StarcoderdataPython
4965598
# Build script for the `MDS` PyTorch CUDA extension.
#
# Compiles the CUDA kernel (MDS_cuda.cu) and the C++ binding layer (MDS.cpp)
# into a single importable module named `MDS`. `BuildExtension` supplies the
# compiler flags and include paths PyTorch extensions need; build with
# `python setup.py install` (requires a CUDA toolchain).
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='MDS',
    ext_modules=[
        CUDAExtension('MDS', [
            'MDS_cuda.cu',
            'MDS.cpp',
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
StarcoderdataPython
8113817
"""Tests for the Airplane geometry class."""
# pylint: disable=redefined-outer-name
import machup.geometry as geom
import numpy as np
import pytest


@pytest.fixture
def single_wing_plane():
    """Airplane built from the single_wing.json example."""
    return geom.Airplane(
        inputfile="test/geometry/testairplanes/single_wing.json")


@pytest.fixture
def multisegment_wing_plane():
    """Airplane built from the multisegment_wing.json example."""
    return geom.Airplane(
        inputfile="test/geometry/testairplanes/multisegment_wing.json")


@pytest.fixture
def no_control_surface_plane():
    """Airplane built from the no_control_surface.json example."""
    return geom.Airplane(
        inputfile="test/geometry/testairplanes/no_control_surface.json")


def test_get_number_of_sections(single_wing_plane):
    assert single_wing_plane.get_num_sections() == 80


def test_get_wing_segments(single_wing_plane):
    assert len(single_wing_plane.get_wingsegments()) == 2


def test_multisegment_wing(multisegment_wing_plane):
    # Map each segment name to its [left_tip, right_tip] positions.
    tips = {
        seg.name: np.array([seg.get_position("left_tip"),
                            seg.get_position("right_tip")])
        for seg in multisegment_wing_plane.get_wingsegments()
    }

    # Expected tip locations, built outward from the wing root.
    root = np.array([0.5, 0., -0.1])
    inner_offset = np.array([-0.3499546541037,
                             3.9392310120488,
                             -0.69459271066772])
    mirror_y = np.array([1., -1., 1.])
    first_right = inner_offset + root
    first_left = inner_offset * mirror_y + root
    second_right = np.array([-0.3499546541037, 4., 0.]) + first_right
    second_left = np.array([-0.3499546541037, -4., 0.]) + first_left

    expected = {
        "left_outer": np.array([second_left, first_left]),
        "left_inner": np.array([first_left, root]),
        "right_inner": np.array([root, first_right]),
        "right_outer": np.array([first_right, second_right]),
    }
    for name, exp_positions in expected.items():
        assert np.allclose(tips[name], exp_positions,
                           rtol=0., atol=1e-12) is True


def test_no_control_surf_specified(no_control_surface_plane):
    for seg in no_control_surface_plane.get_wingsegments():
        # Control mix is queried (exercises the accessor) but not asserted.
        seg.get_control_mix()
        assert seg.get_control_surface_span() == (0., 1.)
        assert seg.get_control_surface_chord() == (0., 0.)
StarcoderdataPython
349505
#
# REPO:
#   https://github.com/cliffordlab/Sedline-Root-EEG-Toolbox
#
# ORIGINAL SOURCE AND AUTHORS:
#   <NAME>
#   Last Modified: January 14th, 2021
#
# COPYRIGHT (C) 2021
# LICENSE:
#   This software may be modified and distributed under the terms
#   of the BSD 3-Clause license. See the LICENSE file in this repo for
#   details.
#
from tkinter import Tk, Label, Button
import subprocess


class no_flashdrive_attached:
    """Full-screen dialog telling the user to reconnect the flashdrive."""

    def __init__(self, master):
        """Build the label and OK button inside the given Tk window."""
        self.master = master
        master.title("Message")

        self.label = Label(master, text="No flashdrive attached\nPlease press the button on USB hub twice to reconnect flashdrive.")
        self.label.pack()

        self.Okay_button = Button(master, text="Ok", command=self.Okay)
        self.Okay_button.pack()

    def Okay(self):
        """Close the dialog window.

        Bug fix: previously this called `root.destroy()`, reaching for the
        module-level global instead of the window this dialog was handed,
        which would break if the dialog were created on any other window.
        """
        self.master.destroy()


root = Tk()
# Borderless window sized to fill the whole screen (kiosk-style message box).
root.overrideredirect(True)
root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(),
                                   root.winfo_screenheight()))
my_gui = no_flashdrive_attached(root)
root.mainloop()
StarcoderdataPython
11342972
from .echogram import EchoGram
StarcoderdataPython
6491577
import numpy as np


def convert_rsd_batch(batch, device=None):
    """Convert a batch of RSD data to the format the RSD model works with.

    Parameters
    ----------
    batch : list
        List of dictionaries, each with a 'features' list and an
        'is_relevant' flag.
    device : str, optional
        The device to use (default: None). Anything other than None raises
        NotImplementedError.

    Returns
    -------
    dict
        'xs': list of per-item feature lists;
        'ts': int32 array of 0/1 relevance labels.
    """
    if device is not None:
        raise NotImplementedError()

    return {
        'xs': [item['features'] for item in batch],
        'ts': np.asarray([int(item['is_relevant']) for item in batch],
                         dtype='i')
    }


def convert_entmem_batch(batch, device=None):
    """Convert a batch of EntMem data to the format the EntMem model works with.

    Parameters
    ----------
    batch : list
        List of example dictionaries (features, entity features, word/postag
        ids and text, salience label).
    device : str, optional
        The device to use (default: None). Anything other than None raises
        NotImplementedError.

    Returns
    -------
    dict
        Dictionary such that each field points to a batched list;
        'is_salient' is converted to a numpy array at the end.
    """
    if device is not None:
        raise NotImplementedError()

    result = {
        'xs_feat': [],
        'idx_postags': [],
        'idx_words': [],
        'txt_words': [],
        'txt_postags': [],
        'is_salient': []
    }

    for item in batch:
        # Stack the token feature block with the query/context entity
        # feature column vectors into one (n, 1)-column feature matrix.
        # np.atleast_2d(x).T replaces the deprecated np.matrix(x).T: for a
        # 1-D feature list both yield the same (n, 1) column of values.
        result['xs_feat'].append(
            np.vstack([
                np.array(item['features'])[:, :, 0],
                np.atleast_2d(item['query_entity_features']).T,
                np.atleast_2d(item['context_entities_features']).T,
            ])
        )
        result['idx_postags'].append(np.array(item['postag_ids']))
        result['idx_words'].append(np.array(item['word_ids']))
        result['txt_words'].append(item['words'])
        result['txt_postags'].append(item['postags'])
        result['is_salient'].append(item['is_salient'])

    result['is_salient'] = np.array(result['is_salient'])
    return result
StarcoderdataPython
1724298
import cv2
import numpy as np
from matplotlib import pyplot as plt


def read_data():
    """Load the left/right Tsukuba stereo pair as grayscale images.

    Returns
    -------
    tuple
        (img_left, img_right) grayscale images.

    Bug fix: the original computed the grayscale images and then returned
    the file *names* (`left`, `right`) instead, so the loaded images were
    discarded.
    """
    left = "tsukuba-imL.png"
    right = "tsukuba-imR.png"
    img_left = cv2.cvtColor(cv2.imread(left), cv2.COLOR_BGR2GRAY)
    img_right = cv2.cvtColor(cv2.imread(right), cv2.COLOR_BGR2GRAY)
    return img_left, img_right


if __name__ == "__main__":
    imgL = cv2.imread('tsukuba_l.png', 0)
    imgR = cv2.imread('tsukuba_r.png', 0)
    # NOTE(review): on OpenCV >= 3.0 the factory is cv2.StereoBM_create;
    # cv2.createStereoBM only exists in some early builds — confirm against
    # the installed OpenCV version.
    stereo = cv2.createStereoBM(numDisparities=16, blockSize=15)
    disparity = stereo.compute(imgL, imgR)
    plt.imshow(disparity, 'gray')
    plt.show()
StarcoderdataPython
9782168
#!/usr/bin/python3
import operator
import sys, os
import json
import math
sys.path.append("..")
sys.path.append(os.getcwd())
import log
import hashlib
import traceback
import datetime
import sqlalchemy
import stmanage
import requests
import comm
import comm.error
import comm.result
import comm.values
from comm.result import result, parse_except
from comm.error import error
from vlsopt.violasclient import violasclient, violaswallet, violasserver
from enum import Enum
from db.dbvfilter import dbvfilter
from db.dbvproof import dbvproof
from analysis.analysis_proof_base import aproofbase

#module name
name="aproofswap"

class aproofswap(aproofbase):
    """Analyzer that records violas<->violas swap proofs into the proof DB.

    Thin specialization of `aproofbase` configured for dtype "v2vswap";
    the base class is presumed to supply `self._logger`, `self._dbclient`
    and `create_tran_id` — TODO confirm against analysis_proof_base.
    """

    def __init__(self, name = "vproof",
            ttype = "violas", #ttype violas/libra
            dtype = "v2vswap", #metadata type:swap
            dbconf = None, #save analysis result db conf
            fdbconf = None, #base data from lfilter/vfilter db conf
            rdbconf = None, #save transaction record db conf
            nodes = None, #chain nodes libra/violas
            chain = "violas"
            ):
        # All configuration is forwarded unchanged to the base analyzer.
        super().__init__(name, ttype, dtype, dbconf, fdbconf, rdbconf, nodes, chain)

    def __del__(self):
        super().__del__()

    def stop(self):
        # Delegates shutdown to the base class.
        super().stop()

    def get_tran_id(self, tran_info):
        """Build the deterministic transaction id for a parsed tran_info dict.

        Combines flag, type, sender, receiver, module and version via the
        base class's create_tran_id.
        """
        tran_id = self.create_tran_id(tran_info["flag"], tran_info["type"], tran_info['sender'], \
                tran_info['receiver'], tran_info['module'], tran_info['version'])
        return tran_id

    def update_proof_info(self, tran_info):
        """Persist a new swap proof unless its tran_id already exists.

        Returns a `result`: SUCCEED with {"new_proof", "tran_id", "state"}
        on save, TRAN_INFO_INVALID if the id already exists (interpreted as
        stale DB contents), or the error result from the DB client.
        Exceptions are converted to a result via parse_except.
        """
        try:
            self._logger.debug(f"start update_proof_info tran info: {tran_info}")
            version = tran_info.get("version", None)

            #create tran id
            tran_id = self.get_tran_id(tran_info)

            ret = self._dbclient.key_is_exists(tran_id)
            if ret.state != error.SUCCEED:
                return ret
            #found key = version info, db has old datas , must be flush db?
            if ret.datas:
                return result(error.TRAN_INFO_INVALID, f"key{version} tran_id({tran_id})is exists, db datas is old, flushdb ?. violas tran info : {tran_info}")

            # Enum members are flattened to their raw values so the dict is
            # JSON-serializable below.
            tran_info["flag"] = tran_info["flag"].value
            tran_info["type"] = tran_info["type"].value
            tran_info["tran_id"] = tran_id
            ret = self._dbclient.set_proof(version, json.dumps(tran_info))
            if ret.state != error.SUCCEED:
                return ret
            self._logger.info(f"saved new proof succeed. version = {tran_info.get('version')} tran_id = {tran_id} state={tran_info['state']}")

            ret = result(error.SUCCEED, "", {"new_proof":True, "tran_id":tran_id, "state": tran_info["state"]})
        except Exception as e:
            ret = parse_except(e)
        return ret
StarcoderdataPython
3260843
import pymysql

import config


def connection():
    """Open a connection to the local SuplDB database.

    Credentials come from the `config` module. Bug fix: the password had
    been replaced by an invalid `<PASSWORD>` placeholder (a syntax error);
    it is restored from config, consistent with `user=config.USER`.

    Returns
    -------
    tuple
        (cursor, connection). The caller owns both and is responsible for
        closing the connection when done.
    """
    conn = pymysql.connect(host='localhost',
                           user=config.USER,
                           password=config.PASSWORD,
                           db='SuplDB',
                           charset='utf8mb4')
    c = conn.cursor()
    # Force the session character set for legacy clients.
    c.execute("SET NAMES utf8")
    conn.commit()
    return c, conn


if __name__ == '__main__':
    c, conn = connection()
StarcoderdataPython
3436907
# -*- coding: utf-8 -*-
from .object import Object
from .member import Member
from datetime import datetime
from .content import Content


class Message(Object):
    """Represents a Spectrum message object.

    Supported Operations:

    +-----------+-----------------------------------------+
    | Operation | Description                             |
    +===========+=========================================+
    | x == y    | Checks if two messages are equal.       |
    +-----------+-----------------------------------------+
    | x != y    | Checks if two messages are not equal.   |
    +-----------+-----------------------------------------+
    | str(x)    | Returns the message's raw content.      |
    +-----------+-----------------------------------------+

    id : int
        The ID of the Message.
    time_created : ``datetime.datetime``
        The time that the message was created at.
    time_modified : ``datetime.datetime``
        The time that the message was modified. May equal ``time_created``
        if the message has never been modified.
    lobby : :class:`Lobby`
        The lobby that the message was sent in.
    author : :class:`Member`
        The author of the message.
    content : :class:`Content`
        The content object of the message.
    media_id : str or None
        ``embed:<embed ID>`` when the message has an embed, else ``None``.
    highlight_role : int or role object
        The role used for the message: the raw integer id when the role was
        not resolved, or the resolved role object when it came from the
        client's cache.
    reactions : NotImplemented
        Reactions are not implemented yet.
    """

    __slots__ = [
        'id',
        'time_created',
        'time_modified',
        'lobby',
        'author',
        'content',
        'media_id',
        # Fixed: __init__ assigns `highlight_role`, but this slot was
        # declared as `highlight_role_id`, which would raise AttributeError
        # on fully slotted instances.
        'highlight_role',
        'reactions',
    ]

    def __init__(self, **kwargs):
        super(Message, self).__init__(kwargs.pop('id'))

        self.time_created = datetime.utcfromtimestamp(
            kwargs.pop('time_created'))
        self.time_modified = datetime.utcfromtimestamp(
            kwargs.pop('time_modified'))
        self.lobby = kwargs.pop('lobby')

        # Networking may have already resolved the author from the cache.
        if isinstance(kwargs['member'], Member):
            self.author = kwargs.pop('member')
        else:
            self.author = Member(**kwargs.pop('member'))

        self.content = Content(**kwargs.pop('content_state'))
        self.media_id = kwargs.pop('media_id')

        # The role arrives either as a resolved role object (from the
        # cache) or as a raw id. Bug fix: the original called
        # isinstance(..., NotImplemented), which always raises TypeError
        # because NotImplemented is not a type. Coerce to int when the
        # value is a plain id; keep resolved objects as-is.
        raw_role = kwargs.pop('highlight_role_id')
        try:
            self.highlight_role = int(raw_role)
        except (TypeError, ValueError):
            self.highlight_role = raw_role

        self.reactions = NotImplemented

    def __str__(self):
        return self.content.raw_content
StarcoderdataPython
3272652
"""Replay a CSV drive cycle over UDP to CAN gateway nodes, forever.

Reads drive_cycle.csv, and at each timestep's relative time sends the
per-node SPN/value command strings via UDP; between timesteps it drains and
logs any incoming datagrams on the same (non-blocking) socket.
"""
import socket
import sys
import time
import traceback
import struct

CAN_MESSAGE_LENGTH = 25
MAX_UDP_SIZE = 1400  # This should match the programming on the node
PORT = 59581

# Fixed typo: was `node_adresses`. Maps source address (SA) -> node IP.
node_addresses = {11: "192.168.1.2", 0: "192.168.1.5"}

# SOCK_DGRAM is the socket type to use for UDP sockets
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_socket.bind(('', PORT))
server_socket.settimeout(0)  # make it non-blocking

with open("drive_cycle.csv", 'r') as drive_file:
    drive_lines = drive_file.readlines()


def process_message(message, address):
    """Log an incoming datagram; abort the program on a CAN2 frame."""
    print("Received Message of {} bytes".format(len(message)))
    if message[:5] == b'CAN2\xff':
        print(message)
        print("WTF?")
        sys.exit()
    if not message[:5] == b'TIME\xff':
        print(message)
    # Original CAN-frame parsing kept for reference:
    # for i in range(0, num_can_messages):
    #     CANID = struct.unpack(">L", message[1+i*CAN_MESSAGE_LENGTH:5+i*CAN_MESSAGE_LENGTH])[0]
    #     print("Received: {:08X}".format(CANID))


def send_drive_profile_change(drive_data):
    """Send one timestep's commands, grouped per node, as UDP datagrams.

    drive_data is a CSV row: [sequence, rel_time, SA, SPN, value, SA, SPN,
    value, ...]; triples that fail int/float conversion are skipped.
    """
    sequence = drive_data[0]
    rel_time = drive_data[1]
    commands = {}
    for i in range(len(drive_data[2:]) // 3):  # use integer division
        try:
            source = int(drive_data[2 + 3 * i])
            if source not in commands:
                commands[source] = ""
            SPN = int(drive_data[3 + 3 * i])
            value = float(drive_data[4 + 3 * i])
            commands[source] += "{:d},{:0.3f},".format(SPN, value)
        except ValueError:
            pass
    for SA, command_string in commands.items():
        # [:-1] drops the trailing comma.
        data_to_send = bytes(command_string[:-1], 'ascii')
        if len(data_to_send) > 0 and len(data_to_send) < MAX_UDP_SIZE:
            bytes_sent = server_socket.sendto(data_to_send,
                                              (node_addresses[SA], PORT))
            print("Sent {} bytes: {}".format(bytes_sent, data_to_send))


while True:
    previous_time = time.time()
    print("Drive Cycle Start Time: {:12.6f}".format(previous_time))
    for drive_line in drive_lines[2:]:
        drive_data = drive_line.strip().strip(',').split(",")
        sequence_number = int(drive_data[0])
        time_lapsed = float(drive_data[1])
        while True:
            current_time = time.time()
            time_delta = current_time - previous_time
            if time_delta >= time_lapsed:
                # previous_time = current_time
                print("{:12.6f} ({:0.6f}) {}".format(current_time,
                                                     time_delta,
                                                     drive_data))
                send_drive_profile_change(drive_data)
                # Be sure to break from the eternal loop and get the next
                # timestep
                break
            else:
                pass
            # Read the socket and process the message data
            # With the timeout set to zero, this is a non-blocking call.
            # If no message is in the socket buffer, then a BlockingIOError
            # is thrown. This method helps process all the data in the
            # socket. It is important not to overload the network.
            try:
                message, address = server_socket.recvfrom(1500)
                process_message(message, address)
            except BlockingIOError:
                pass
StarcoderdataPython
1895871
"""Fetch, process and persist ACS census metrics for Illinois geographies."""
from src.config import CENSUS_KEY
import json
import requests
import numpy as np
import pandas as pd
from numpyencoder import NumpyEncoder
# from numpy.lib.function_base import quantile


def get_census_response(table_url, get_ls, geo):
    '''
    Concatenates url string and returns response from census api query
    input:
        table_url (str): census api table url
        get_ls (ls): list of tables to get data from
        geo (str): geographic area and filter
    output:
        response (requests.response): api response
    '''
    get = 'NAME,' + ",".join(get_ls)
    url = f'{table_url}get={get}&for={geo}&key={CENSUS_KEY}'
    # print(f"Calling for {geo}: {get_ls}")
    response = requests.get(url)
    # print(f"{response} Received")
    return(response)


class CensusData:
    '''
    Stores and Updates Census Data in df_dict
    input:
        var_metrics (tuple):
            [0] metric name (str) e.g. race, poverty
            [1] variable (dict):
                Keys: Census Table Codes
                Values: Census Table Names/Aliases
        table (str): link to ACS5 Data Tables
        geo_ls (list): list of geographies to process
    Instructions:
        Create a class instance via input described above
        Use the get_data method to update CensusData.df_dict
        View dataframes: df_dict[key] (key='zip' or 'county')
        Save dataframe using CensusData.df_to_json()
            Default saves zipped by geo_code
            Set zip_df = False to save dataframes without processing
        Load dataframe using CensusData.load_df()
            Default loads unzipped saved file, described above
    NOTE: df_dict, data_metrics and data_bins are class-level state shared
    by every instance; get_data/process_data mutate them in place.
    '''
    df_dict = {}
    data_metrics = dict()
    data_bins = dict()

    def __init__(self, var_metrics: tuple,
                 table: str, geo_ls: list = ["zip", "county"]):
        '''
        Initialized instance
        Adds metric to class set data_metrics
        '''
        self.metric = var_metrics[0]
        self.var_dict = var_metrics[1]
        self.data_metrics[self.metric] = self.var_dict
        self.table = table
        self.geo_ls = geo_ls

    def get_data(self):
        '''
        Calls getCensusResponse on each geography
        Sends response json to __panda_from_json function
        Returns list of DataFrames
        '''
        geo_dict = {'zip': 'zip code tabulation area:*',
                    'county': 'county:*&in=state:17'}
        get_ls = list(self.var_dict.keys())
        df_ls = []
        for g in self.geo_ls:
            response = get_census_response(self.table, get_ls, geo_dict[g])
            self.response_json = response.json()
            df = self.__panda_from_json(self.response_json, g)
            df_ls.append(df)
        return df_ls

    @classmethod
    def process_data(cls, save=False):
        # Derives race/poverty percentage columns on every cached dataframe;
        # optionally writes both JSON outputs afterwards.
        cls.__pd_process_race()
        cls.__pd_process_poverty()
        if save:
            cls.df_to_json(both=True)

    def __panda_from_json(self, response_json, geo):
        '''
        Called by getData method
        Updates CensusData.zip_df and returns response_df
        '''
        # Name Columns
        dict_values = list(self.var_dict.values())
        columns = [self.var_dict.get(header, header)
                   for header in response_json[0]]
        # Creates DF
        response_df = pd.DataFrame(response_json[1:], columns=columns)
        # adds types for performance and predictable method output
        string_df = response_df.astype('string')
        # ignore error to keep NAN values
        conversion_dict = {v: 'Int64' for v in dict_values}
        typed_df = string_df.astype(conversion_dict)
        # Processes geographies for output
        if geo == 'county':
            # FIPS code = state code + county code, used as the index.
            fip_series = typed_df.loc[:, 'state'] + typed_df.loc[:, 'county']
            fip_series.rename('FIPS', inplace=True)
            geo_df = pd.concat([typed_df, fip_series], axis=1)
            geo_df = geo_df.set_index('FIPS').drop(['state', 'county'],
                                                   axis=1)
        elif geo == 'zip':
            # filter keeps Illinois zipcodes
            drop_ls = ['state'] if 'state' in typed_df else []
            drop_ls.append('NAME')
            geo_df = typed_df.set_index('zip code tabulation area')\
                .drop(drop_ls, axis=1)\
                .filter(regex='^(6[0-2])\d+', axis=0)  # noqa: W605, E501
        # checks if df exists
        class_df = self.df_dict.get(geo, pd.DataFrame())
        if not(class_df.empty):
            # Removes NAME to avoid conflict
            geo_df = geo_df.drop(
                ['NAME'],
                axis=1) if 'NAME' in class_df.columns else geo_df
            try:
                self.df_dict[geo] = class_df.join(geo_df, how='outer')
            except Exception as e:
                print(e)
                print("Make sure the column names are unique")
        else:
            self.df_dict[geo] = geo_df
        return geo_df

    @classmethod
    def df_to_json(cls, zip_df=True, both=False):
        '''
        Saves df to file
        Default: zips dataframe by geo_code
        Both: overrides zip_df, saves both
        Otherwise: saves df.to_json() in dictionary to json
        This format loads with load_df()
        '''
        zip_df = False if both else zip_df
        class_json_dict = dict()
        # Add Meta Data Here (Bins, etc)
        class_json_dict['meta'] = {'data_metrics': cls.data_metrics,
                                   'data_bins': cls.data_bins}

        if not(zip_df):
            fp = 'final_jsons/df_dump.json'
            zip_dict = class_json_dict.copy()
            for k in cls.df_dict:
                zip_dict[k] = cls.df_dict[k].to_dict()
            with open(fp, 'w') as f:
                json.dump(zip_dict, f, separators=(',', ':'),
                          cls=NumpyEncoder)
            if not(both):
                return fp

        # determine metrics
        # Not sure we need this many loops, \
        # but seemed like a good idea at the time
        for geo in cls.df_dict:
            geo_dict = dict()
            for geo_area in cls.df_dict[geo].itertuples():
                geo_area_dict = {f'{m}_data': dict()
                                 for m in cls.data_metrics.keys()}
                for name in geo_area._fields:
                    if name == "Index":
                        continue
                    for metric in cls.data_metrics.keys():
                        metric_name = f'{metric}_data'
                        # NOTE(review): bare lookup below is a no-op (kept
                        # from the original) — presumably left over from a
                        # setdefault call.
                        geo_area_dict[metric_name]
                        if metric in name:
                            geo_area_dict[metric_name][name] = getattr(
                                geo_area, name)
                            break
                    else:
                        geo_area_dict[name] = getattr(geo_area, name)
                geo_dict[geo_area.Index] = geo_area_dict
            class_json_dict[f'{geo}_data'] = geo_dict

        fp = 'final_jsons/df_merged_json.json'
        with open(fp, 'w') as f:
            json.dump(class_json_dict, f, separators=(',', ':'),
                      cls=NumpyEncoder)
        fp = 'final_jsons/' if both else fp
        print(f'Data updated at {fp}')
        return fp

    @classmethod
    def load_df(cls, fp='final_jsons/df_dump.json'):
        '''
        Loads df_dict from file saved from df_to_json
        '''
        with open(fp) as f:
            load_df = json.load(f)
        cls.df_dict = dict()
        for k in load_df:
            if k == 'meta':
                cls.data_metrics = load_df[k]['data_metrics']
                continue
            v = pd.DataFrame(load_df[k])
            cls.df_dict[k] = v
        return None

    @classmethod
    def get_data_values(cls, metric_name):
        # Tuple of the column aliases registered for one metric.
        return tuple(cls.data_metrics[metric_name].values())

    @classmethod
    def __nest_percentages(cls, df, total_col_str):
        '''
        Calculates percentages and removes NaN for dict conversion
        Returns calculated percent_df and series of dictionaries
        '''
        str_idx = total_col_str.find('_')
        metric = total_col_str[:str_idx]
        # divides df by total column to calculate percentages
        # rounds to save space
        divide_by_total = lambda x: np.round(x/df[total_col_str], 6)  # noqa: E731, E501
        # try:
        percent_df = df.apply(divide_by_total).drop(total_col_str, axis=1)
        # except:
        #     print(df.dtypes)
        #     raise Exception
        # converts NAN to None, for proper JSON encoding
        working_df = percent_df.where(pd.notnull(percent_df), None)
        # creates series of race percentages as a dictionary
        # this allows us to add percentages to the main table,
        # without adding many more columns
        dict_series = working_df.apply(pd.Series.to_dict, axis=1)
        dict_series.name = f'{metric}_percentages'
        return percent_df, dict_series

    @classmethod
    def __pd_process_race(cls):
        def majority(series):
            '''
            Returns majority race demographic
            for each geo_area
            If no majority, returns 'majority_minority'
            '''
            # indexes max value, returns NA for NA rows
            idx = series.idxmax()
            try:
                value = series[idx]
            except KeyError:
                # if NA row, idx = NA
                return None
            if value >= 0.5:
                return idx
            else:
                return 'majority_minority'

        for geo_area in cls.df_dict:
            geo_df = cls.df_dict[geo_area]
            # creates df using original columns
            # prevents conflicts with new columns
            # race_values = tuple(cls.data_metrics['race'].values())
            race_values = cls.get_data_values('race')
            race_df = geo_df.loc[:, race_values]
            # divides df by race_total column to calculate percentages
            race_percent_df, pct_dict_series = cls.__nest_percentages(race_df, 'race_total')  # noqa: E501
            # creates series of majority race demographics
            majority_series = race_percent_df.apply(majority, axis=1)
            majority_series.name = 'race_majority'
            # creates df from the series for merging
            percentage_df = pd.concat([pct_dict_series, majority_series],
                                      axis=1)
            # A join would add the values as two new columns
            # Trying to merge creates the columns if they don't exist
            # and updates them if they do exist
            # Potential simplification with attribute access,
            # however I'm not confident that handles missing data, etc
            try:
                geo_df = geo_df.merge(percentage_df, left_index=True,
                                      right_index=True,
                                      suffixes=(False, False))
            except ValueError:
                geo_df.update(percentage_df)
            cls.df_dict[geo_area] = geo_df

    @classmethod
    def __pd_process_poverty(cls):
        for geo_area in cls.df_dict:
            geo_df = cls.df_dict[geo_area]
            # creates df using original columns
            # prevents conflicts with new columns
            poverty_values = cls.get_data_values('poverty')
            poverty_df = geo_df.loc[:, poverty_values]
            total_col = 'poverty_population_total'
            pct_df, pct_series = cls.__nest_percentages(poverty_df,
                                                        total_col)
            # create quantile bins
            q_df = pct_df.apply(np.quantile, q=(0, 0.25, 0.5, 0.75, 1))
            q_dict = q_df.to_dict(orient='list')
            cls.data_bins.update({'quantiles': q_dict})
            # A join would add the values as two new columns
            # Trying to merge creates the columns if they don't exist
            # and updates them if they do exist
            # Potential simplification with attribute access,
            # however I'm not confident that handles missing data, etc
            try:
                geo_df = geo_df.merge(pct_series, left_index=True,
                                      right_index=True,
                                      suffixes=(False, False))
            except ValueError:
                geo_df.update(pct_series)
            cls.df_dict[geo_area] = geo_df


def search_table(table_json_ls: list, keyword_ls: list,
                 filter_function_ls: list) -> list:
    '''
    Filters variable tables by keyword and filter
    input:
        table_json_ls (response.json() list object):
            list of lists from census variable table api
        keyword_ls (list): list of keyword strings
            keyword filter applied to the third element of the input list
            (concept column)
        filter_function_ls (list): list of functions that filter
            table_json_ls with filter method
    output:
        return_json_ls (list): list, same format as table_json_ls, filtered
    '''
    return_json_ls = list()

    # runs filter for each function in filter_function_ls
    for f in filter_function_ls:
        table_json_ls = list(filter(f, table_json_ls))

    # adds rows with keyword(s) in concept column to return_json_ls
    for d in table_json_ls:
        try:
            for k in keyword_ls:
                # d[2] is the concept column, d[1] is the label column
                if k.lower() in d[2].lower() or k.lower() in d[1].lower():
                    continue
                else:
                    break
            else:
                # for/else: only rows matching every keyword are kept
                return_json_ls.append(d)
        except IndexError:
            continue

    return return_json_ls


def county_fips(reverse=False) -> dict:
    '''
    Requests county fips from census API
    and returns list of IL county FIPS
    input: reverse (bool) reverses keys and values in output
    output: il_json (dict) {'county name': 'fip'}
    '''
    import requests
    url = 'https://api.census.gov/data/2010/dec/sf1?get=NAME&for=county:*'
    response = requests.get(url)

    # filter for IL
    def il_county_filter(x):
        return x[1] == '17'

    response_json = response.json()
    if reverse:
        il_json = {county[1]+county[2]: county[0]
                   for county in response_json
                   if il_county_filter(county)}
    else:
        il_json = {county[0]: county[1]+county[2]
                   for county in response_json
                   if il_county_filter(county)}
    return il_json


def bin_data(geo_data: dict):
    '''
    Creates bins from functions described in this function
    geoData:
        keys: geo code
        values: percentages or data to bin
    '''
    def quartiles(geo_value):
        # Five-number summary (min, Q1, median, Q3, max).
        min_v = np.min(geo_value)
        first = np.quantile(geo_value, 0.25)
        second = np.quantile(geo_value, 0.5)
        third = np.quantile(geo_value, 0.75)
        max = np.max(geo_value)
        q = [min_v, first, second, third, max]
        return {'quartiles': q}

    bin_dict = {}
    for k in geo_data:
        v = geo_data[k]
        try:
            q = quartiles(v)
        except ValueError:
            # Empty/non-numeric values cannot be binned; skip this key.
            continue
        bin_dict[k] = q
    return bin_dict
StarcoderdataPython
3208442
# Copyright (c) 2018, ZIH, Technische Universitaet Dresden, Federal Republic of Germany # # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of metricq nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import asyncio import uuid from asyncio import CancelledError, Task from asyncio.futures import Future from enum import Enum, auto from typing import Any, Dict, Iterator, List, Optional import aio_pika from aiormq import ChannelInvalidStateError from . 
import history_pb2
from .client import Client, _GetMetricsResult
from .connection_watchdog import ConnectionWatchdog
from .exceptions import (
    HistoryError,
    InvalidHistoryResponse,
    PublishFailed,
    ReconnectTimeout,
)
from .logging import get_logger
from .rpc import rpc_handler
from .types import TimeAggregate, Timedelta, Timestamp, TimeValue
from .version import __version__  # noqa: F401 - shut up flake8, automatic version str

logger = get_logger(__name__)


class HistoryRequestType(Enum):
    """The type of metric data to request.

    Use the value below to select which to request in
    :meth:`.HistoryClient.history_data_request`.

    See :class:`HistoryResponse` for a distinction between `raw values`
    and `aggregates`.
    """

    AGGREGATE_TIMELINE = history_pb2.HistoryRequest.AGGREGATE_TIMELINE
    """Retrieve a timeline of aggregates with the specified `max_interval` each.
    """
    AGGREGATE = history_pb2.HistoryRequest.AGGREGATE
    """Retrieve a single aggregate over the specified duration.
    """
    LAST_VALUE = history_pb2.HistoryRequest.LAST_VALUE
    """Only retrieve the last data point recorded for a metric.
    """
    FLEX_TIMELINE = history_pb2.HistoryRequest.FLEX_TIMELINE
    """Retrieve either aggregates or raw values, depending on the `max_interval.`
    """


class HistoryResponseType(Enum):
    """The type of a history response.

    See :attr:`HistoryResponse.mode` how these values should be interpreted
    in the context of a :class:`HistoryResponse`.
    """

    EMPTY = auto()
    """The response contains no values at all.
    """
    AGGREGATES = auto()
    """The response contains a list of aggregates.
    """
    VALUES = auto()
    """The response contains a list of time-value pairs.
    """
    LEGACY = auto()
    """The response is in an unspecified legacy format.
    """


class HistoryResponse:
    """Response to a history request containing the historical data.

    Providers of historical data send either `raw values` (`time-value`
    pairs, see :class:`.TimeValue`) or `aggregates`
    (see :class:`.TimeAggregate`).

    The data is accessed by iterating over either :meth:`values` or
    :meth:`aggregates`.  If the response is of the wrong type, these methods
    might fail and raise :exc:`ValueError`.  Match on the value of
    :attr:`mode` to determine whether this response contains raw values or
    aggregates.  Alternatively, pass :code:`convert=True` to either
    :meth:`values` or :meth:`aggregates` to transparently convert the data
    to the desired type.
    """

    def __init__(
        self,
        proto: history_pb2.HistoryResponse,
        request_duration: Optional[float] = None,
    ):
        """HistoryResponse objects are created by the HistoryClient methods.
        Don't bother instantiating on your own.

        Validates the protobuf payload and derives :attr:`mode` from which
        repeated fields are populated; raises :exc:`HistoryError` when the
        database reported an error and :exc:`InvalidHistoryResponse` when
        the field counts are inconsistent.

        :meta private:
        """
        self.request_duration = request_duration
        # Number of data points; every other repeated field must be
        # consistent with this count.
        count = len(proto.time_delta)

        if proto.error != "":
            raise HistoryError(f"request failed on database: {proto.error}")
        elif count == 0:
            self._mode = HistoryResponseType.EMPTY
            if (
                len(proto.value_min) != 0
                or len(proto.value_max) != 0
                or len(proto.aggregate) != 0
                or len(proto.value) != 0
            ):
                raise InvalidHistoryResponse("time_delta empty, but values present")
        elif len(proto.aggregate) == count:
            self._mode = HistoryResponseType.AGGREGATES
            if len(proto.value) != 0:
                raise InvalidHistoryResponse("values and aggregates present")
        elif len(proto.value) == count:
            self._mode = HistoryResponseType.VALUES
            if len(proto.aggregate) != 0:
                raise InvalidHistoryResponse("aggregates and values present")
        elif len(proto.value_avg) == count:
            # Old min/avg/max triple format, predating the aggregate message.
            self._mode = HistoryResponseType.LEGACY
            if (
                len(proto.value_min) != count
                or len(proto.value_max) != count
                or len(proto.aggregate) != 0
                or len(proto.value) != 0
            ):
                raise InvalidHistoryResponse("inconsistent counts in legacy format")
        else:
            raise InvalidHistoryResponse("no value count matches time_delta count")

        self._proto = proto

    def __len__(self) -> int:
        # Number of data points (aggregates or values) in the response.
        return len(self._proto.time_delta)

    @property
    def mode(self) -> HistoryResponseType:
        """The type of response at hand.

        This determines the behavior of :meth:`aggregates` and :meth:`values`:

        :attr:`mode` is :attr:`~HistoryResponseType.VALUES`:
            :meth:`values` will return an iterator of
            :class:`~metricq.TimeValue`.  :meth:`aggregates` will fail with
            :exc:`ValueError`, except if called with :code:`convert=True`.

        :attr:`mode` is :attr:`~HistoryResponseType.AGGREGATES`:
            :meth:`aggregates` will return an iterator of
            :class:`~metricq.TimeAggregate`.  :meth:`values` will fail with
            :exc:`ValueError`, except if called with :code:`convert=True`.

        :attr:`mode` is :attr:`~HistoryResponseType.EMPTY`:
            Both :meth:`values` and :meth:`aggregates` return an empty
            iterator.

        :attr:`mode` is :attr:`~HistoryResponseType.LEGACY`:
            Both :meth:`values` and :meth:`aggregates` will raise
            :exc:`ValueError` unless called with :code:`convert=True`.

        .. warning::
            The values listed here might be *non-exhaustive*, new ones might
            be added in the future.  If matching on a value of
            :class:`HistoryResponseType`, make sure to include a *catch-all*
            case::

                if response.mode is HistoryResponseType.VALUES:
                    ...
                elif response.mode is HistoryResponseType.AGGREGATES:
                    ...
                else:
                    # catch-all case, handle it cleanly
        """
        return self._mode

    def values(self, convert: bool = False) -> Iterator[TimeValue]:
        """An iterator over all data points included in this response.

        Args:
            convert:
                Convert values transparently if response does not contain
                raw values.  If the response contains aggregates, this will
                yield the mean value of each aggregate.

        Raises:
            :class:`ValueError`: if :code:`convert=False` and the response
                does not contain raw values.
        """
        # time_delta entries are deltas in nanoseconds; accumulate them into
        # absolute timestamps.
        time_ns = 0
        if self._mode is HistoryResponseType.VALUES:
            for time_delta, value in zip(self._proto.time_delta, self._proto.value):
                time_ns = time_ns + time_delta
                yield TimeValue(Timestamp(time_ns), value)
            return
        elif self._mode is HistoryResponseType.EMPTY:
            return

        if not convert:
            raise ValueError(
                "Attempting to access values of HistoryResponse.values in wrong mode: {}".format(
                    self._mode
                )
            )

        if self._mode is HistoryResponseType.AGGREGATES:
            # Convert each aggregate to a value by taking its mean.
            for time_delta, proto_aggregate in zip(
                self._proto.time_delta, self._proto.aggregate
            ):
                time_ns = time_ns + time_delta
                timestamp = Timestamp(time_ns)
                aggregate = TimeAggregate.from_proto(timestamp, proto_aggregate)
                yield TimeValue(timestamp, aggregate.mean)
            return

        if self._mode is HistoryResponseType.LEGACY:
            # Legacy format: use the stored average as the value.
            for time_delta, average in zip(
                self._proto.time_delta, self._proto.value_avg
            ):
                time_ns = time_ns + time_delta
                yield TimeValue(Timestamp(time_ns), average)
            return

        raise ValueError("Invalid HistoryResponse mode")

    def aggregates(self, convert: bool = False) -> Iterator[TimeAggregate]:
        """An iterator over aggregates contained in this response.

        Args:
            convert:
                Convert values to aggregates transparently if response does
                not contain aggregates.  If the response contains
                `raw values`, this will yield an aggregate for each value.

        Raises:
            ValueError: if :code:`convert=False` and the underlying response
                does not contain aggregates
            NonMonotonicTimestamps: if the underling data has mode
                :attr:`~HistoryResponseType.VALUES` and timestamps are not
                strictly monotonically increasing
        """
        time_ns = 0
        if self._mode is HistoryResponseType.AGGREGATES:
            for time_delta, proto_aggregate in zip(
                self._proto.time_delta, self._proto.aggregate
            ):
                time_ns = time_ns + time_delta
                timestamp = Timestamp(time_ns)
                yield TimeAggregate.from_proto(timestamp, proto_aggregate)
            return
        elif self._mode is HistoryResponseType.EMPTY:
            return

        if not convert:
            raise ValueError(
                "Attempting to access values of HistoryResponse.aggregates in wrong mode: {}".format(
                    self._mode
                )
            )

        if len(self) == 0:
            return

        if self._mode is HistoryResponseType.VALUES:
            # Build aggregates from consecutive value pairs: each aggregate
            # spans [previous_timestamp, timestamp).  The first point only
            # seeds previous_timestamp, hence n values yield n-1 aggregates.
            time_ns = self._proto.time_delta[0]
            previous_timestamp = Timestamp(time_ns)
            # First interval is useless here
            for time_delta, value in zip(
                self._proto.time_delta[1:], self._proto.value[1:]
            ):
                time_ns = time_ns + time_delta
                timestamp = Timestamp(time_ns)
                yield TimeAggregate.from_value_pair(
                    previous_timestamp, timestamp, value
                )
                previous_timestamp = timestamp
            return

        if self._mode is HistoryResponseType.LEGACY:
            for time_delta, minimum, maximum, average in zip(
                self._proto.time_delta,
                self._proto.value_min,
                self._proto.value_max,
                self._proto.value_avg,
            ):
                time_ns = time_ns + time_delta
                # That of course only makes sense if you just use mean or mean_sum
                # We don't do the nice intervals here...
                yield TimeAggregate(
                    timestamp=Timestamp(time_ns),
                    minimum=minimum,
                    maximum=maximum,
                    sum=average,
                    count=1,
                    integral_ns=0,
                    active_time=Timedelta(0),
                )
            return

        raise ValueError("Invalid HistoryResponse mode")


class HistoryClient(Client):
    """A MetricQ client to access historical metric data."""

    def __init__(self, *args: Any, **kwargs: Any):
        super().__init__(*args, **kwargs)
        # AMQP address of the data/history broker, learned from the manager
        # during connect().
        self.data_server_address: Optional[str] = None

        self.history_connection: Optional[aio_pika.RobustConnection] = None
        self.history_channel: Optional[aio_pika.RobustChannel] = None
        self.history_exchange: Optional[aio_pika.Exchange] = None
        self.history_response_queue: Optional[aio_pika.Queue] = None

        # Stops the client if the history connection cannot be reestablished
        # within the timeout.
        self._history_connection_watchdog = ConnectionWatchdog(
            on_timeout_callback=lambda watchdog: self._schedule_stop(
                ReconnectTimeout(
                    f"Failed to reestablish {watchdog.connection_name} after {watchdog.timeout} seconds"
                )
            ),
            timeout=kwargs.get("connection_timeout", 60),
            connection_name="history connection",
        )

        # In-flight requests by correlation id; resolved by
        # _on_history_response.
        self._request_futures: Dict[str, Future[HistoryResponse]] = dict()
        self._reregister_task: Optional[Task[None]] = None

    async def connect(self) -> None:
        """Connect to the MetricQ network and register this HistoryClient."""
        await super().connect()
        response = await self.rpc("history.register")
        logger.debug("register response: {}", response)
        assert response is not None
        self.data_server_address = self.derive_address(response["dataServerAddress"])
        self.history_connection = await self.make_connection(
            self.data_server_address,
            connection_name="history connection {}".format(self.token),
        )
        self.history_connection.add_close_callback(self._on_history_connection_close)
        self.history_connection.add_reconnect_callback(
            self._on_history_connection_reconnect  # type: ignore
        )

        self.history_channel = await self.history_connection.channel()
        assert self.history_channel is not None
        # The manager owns the exchange; we attach passively.
        self.history_exchange = await self.history_channel.declare_exchange(
            name=response["historyExchange"], passive=True
        )
        await self._declare_history_queue(response["historyQueue"])

        if "config" in response:
            await self.rpc_dispatch("config", **response["config"])

        self._history_connection_watchdog.start(loop=self.event_loop)
        self._history_connection_watchdog.set_established()

        await self._history_consume()

    async def stop(self, exception: Optional[Exception] = None) -> None:
        """Tear down the history channel/connection, then stop the base client."""
        logger.info("closing history channel and connection.")
        await self._history_connection_watchdog.stop()
        if self.history_channel:
            await self.history_channel.close()  # type: ignore
            self.history_channel = None
        if self.history_connection:
            # We need not pass anything as exception to this close. It will only hurt.
            await self.history_connection.close()  # type: ignore
            self.history_connection = None
        self.history_exchange = None
        await super().stop(exception)

    async def get_metrics(self, *args: Any, **kwargs: Any) -> _GetMetricsResult:
        """Retrieve information for **historic** metrics matching a selector pattern.

        This is like :meth:`Client.get_metrics`, but sets
        :code:`historic=True` by default.  See documentation there for a
        detailed description of the remaining arguments.
        """
        kwargs.setdefault("historic", True)
        return await super().get_metrics(*args, **kwargs)

    async def history_data_request(
        self,
        metric: str,
        start_time: Optional[Timestamp],
        end_time: Optional[Timestamp],
        interval_max: Optional[Timedelta],
        request_type: HistoryRequestType = HistoryRequestType.AGGREGATE_TIMELINE,
        timeout: float = 60,
    ) -> HistoryResponse:
        """Request historical data points of a metric.

        Args:
            metric: The metric of interest.
            start_time: Only include data points from this point in time onward.
            end_time: Only include data points up to this point in time.
            interval_max: Maximum time between data points in response.
            request_type: The type of metric data to request.
                See :class:`.HistoryRequestType`.
            timeout: Operation timeout in seconds.

        Raises:
            ValueError: if metric is empty or longer than 255 bytes
        """
        if not metric:
            raise ValueError("Metric must be a non-empty string")
        # The metric doubles as the AMQP routing key, which is limited to
        # 255 bytes.
        if len(metric.encode("utf-8")) > 255:
            raise ValueError(
                "Metric names (amqp routing keys) must be at most 255 bytes long"
            )

        correlation_id = "mq-history-py-{}-{}".format(self.token, uuid.uuid4().hex)

        logger.debug(
            "running history request for {} ({}-{},{}) with correlation id {}",
            metric,
            start_time,
            end_time,
            interval_max,
            correlation_id,
        )
        request = history_pb2.HistoryRequest()
        if start_time is not None:
            request.start_time = start_time.posix_ns
        if end_time is not None:
            request.end_time = end_time.posix_ns
        if interval_max is not None:
            request.interval_max = interval_max.ns
        if request_type is not None:
            request.type = request_type.value

        assert self.history_response_queue is not None
        msg = aio_pika.Message(
            body=request.SerializeToString(),
            correlation_id=correlation_id,
            reply_to=self.history_response_queue.name,
        )

        # Register the future BEFORE publishing so the response handler can
        # always find it.
        # NOTE(review): the `loop=` argument to asyncio.Future was removed in
        # Python 3.10 — verify the supported Python range.
        self._request_futures[correlation_id] = asyncio.Future(loop=self.event_loop)

        assert self.history_exchange is not None
        await self._history_connection_watchdog.established()
        try:
            # TOC/TOU hazard: by the time we publish, the data connection might
            # be gone again, even if we waited for it to be established before.
            await self.history_exchange.publish(msg, routing_key=metric)
        except ChannelInvalidStateError as e:
            # Trying to publish on a closed channel results in a ChannelInvalidStateError
            # from aiormq. Let's wrap that in a more descriptive error.
            raise PublishFailed(
                f"Failed to publish data chunk for metric '{metric!r}' "
                f"on exchange '{self.history_exchange}' ({self.history_connection})"
            ) from e

        try:
            result = await asyncio.wait_for(
                self._request_futures[correlation_id], timeout=timeout
            )
        finally:
            # Always drop the pending future, even on timeout/cancellation.
            del self._request_futures[correlation_id]

        return result

    async def history_aggregate(
        self,
        metric: str,
        start_time: Optional[Timestamp] = None,
        end_time: Optional[Timestamp] = None,
        timeout: float = 60,
    ) -> TimeAggregate:
        """Aggregate values of a metric for the specified span of time.

        Args:
            metric: Name of the metric to aggregate.
            start_time: Only aggregate values from this point in time onward.
                If omitted, aggregation starts at the first data point of
                this metric.
            end_time: Only aggregate values up to this point in time.
                If omitted, aggregation includes the most recent values of
                this metric.
            timeout: Operation timeout in seconds.

        Returns:
            A single aggregate over values of this metric, including
            minimum/maximum/average/etc. values.

        Raises:
            ~exceptions.InvalidHistoryResponse:
        """
        response: HistoryResponse = await self.history_data_request(
            metric=metric,
            start_time=start_time,
            end_time=end_time,
            interval_max=None,
            request_type=HistoryRequestType.AGGREGATE,
            timeout=timeout,
        )
        if len(response) == 1:
            return next(response.aggregates())
        else:
            raise InvalidHistoryResponse(
                f"contains {len(response)} aggregates, expected 1"
            )

    async def history_aggregate_timeline(
        self,
        metric: str,
        *,
        interval_max: Timedelta,
        start_time: Optional[Timestamp] = None,
        end_time: Optional[Timestamp] = None,
        timeout: float = 60,
    ) -> Iterator[TimeAggregate]:
        """Aggregate values of a metric in multiple steps.

        Each aggregate spans values *at most* :literal:`interval_max` apart.
        Aggregates are returned in order, consecutive aggregates span
        consecutive values of this metric.  Together, all aggregates span
        all values from :literal:`start_time` to :literal:`end_time`,
        inclusive.

        Args:
            metric: Name of the metric to aggregate.
            interval_max: Maximum timespan of values covered by each aggregate.
            start_time: Only aggregate values from this point in time onward.
                If omitted, aggregation starts at the first data point of
                this metric.
            end_time: Only aggregate values up to this point in time.
                If omitted, aggregation includes the most recent values of
                this metric.
            timeout: Operation timeout in seconds.

        Returns:
            An iterator over aggregates for this metric.

        Raises:
            ~exceptions.InvalidHistoryResponse:
        """
        response: HistoryResponse = await self.history_data_request(
            metric=metric,
            start_time=start_time,
            end_time=end_time,
            interval_max=interval_max,
            request_type=HistoryRequestType.AGGREGATE_TIMELINE,
            timeout=timeout,
        )
        try:
            return response.aggregates()
        except ValueError:
            raise InvalidHistoryResponse("AGGREGATE_TIMELINE contains no aggregates")

    async def history_last_value(
        self, metric: str, timeout: float = 60
    ) -> Optional[TimeValue]:
        """Fetch the last value recorded for a metric.

        If this metric has no values recorded, return :literal:`None`.

        Args:
            metric: Name of the metric of interest.
            timeout: Operation timeout in seconds.

        Raises:
            ~exceptions.InvalidHistoryResponse:
        """
        result = await self.history_data_request(
            metric,
            start_time=None,
            end_time=None,
            interval_max=None,
            request_type=HistoryRequestType.LAST_VALUE,
            timeout=timeout,
        )
        try:
            return next(result.values(), None)
        except ValueError:
            raise InvalidHistoryResponse("LAST_VALUE returned more than 1 value")

    async def history_raw_timeline(
        self,
        metric: str,
        start_time: Optional[Timestamp] = None,
        end_time: Optional[Timestamp] = None,
        timeout: float = 60,
    ) -> Iterator[TimeValue]:
        """Retrieve raw values of a metric within the specified span of time.

        Omitting both :literal:`start_time` and :literal:`end_time` yields
        all values recorded for this metric, omitting either one yields
        values up to/starting at a point in time.

        Args:
            metric: Name of the metric.
            start_time: Only retrieve values from this point in time onward.
                If omitted, include all values before :literal:`end_time`.
            end_time: Only aggregate values up to this point in time.
                If omitted, include all values after :literal:`start_time`.
            timeout: Operation timeout in seconds.

        Returns:
            An iterator over values of this metric.

        Raises:
            ~exceptions.InvalidHistoryResponse:
        """
        # interval_max=0 with FLEX_TIMELINE asks the database for raw values.
        response: HistoryResponse = await self.history_data_request(
            metric=metric,
            start_time=start_time,
            end_time=end_time,
            interval_max=Timedelta(0),
            request_type=HistoryRequestType.FLEX_TIMELINE,
            timeout=timeout,
        )
        try:
            return response.values(convert=False)
        except ValueError:
            raise InvalidHistoryResponse("Response contained no values")

    @rpc_handler("config")
    async def _history_config(self, **kwargs: Any) -> None:
        # Config updates are accepted but currently only logged.
        logger.info("received config {}", kwargs)

    async def _history_consume(self, extra_queues: List[aio_pika.Queue] = []) -> None:
        # Start consuming responses on our queue (plus any extras).
        # NOTE(review): mutable default argument `extra_queues=[]` — harmless
        # as long as it is never mutated, but a known Python pitfall.
        # NOTE(review): `loop=` for asyncio.gather was removed in Python 3.10.
        logger.info("starting history consume")
        assert self.history_response_queue is not None
        queues = [self.history_response_queue] + extra_queues
        await asyncio.gather(
            *[queue.consume(self._on_history_response) for queue in queues],
            loop=self.event_loop,
        )

    async def _on_history_response(self, message: aio_pika.IncomingMessage) -> None:
        # Resolve the pending future matching this message's correlation id.
        async with message.process(requeue=True):
            body = message.body
            from_token = message.app_id
            correlation_id = message.correlation_id
            request_duration = float(message.headers.get("x-request-duration", "-1"))

            logger.debug(
                "received message from {}, correlation id: {}, reply_to: {}",
                from_token,
                correlation_id,
                message.reply_to,
            )
            history_response_pb = history_pb2.HistoryResponse()
            history_response_pb.ParseFromString(body)

            future = self._request_futures.get(correlation_id)

            # Make sure this message corresponds to a request we sent
            if future is None:
                logger.error(
                    "received history response with unknown correlation id {} "
                    "from {}",
                    correlation_id,
                    from_token,
                )
                return

            # Ensure we did not already handle this response
            if future.done():
                logger.error(
                    "history response from {} with correlation ID {} was handled already",
                    from_token,
                    correlation_id,
                )
                return

            # Parse the history response. If the database returned an error,
            # raise HistoryError in the code awaiting the parsed response.
            try:
                history_response = HistoryResponse(
                    history_response_pb, request_duration
                )

                logger.debug("message is a history response")
                future.set_result(history_response)
            except HistoryError as e:
                logger.debug("message is a history response containing an error: {}", e)
                future.set_exception(e)

    def _on_history_connection_close(
        self, sender: Any, _exception: Optional[BaseException]
    ) -> None:
        # Notify the watchdog; it will time out if we cannot reconnect.
        self._history_connection_watchdog.set_closed()

    def _on_history_connection_reconnect(
        self, sender: Any, connection: aio_pika.Connection
    ) -> None:
        # After a reconnect our (non-robust) response queue is gone, so we
        # must re-register with the manager in a background task.
        logger.info("History connection ({}) reestablished!", connection)

        if self._reregister_task is not None and not self._reregister_task.done():
            logger.warning(
                "History connection was reestablished, but another reregister task is still running!"
            )
            self._reregister_task.cancel()

        self._reregister_task = self.event_loop.create_task(
            self._reregister(connection)
        )

        def reregister_done(task: Task[None]) -> None:
            try:
                exception = task.exception()
                if exception is None:
                    self._history_connection_watchdog.set_established()
                else:
                    logger.error(
                        f"Reregister failed with an unhandled exception: {exception}"
                    )
                    raise exception
            except CancelledError:
                logger.warning("Reregister task was cancelled!")

        self._reregister_task.add_done_callback(reregister_done)

    async def _reregister(self, connection: aio_pika.Connection) -> None:
        # Re-run the registration RPC and re-declare the response queue after
        # a reconnect; the exchange and data server address must not change.
        logger.info(
            "Reregistering as history client...",
        )
        response = await self.rpc("history.register")
        assert response is not None
        assert self.history_exchange is not None
        assert response["historyExchange"] == self.history_exchange.name
        assert (
            self.derive_address(response["dataServerAddress"])
            == self.data_server_address
        )

        await self._declare_history_queue(response["historyQueue"])

        if "config" in response:
            await self.rpc_dispatch("config", **response["config"])

        logger.debug("Restarting consume...")
        await self._history_consume()

    async def _declare_history_queue(self, name: str) -> None:
        # The manager declares the queue and we only connect to that queue with passive=True
        # But when a disconnect happens, the queue gets deleted. Therefore, there is no
        # way, how a robust connection could reconnect to that queue. Hence, we set
        # robust=False and handle the reconnect ourselfs.
        # (See self._on_history_connection_reconnect())
        assert self.history_channel is not None
        self.history_response_queue = await self.history_channel.declare_queue(
            name=name, passive=True, robust=False
        )
StarcoderdataPython
8021347
<gh_stars>0
from collections import deque
from typing import Dict, NewType, Optional, Protocol, Tuple, Tuple, TypeVar, Union, List

# NOTE TO tree: TOPLEFT = (0, 0)
"""
        y1
(0, 0) ------- (0, 5)
       |     |
    x1 |     | x2
       -------
(0, 5)         (5, 5)
        y2
1. (x, y)
2. x1 < x2
3. y1 < y2
"""

T = TypeVar("T")
UID = NewType("UID", str)

# all the computation should be done in the 'RTree' class
# RTreeEntity & RTreeNode serves it purpose as a data container only
# the root will start out as a branch type and remain as a branch type


class NodeOverflowError(Exception):
    ...


class NodeUnderflowError(Exception):
    ...


class EntityNotFoundError(Exception):
    ...


class DegenerateNodeError(Exception):
    ...


class BBox(Protocol):
    # Structural type: anything with an id and an axis-aligned bounding box.
    _id: UID
    xmin: int
    ymin: int
    xmax: int
    ymax: int


def intersect(this_bbox: BBox, that_bbox: BBox) -> bool:
    # True when the two boxes overlap with positive area (touching edges do
    # NOT count, because of the >=/<= comparisons).
    return not (
        this_bbox.xmin >= that_bbox.xmax
        or this_bbox.xmax <= that_bbox.xmin
        or this_bbox.ymin >= that_bbox.ymax
        or this_bbox.ymax <= that_bbox.ymin
    )


def within(this_bbox: BBox, that_bbox: BBox) -> bool:
    # True when this_bbox is entirely contained in that_bbox (edges may touch).
    return not (
        that_bbox.xmin > this_bbox.xmin
        or that_bbox.ymin > this_bbox.ymin
        or that_bbox.xmax < this_bbox.xmax
        or that_bbox.ymax < this_bbox.ymax
    )


class RTreeObject:
    """Base data holder for tree nodes/entities: parent link + bounding box."""

    __slots__ = ("parent", "xmin", "ymin", "xmax", "ymax", "area")

    def __init__(
        self,
        parent: Optional["RTreeNode"] = None,
        xmin: int = float("inf"),
        ymin: int = float("inf"),
        xmax: int = -float("inf"),
        ymax: int = -float("inf"),
    ) -> None:
        # Defaults form an "empty" box (inf..-inf) so any real box extends it.
        self.parent = parent
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.area = (self.xmax - self.xmin) * (self.ymax - self.ymin)


class RTreeEntity(RTreeObject):
    """Leaf payload: a stored bounding box identified by `_id`."""

    __slots__ = ("_id",)

    def __init__(
        self, _id: UID, xmin: int, ymin: int, xmax: int, ymax: int, parent: Optional["RTreeNode"] = None
    ) -> None:
        super().__init__(parent, xmin, ymin, xmax, ymax)
        self._id = _id

    def __repr__(self) -> str:
        return f"RTreeEntity(_id={self._id}, parent={self.parent})"


class RTreeNode(RTreeObject):
    """Internal/leaf tree node; its bbox is the union of its children's."""

    __slots__ = ("height", "is_leaf", "children")

    def __init__(
        self,
        height: int,
        is_leaf: bool,
        children: List[Union["RTreeEntity", "RTreeNode"]],
        parent: Optional["RTreeNode"] = None,
        xmin: int = float("inf"),
        ymin: int = float("inf"),
        xmax: int = -float("inf"),
        ymax: int = -float("inf"),
    ) -> None:
        super().__init__(parent, xmin, ymin, xmax, ymax)
        self.height = height
        self.is_leaf = is_leaf
        self.children = children
        # Recompute the bbox from children immediately (overrides the
        # xmin/... arguments passed above).
        self.update()

    def update(self) -> None:
        # Recompute this node's bbox as the union of its children's bboxes.
        # With no children the box reverts to the empty (inf..-inf) box.
        xmin = ymin = float("inf")
        xmax = ymax = -float("inf")
        for child in self.children:
            xmin = child.xmin if xmin > child.xmin else xmin
            ymin = child.ymin if ymin > child.ymin else ymin
            xmax = child.xmax if xmax < child.xmax else xmax
            ymax = child.ymax if ymax < child.ymax else ymax
        self.xmin = xmin
        self.ymin = ymin
        self.xmax = xmax
        self.ymax = ymax
        self.area = (self.xmax - self.xmin) * (self.ymax - self.ymin)

    def __repr__(self) -> str:
        return f"RTreeNode(children_num={len(self.children)}, is_leaf={self.is_leaf}, height={self.height}, parent={self.parent})"


class RTree:
    """An R-tree spatial index over BBox-like objects."""

    def __init__(self, max_capacity: int = 9, min_capacity: Optional[int] = None) -> None:
        # min_capacity defaults to half of max_capacity.
        # NOTE(review): `!= None` should idiomatically be `is not None`.
        self.max_capacity = max_capacity
        self.min_capacity = min_capacity if min_capacity != None else int(max_capacity / 2)
        # Fast id -> entity lookup for remove().
        self.entity_index: Dict[UID, RTreeEntity] = {}
        self.root = RTreeNode(children=[], height=0, is_leaf=True)

    def insert(self, entity: BBox) -> None:
        # Wrap the caller's object in our own entity record and insert it at
        # leaf level (the root's height).
        entity_obj = RTreeEntity(
            _id=entity._id,
            xmin=entity.xmin,
            ymin=entity.ymin,
            xmax=entity.xmax,
            ymax=entity.ymax,
        )
        self.entity_index[entity._id] = entity_obj
        self._insert(entity_obj, self.root.height)

    def _insert(self, node_to_insert: Union[RTreeEntity, RTreeNode], height: int) -> None:
        # NOTE(review): `choose_subtree` is not visible in this chunk —
        # presumably defined further down the (truncated) file; confirm.
        xmin, ymin, xmax, ymax = node_to_insert.xmin, node_to_insert.ymin, node_to_insert.xmax, node_to_insert.ymax
        node = self.choose_subtree(height, xmin, ymin, xmax, ymax)
        node.children.append(node_to_insert)
        node_to_insert.parent = node

        # Walk up: once a node is within capacity, just extend ancestor
        # bboxes and stop; otherwise split and continue at the parent.
        while node:
            if len(node.children) <= self.max_capacity:
                while node:
                    n_xmin, n_ymin, n_xmax, n_ymax = node.xmin, node.ymin, node.xmax, node.ymax
                    node.xmin = xmin if xmin < n_xmin else n_xmin
                    node.ymin = ymin if ymin < n_ymin else n_ymin
                    node.xmax = xmax if xmax > n_xmax else n_xmax
                    node.ymax = ymax if ymax > n_ymax else n_ymax
                    node = node.parent
                break
            self.split(node)
            node = node.parent

    def load(self, entities: List[BBox]) -> None:
        # Bulk-load entities, then merge the resulting subtree with the
        # current root.
        # NOTE(review): the small-input branch appears to be missing a
        # `return` — after inserting one-by-one it falls through and loads
        # the same entities again.  Also `self._insert(node)` at the bottom
        # omits the required `height` argument (TypeError at runtime).
        if len(entities) <= self.min_capacity:
            [self.insert(entity) for entity in entities]

        for index, entity in enumerate(entities):
            _id = entity._id
            entity_obj = RTreeEntity(
                _id=_id,
                xmin=entity.xmin,
                ymin=entity.ymin,
                xmax=entity.xmax,
                ymax=entity.ymax,
            )
            self.entity_index[_id] = entity_obj
            entities[index] = entity_obj

        node = self._load(entities)
        root = self.root
        if not self.root.children:
            self.root = node
            return

        root_height = root.height
        node_height = node.height
        if node_height == root_height:
            self.split_root(node)
            return

        if node_height > root_height:
            # Swap so the taller tree becomes the root and the shorter one
            # is inserted into it.
            __temp = root
            self.root = node
            node = __temp

        self._insert(node)

    def _load(self, nodes: List[RTreeEntity | RTreeNode], height: int = 0, is_leaf: bool = True) -> RTreeNode:
        # Bottom-up bulk loading: pack sorted nodes into parents of roughly
        # equal size, then recurse one level up until a single root remains.
        total_nodes = len(nodes)

        if total_nodes <= self.min_capacity:
            root_node = RTreeNode(height=height, is_leaf=is_leaf, parent=None, children=nodes)
            for node in nodes:
                node.parent = root_node
            return root_node

        merged_nodes = []
        self.axis_sort(nodes)

        # Ceiling division: number of parents needed at this level.
        total_parent_nodes = -(-total_nodes // (self.max_capacity - 1))
        total_partition, total_remainder_node = divmod(total_nodes, total_parent_nodes)

        for i in range(total_parent_nodes):
            # Spread the remainder over the first partitions.
            node_partition = nodes[
                i * total_partition + min(i, total_remainder_node) : (i + 1) * total_partition + min(i + 1, total_remainder_node)
            ]
            parent_node = RTreeNode(height=height, is_leaf=is_leaf, parent=None, children=node_partition)
            for node in node_partition:
                node.parent = parent_node
            merged_nodes.append(parent_node)

        return self._load(merged_nodes, height + 1, is_leaf=False)

    def remove(self, _id: UID) -> None:
        # Remove an entity by id; underfull leaves are dissolved and their
        # remaining entities reinserted (classic R-tree condense step).
        try:
            entity_node = self.entity_index[_id]
            del self.entity_index[_id]
        except KeyError as err:
            raise EntityNotFoundError(f"Entity with the id '{_id}' does not exist in RTree") from err

        node = entity_node.parent
        node.children.remove(entity_node)

        entity_to_reallocate: List[RTreeEntity] = []
        # NOTE(review): the nesting of the branches below is reconstructed
        # from whitespace-mangled source — confirm against the original.
        while node:
            parent_node = node.parent
            total_children = len(node.children)

            if not parent_node:
                # At the root: collapse to a fresh empty root if drained.
                if total_children == 0:
                    self.root = RTreeNode(children=[], height=0, is_leaf=True)
                break
            elif total_children < self.min_capacity:
                if node.is_leaf:
                    # Dissolve the underfull leaf; reinsert its entities later.
                    entity_to_reallocate.extend(node.children)
                    parent_node.children.remove(node)
                    node.children.clear()
                elif total_children == 0:
                    # Empty internal node: just unlink it.
                    parent_node.children.remove(node)
            else:
                # Healthy node: shrink its bbox to fit remaining children.
                node.update()

            node = parent_node

        for entity in entity_to_reallocate:
            del self.entity_index[entity._id]
            self.insert(entity)

    def query(self, bbox: BBox) -> List[UID]:
        # Collect the ids of all entities intersecting `bbox` (DFS with
        # whole-subtree shortcut when a node is fully contained).
        if not intersect(self.root, bbox):
            return []
        elif within(self.root, bbox):
            return self.get_all_entities(self.root)

        to_process = deque([self.root])
        to_return: List["RTreeNode"] = []
        while to_process:
            node = to_process.pop()
            if not intersect(node, bbox):
                continue
            if node.is_leaf:
                to_return.extend([child._id for child in node.children])
            elif within(node, bbox):
                to_return.extend(self.get_all_entities(node))
            else:
                to_process.extend(node.children)
        return to_return

    def collide(self, bbox: BBox) -> bool:
        # True if `bbox` overlaps any stored entity; early-exits on the
        # first hit.
        if not intersect(self.root, bbox):
            return False
        elif within(self.root, bbox):
            return True

        to_process = deque([self.root])
        while to_process:
            node = to_process.pop()
            if not intersect(node, bbox):
                continue
            if isinstance(node, RTreeEntity) or within(node, bbox):
                return True
            to_process.extend(node.children)
        return False

    def get_all_entities(self, node: RTreeNode) -> List[UID]:
        # Gather every entity in the subtree rooted at `node`.
        # NOTE(review): extends with the leaf's children (RTreeEntity
        # objects), not their `_id`s — the declared List[UID] return type
        # does not match; confirm which the callers expect.
        to_process = deque([node])
        to_return: List[UID] = []
        while to_process:
            node = to_process.pop()
            if node.is_leaf:
                to_return.extend(node.children)
                continue
            to_process.extend(node.children)
        return to_return

    def split(self, old_node: RTreeNode) -> None:
        # Split an overflowing node: sort children along the better axis,
        # pick a split index, and move the upper part into a new sibling.
        self.axis_sort(old_node.children)
        old_node_children = old_node.children
        index = self.choose_split_index(old_node_children)
        new_node_children, old_node.children = old_node_children[index:], old_node_children[:index]
        old_node.update()
        new_node = RTreeNode(
            children=new_node_children,
            is_leaf=old_node.is_leaf,
            height=old_node.height,
            parent=old_node.parent,
        )
        for child in new_node_children:
            child.parent = new_node
        if old_node.parent:
            old_node.parent.children.append(new_node)
            return
        # Splitting the root requires growing the tree by one level.
        self.split_root(new_node)

    def split_root(self, root_sibling: RTreeNode) -> None:
        # Grow the tree: new root adopts the old root and its new sibling.
        new_root = RTreeNode(
            children=[self.root, root_sibling],
            height=self.root.height + 1,
            is_leaf=False,
        )
        self.root.parent = root_sibling.parent = new_root
        self.root = new_root

    def choose_split_index(self, nodes: List[RTreeNode]) -> int:
        """Pick the split position for an (already axis-sorted) child list.

        Seeds two groups with the first and last child, then assigns each
        middle child to a group by comparing overlap enlargement (ties
        broken by merged area), while forcing min_capacity on both sides.
        Returns the number of children that stay in the first group.
        """
        # Group-1 seed bbox (running union).
        this_bbox = nodes[0]
        xmin_1, ymin_1, xmax_1, ymax_1 = this_bbox.xmin, this_bbox.ymin, this_bbox.xmax, this_bbox.ymax
        # Group-2 seed bbox (running union).
        that_bbox = nodes[-1]
        xmin_2, ymin_2, xmax_2, ymax_2 = that_bbox.xmin, that_bbox.ymin, that_bbox.xmax, that_bbox.ymax
        this_node_num = that_node_num = 1
        total_node = self.max_capacity + 1
        for index, bbox in enumerate(nodes[1:-1], 1):
            b_xmin, b_ymin, b_xmax, b_ymax = bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax
            # o_* track the overlap region between the candidate and each
            # group bbox; m_* track the would-be merged (union) bbox.
            o_xmin_1, o_ymin_1, o_xmax_1, o_ymax_1 = b_xmin, b_ymin, b_xmax, b_ymax
            o_xmin_2, o_ymin_2, o_xmax_2, o_ymax_2 = b_xmin, b_ymin, b_xmax, b_ymax
            m_xmin_1, m_ymin_1, m_xmax_1, m_ymax_1 = xmin_1, ymin_1, xmax_1, ymax_1
            m_xmin_2, m_ymin_2, m_xmax_2, m_ymax_2 = xmin_2, ymin_2, xmax_2, ymax_2
            if xmin_1 > b_xmin:
                m_xmin_1 = b_xmin
                o_xmin_1 = xmin_1
            if ymin_1 > b_ymin:
                m_ymin_1 = b_ymin
                o_ymin_1 = ymin_1
            if xmax_1 < b_xmax:
                m_xmax_1 = b_xmax
                o_xmax_1 = xmax_1
            if ymax_1 < b_ymax:
                m_ymax_1 = b_ymax
                o_ymax_1 = ymax_1
            dx_1, dy_1 = (o_xmax_1 - o_xmin_1), (o_ymax_1 - o_ymin_1)
            # Negative extent means no overlap.
            overlapped_area_1 = dx_1 * dy_1 if (dx_1 >= 0) and (dy_1 >= 0) else 0
            merged_area_1 = (m_xmax_1 - m_xmin_1) * (m_ymax_1 - m_ymin_1)
            if xmin_2 > b_xmin:
                m_xmin_2 = b_xmin
                o_xmin_2 = xmin_2
            if ymin_2 > b_ymin:
                m_ymin_2 = b_ymin
                o_ymin_2 = ymin_2
            if xmax_2 < b_xmax:
                m_xmax_2 = b_xmax
                o_xmax_2 = xmax_2
            if ymax_2 < b_ymax:
                m_ymax_2 = b_ymax
                o_ymax_2 = ymax_2
            dx_2, dy_2 = (o_xmax_2 - o_xmin_2), (o_ymax_2 - o_ymin_2)
            overlapped_area_2 = dx_2 * dy_2 if (dx_2 >= 0) and (dy_2 >= 0) else 0
            merged_area_2 = (m_xmax_2 - m_xmin_2) * (m_ymax_2 - m_ymin_2)
            if (total_node - index) <= self.min_capacity:
                # Too few nodes left to choose freely: balance the groups so
                # both end with at least min_capacity children.
                if this_node_num < that_node_num:
                    this_node_num += 1
                else:
                    that_node_num += 1
            elif overlapped_area_1 > overlapped_area_2:
                this_node_num += 1
            elif overlapped_area_1 < overlapped_area_2:
                that_node_num += 1
            else:
                # Tie on overlap: prefer the group with smaller merged area.
                if merged_area_1 > merged_area_2:
                    that_node_num += 1
                else:
                    this_node_num += 1
            # NOTE(review): both running bboxes are replaced by their merged
            # versions each iteration, regardless of which group the node
            # joined — looks suspicious; confirm against the original.
            xmin_2, ymin_2, xmax_2, ymax_2 = m_xmin_2, m_ymin_2, m_xmax_2, m_ymax_2
            xmin_1, ymin_1, xmax_1, ymax_1 = m_xmin_1, m_ymin_1, m_xmax_1, m_ymax_1
        return this_node_num

    def calculate_bbox_distribution(self, bboxes: List[RTreeNode]) -> int:
        # Sum of the two half-lists' bbox margins (perimeter halves), used
        # to compare candidate sort axes.
        # NOTE(review): xmin/ymin/xmax/ymax are NOT reset between the two
        # halves, so the second margin is computed over the running union of
        # both halves — confirm whether that is intended.
        first_half_dataset, second_half_dataset = (
            bboxes[: self.min_capacity],
            bboxes[self.min_capacity :],
        )
        margin = 0
        xmin = ymin = float("inf")
        xmax = ymax = -float("inf")
        for bbox in first_half_dataset:
            b_xmin, b_ymin, b_xmax, b_ymax = bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax
            xmin = b_xmin if b_xmin < xmin else xmin
            ymin = b_ymin if b_ymin < ymin else ymin
            xmax = b_xmax if b_xmax > xmax else xmax
            ymax = b_ymax if b_ymax > ymax else ymax
        margin += (xmax - xmin) + (ymax - ymin)
        for bbox in second_half_dataset:
            b_xmin, b_ymin, b_xmax, b_ymax = bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax
            xmin = b_xmin if b_xmin < xmin else xmin
            ymin = b_ymin if b_ymin < ymin else ymin
            xmax = b_xmax if b_xmax > xmax else xmax
            ymax = b_ymax if b_ymax > ymax else ymax
        margin += (xmax - xmin) + (ymax - ymin)
        return margin

    def axis_sort(self, bboxes: List[RTreeEntity | RTreeNode]) -> List[RTreeEntity | RTreeNode]:
        """
        assign half of the node's children's bbox to (left // up) TBN,
        and the that_bbox half to (right // down) TBN
        combine the margin added by the enlargement of the TBN
        by the that_bbox half of the node's children's bbox
        TBN: temporary bbox node
        """
        # gettting the distribution of the X-axis
        x_margin = 0
        bboxes.sort(key=lambda node: node.xmin)
x_margin += self.calculate_bbox_distribution(bboxes) x_margin += self.calculate_bbox_distribution(bboxes[::-1]) # gettting the distribution of the Y-axis y_margin = 0 bboxes.sort(key=lambda node: node.ymin) y_margin += self.calculate_bbox_distribution(bboxes) y_margin += self.calculate_bbox_distribution(bboxes[::-1]) if y_margin > x_margin: bboxes.sort(key=lambda node: node.xmin) def choose_subtree(self, height: int, xmin: int, ymin: int, xmax: int, ymax: int) -> RTreeNode: depth = 0 target_node = self.root while not target_node.is_leaf and depth != height: min_area = min_dead_space = float("inf") children = target_node.children for child in children: c_xmin, c_ymin, c_xmax, c_ymax = child.xmin, child.ymin, child.xmax, child.ymax t_xmin = c_xmin if xmin > c_xmin else xmin t_ymin = c_ymin if ymin > c_ymin else ymin t_xmax = c_xmax if xmax < c_xmax else xmax t_ymax = c_ymax if ymax < c_ymax else ymax area = child.area merged_area = (t_xmax - t_xmin) * (t_ymax - t_ymin) dead_space = merged_area - area if dead_space < min_dead_space: min_dead_space = dead_space target_node = child if area < min_area: min_area = area if dead_space == min_dead_space: target_node = child depth += 1 return target_node
StarcoderdataPython
1924255
""" Multi-job exceptions """ from multi_job.utils.colours import fail from multi_job.utils.emojis import FIRE class PrettyException(Exception): def __init__(self, message): pretty_msg = f"\n{FIRE}{fail('Oh my!')}{FIRE}\n{message}" super().__init__(pretty_msg) class ParserValidationError(PrettyException): pass class ConfigNotGiven(PrettyException): pass class ArgumentMissing(PrettyException): pass class StepError(PrettyException): pass
StarcoderdataPython
4840873
<filename>src/sentry/api/serializers/models/deploy.py from __future__ import absolute_import from sentry.api.serializers import Serializer, register from sentry.models import Deploy, Environment @register(Deploy) class DeploySerializer(Serializer): def get_attrs(self, item_list, user, *args, **kwargs): environments = { e.id: e for e in Environment.objects.filter( id__in=[d.environment_id for d in item_list], ) } result = {} for item in item_list: result[item] = { 'environment': environments.get(item.environment_id), } return result def serialize(self, obj, attrs, user, *args, **kwargs): return { 'environment': attrs.get('environment'), 'dateStarted': obj.date_started, 'dateFinished': obj.date_finished, 'name': obj.name, 'url': obj.url, 'environment': getattr(attrs.get('environment'), 'name', None), }
StarcoderdataPython
11386646
import robin_stocks as r import os import datetime import time as t ''' This is an example script that will print out options data every 10 seconds for 1 minute. It also saves the data to a txt file. The txt file is saved in the same directory as this code. ''' #!!! Fill out username and password username = '' password = '' #!!! login = r.login(username,password) #!!! fill out the specific option information strike = 355 date = "2020-06-26" stock = "AAPL" optionType = "put" #or "put" #!!! # File saving variables minutesToTrack = 1 #in minutes PrintInterval = 10 #in seconds endTime = t.time() + 60 * minutesToTrack fileName = "options.txt" writeType = "w" #or enter "a" to have it continuously append every time script is run # #os.chdir(os.path.dirname(__file__)) path = os.getcwd() filename = os.path.join(path,fileName) fileStream = open(filename, mode=writeType) while t.time() < endTime: time = str(datetime.datetime.now()) #Both write and print the data so that you can view it as it runs. 
fileStream.write("\n") fileStream.write(time) print(time) #Get the data instrument_data = r.get_option_instrument_data(stock,date,strike,optionType) market_data = r.get_option_market_data(stock,date,strike,optionType) fileStream.write("\n") fileStream.write("{} Instrument Data {}".format("="*30,"="*30)) print("{} Instrument Data {}".format("="*30,"="*30)) # instrument_data is a dictionary, and the key/value pairs can be accessed with .items() for key, value in instrument_data.items(): fileStream.write("\n") fileStream.write("key: {:<25} value: {}".format(key,value)) print("key: {:<25} value: {}".format(key,value)) fileStream.write("\n") fileStream.write("{} Market Data {}".format("="*30,"="*30)) print("{} Market Data {}".format("="*30,"="*30)) for key, value in market_data[0].items(): fileStream.write("\n") fileStream.write("key: {:<25} value: {}".format(key,value)) print("key: {:<25} value: {}".format(key,value)) t.sleep(PrintInterval) # make sure to close the file stream when you are done with it. fileStream.close()
StarcoderdataPython
9744908
<reponame>brown170/fudge # <<BEGIN-copyright>> # Copyright 2021, Lawrence Livermore National Security, LLC. # See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: BSD-3-Clause # <<END-copyright>> """ This is the 'Properties of Particles' or PoPs package. It defines a set of Python classes for storing intrinsic particle properties like charge, mass, spin, etc. """
StarcoderdataPython
196526
import numpy as np import matplotlib.pyplot as plt import cv2 import os from scipy import ndimage as ndi from skimage.morphology import watershed from skimage.feature import peak_local_max from sklearn.cluster import MeanShift from PIL import Image size = 100, 100 img_names = ["../Images/Segmentation/strawberry.png", "../Images/Segmentation/shapes.png"] ext_names = ["../Images/Segmentation/coins.png", "../Images/Segmentation/two_halves.png"] output_path_extension = '../OutputImages/Segmentation/' images = [i for i in img_names] ext_images = [i for i in ext_names] def plot_three_images(figure_title, image1, label1, image2, label2, image3, label3, output_path): fig = plt.figure() fig.suptitle(figure_title) # Display the first image fig.add_subplot(1, 3, 1) plt.imshow(image1) plt.axis('off') plt.title(label1) # Display the second image fig.add_subplot(1, 3, 2) plt.imshow(image2) plt.axis('off') plt.title(label2) # Display the third image fig.add_subplot(1, 3, 3) plt.imshow(image3) plt.axis('off') plt.title(label3) plt.show() fig.savefig(output_path) for img_path in images: img = Image.open(img_path) img.thumbnail(size) # Convert the image to 100 x 100 # Convert the image to a numpy matrix img_mat = np.array(img)[:, :, :3] # --------------- Mean Shift algortithm --------------------- # Extract the three RGB colour channels b, g, r = cv2.split(img_mat) # Combine the three colour channels by flatten each channel # then stacking the flattened channels together. 
# This gives the "colour_samples" colour_samples = np.stack((b.flatten(), g.flatten(), r.flatten()), axis=1) # Perform Meanshift clustering ms_clf = MeanShift(bin_seeding=True) ms_labels = ms_clf.fit_predict(colour_samples) # Reshape ms_labels back to the original image shape for displaying the segmentation output ms_labels = np.reshape(ms_labels, b.shape) # ------------- Water Shed algortithm -------------------------- # Convert the image to gray scale and convert the image to a numpy matrix img_array = cv2.cvtColor(img_mat, cv2.COLOR_BGR2GRAY) # Calculate the distance transform distance = ndi.distance_transform_edt(img_array) # Generate the watershed markers local_maximum = peak_local_max(distance, indices=False, footprint=np.ones((3, 3))) markers = ndi.label(local_maximum)[0] # Perform watershed and store the labels ws_labels = watershed(-distance, markers, mask=img_array) # Display the results plot_three_images(img_path, img, "Original Image", ms_labels, "MeanShift Labels", ws_labels, "Watershed Labels", output_path_extension + os.path.split(img_path)[1]) # If you want to visualise the watershed distance markers then try # plotting the code below. # plot_three_images(img_path, img, "Original Image", -distance, "Watershed Distance", # ws_labels, "Watershed Labels", output_path_extension + os.path.split(img_path)[1])
StarcoderdataPython
6419326
<gh_stars>1-10 #! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 from django.core.exceptions import ValidationError from .models import Admin def validate_email_auth(value): email = value try: Admin.objects.get(email=email) except: raise ValidationError("The email is incorrect!")
StarcoderdataPython
4986738
from setuptools import setup, find_packages with open("README.md","r") as fh: long_description = fh.read() setup( name='EMeRGE', long_description=long_description, long_description_content_type="text/markdown", version='v1.5.1', description='Emerging technologies Management and Risk evaluation on distributions Grid Evolution', author='<NAME>', author_email='<EMAIL>', packages=find_packages("EMeRGE"), package_data={".//dssdashboard//assets":["*.css","*.png"]}, url="https://github.com/NREL/EMeRGE", keywords="Distribution System DER technologies management risk impact analysis", install_requires=["dash_html_components==1.0.2", "plotly==4.4.1", "dash_daq==0.3.3", "dash_table==4.6.0", "pyproj==1.9.6", "dash==1.9.0", "OpenDSSDirect.py==0.3.7", "pandas==0.24.2", "numpy==1.16.4", "matplotlib==3.1.0", "dash_core_components==1.8.0", "networkx==2.3"], package_dir={"": "EMeRGE"}, classifiers=[ "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3.7", "Operating System :: OS Independent" ] )
StarcoderdataPython
8011978
<reponame>Ronlin1/switch-case-in-python def Sunday(): return "Sunday" def Monday(): return "Monday" def Tuesday(): return "Tuesday" def Wednesday(): return "Wednesday" def Thursday(): return "Thursday" def Friday(): return "Friday" def Saturday(): return "Saturday" switcher = { 0: Sunday, 1: Monday, 2: Tuesday, 3: Wednesday, 4: Thursday, 5: Friday, 6: Saturday, } def switch(day0fWeek): return switcher.get(day0fWeek)() print(switch(6))
StarcoderdataPython
3523243
#total = 0 #for num in range(101): # total = total + num #print(total) print('My name is') i = 0 while i < 5: print('<NAME> Times (' + str(i) + ')') i = i + 1
StarcoderdataPython
1689685
import uvicorn if __name__ == "__main__": import sys, os sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) # WARNING: You must pass the application as an import string to enable 'reload' or 'workers'. def develop(app): uvicorn.run('blogsley:app', host="0.0.0.0", port=8000, reload=True, log_level="info") if __name__ == "__main__": from blogsley.application import create_app app = create_app() develop(app)
StarcoderdataPython
8081143
<gh_stars>0 """ This script creates a test that fails when garage.tf.algos.TNPG performance is too low. """ from garage.baselines import LinearFeatureBaseline from garage.envs import normalize from garage.envs.box2d import CartpoleEnv import garage.misc.logger as logger from garage.tf.algos import TNPG from garage.tf.envs import TfEnv from garage.tf.policies import GaussianMLPPolicy from tests.fixtures import TfGraphTestCase class TestTNPG(TfGraphTestCase): def test_tnpg_cartpole(self): """Test TNPG with Cartpole environment.""" logger.reset() env = TfEnv(normalize(CartpoleEnv())) policy = GaussianMLPPolicy( name="policy", env_spec=env.spec, hidden_sizes=(32, 32)) baseline = LinearFeatureBaseline(env_spec=env.spec) algo = TNPG( env=env, policy=policy, baseline=baseline, batch_size=10000, max_path_length=100, n_itr=10, discount=0.99, optimizer_args=dict(reg_coeff=5e-2)) last_avg_ret = algo.train(sess=self.sess) assert last_avg_ret > 40
StarcoderdataPython
3438672
<reponame>obyrned1/ledGrid '''This is the test file''' import sys sys.path.append('.') from ledGrid.main import * def test_file_exists(): test_file = "http://claritytrec.ucd.ie/~alawlor/comp30670/input_assign3_d.txt" # give a test file for testing. Will return error if it's not a real link assert file_exists(test_file) != None # as per practical, assert and call the function in main with test file # test that the result isn't empty def test_string_convert(): test_file = "http://claritytrec.ucd.ie/~alawlor/comp30670/input_assign3_d.txt" test_string = file_exists(test_file) output = string_convert(test_string) assert output[0] == ('switch', '109', '360', '331', '987') def test_coordinates(): test_string = ('109','360') output = coordinates(test_string) assert output == [109,360] def test_within_grid(): '''test check if coordinates for a start and stop point that are outside the grid, convert to 0 or the size of grid if they are less or more than those figures''' grid = LightTester(1000) test_startstop = grid.within_grid([-123,2000]) assert test_startstop == ([0,999]) def test_turn_on(): '''test if area covered by given coordinates are converted to True''' test = LightTester(1000) #call the light tester class with N of size 1000 test.turn_on([0,0],[100,100]) # create a test area which has 101*101 lights on on = 0 # going to count the lights on in this area, set count to 0 for i in range (0, 1000): for j in range (0, 1000): #looping through each row and each columns in the grid if test.lights[i][j] == True: #if the test_grid function has returned true in one of these spaces, count it on += 1 assert on == 10201 # both counts should be = 101*101 = 10201 def test_turn_off(): '''test if area covered by given coordinates are converted to False''' #same as test above for turn_on, just opposite test = LightTester(1000) test.turn_on([0,0],[100,100]) off = 0 for i in range (0, 1000): for j in range (0, 1000): if test.lights[i][j] == True: off += 1 assert off == 10201 # both 
counts should be = 101*101 = 10201 def test_switch(): '''test if the switch function turns a true to a false and vice versa given cooridnates''' test = LightTester(1000) test.turn_on([0,0],[100,100]) # run the same test as the test_turn_on, then call the function switch and see if all Trues have been reversed to false # check by counting all false in the 999 x 999 square test.switch([0,0],[100,100]) switched = 0 for i in range (0, 1000): for j in range (0, 1000): if test.lights[i][j] == False: switched += 1 assert switched == 1000000 # 1000 * 1000 = 1000000 def test_light_count(): '''tests the light count function, runs turn_on function, then calls light_count to check if the counts are equal''' test = LightTester(1000) test.turn_on([0,0],[100,100]) assert test.light_count() == 10201
StarcoderdataPython
11352832
import os import platform import json from random import choice def clear(): """Clear terminal.""" if platform.system() == "Windows": os.system("cls") else: os.system("clear") def show_header(disp_width=79): """Show the program header.""" program_name = "QUARANTINE Movie Selector" author = "Gontz" version = "1.1" header = f"[{program_name} v{version} by {author}]" print(header.center(disp_width, "_")) print() def user_menu(users): """Show the main menu.""" prompt = "User number:" while True: clear() show_header() print("Select a user (type 'exit' to quit):\n") for num, user in users.items(): print(f"\t{num}. {user}") opt = input(f"\n{prompt} ") if opt in users.keys() or opt.lower() == "exit": break else: continue return opt def show_login(user, disp_width=79): """Show login info.""" login_info = f"Logged in as {user}." print(login_info.center(disp_width)) print() def database_menu(user): """Show database menu to the specified user""" prompt = "Option number:" while True: clear() show_header() show_login(user) print("Select an option (type 'exit' to log out):\n") print("\t1. Show your database") print("\t2. Add a movie to your database") print("\t3. Remove a movie from your database") print("\t4. Select a random movie from your database") opt = input(f"\n{prompt} ") if opt.lower() in ["1", "2", "3", "4", "exit"]: break else: continue return opt def show_database(user, disp_width=79): """Show a user's database""" clear() show_header() show_login(user) opt_info = "[SHOW DATABASE]" print(opt_info.center(disp_width, "-")) print() filename = "database_" + user.lower() + ".json" error_msg = "ERROR: There is no database file available." 
try: with open(filename) as f: database = json.load(f) except FileNotFoundError: print(error_msg) else: print("Movie list:\n") for movie in database["available"]: print(f"- {movie.title()}") film_count = len(database["available"]) print(f"\nNº of films: {film_count}") input("\nPress ENTER key to continue.") def add_movie(user, disp_width=79): """Add a movie to a user's database""" clear() show_header() show_login(user) opt_info = "[ADD MOVIE]" print(opt_info.center(disp_width, "-")) print() filename = "database_" + user.lower() + ".json" prompt = "Enter the title:" movie = input(f"{prompt} ") movie = movie.lower() try: with open(filename) as f: database = json.load(f) except FileNotFoundError: database = {"available": [movie], "removed": []} with open(filename, "w") as f: json.dump(database, f) print(f"\nMovie {repr(movie.title())} added succesfully.") else: if movie not in database["available"] and movie not in database["removed"]: database["available"].append(movie) database["available"].sort() with open(filename, "w") as f: json.dump(database, f) print(f"\nMovie {repr(movie.title())} added succesfully.") else: if movie in database["available"]: print(f"\nThe movie {repr(movie.title())} is already in your database.") if movie in database["removed"]: print(f"\nThe movie {repr(movie.title())} has already been watched or removed previously.") input("\nPress ENTER key to continue.") def rm_movie(movie, database, filename): """Remove a movie from a database in filename""" database["available"].remove(movie) database["removed"].append(movie) database["removed"].sort() with open(filename, "w") as f: json.dump(database, f) def remove_movie(user, disp_width=79): """Remove a movie from a user's database""" clear() show_header() show_login(user) opt_info = "[REMOVE MOVIE]" print(opt_info.center(disp_width, "-")) print() filename = "database_" + user.lower() + ".json" prompt = "Enter the title:" error_msg = "ERROR: There is no database file available." 
try: with open(filename) as f: database = json.load(f) except FileNotFoundError: print(error_msg) else: movie = input(f"{prompt} ") movie = movie.lower() if movie in database["available"]: rm_movie(movie, database, filename) print(f"\nMovie {repr(movie.title())} removed succesfully.") else: print(f"\nThe movie {repr(movie.title())} is not in your database.") input("\nPress ENTER key to continue.") def select_movie(user, disp_width=79): """Select a random movie from a user's database""" clear() show_header() show_login(user) opt_info = "[SELECT RANDOM MOVIE]" print(opt_info.center(disp_width, "-")) print() filename = "database_" + user.lower() + ".json" error_msg_1 = "ERROR: There is no database file available." error_msg_2 = "ERROR: There is no available movies in your database." try: with open(filename) as f: database = json.load(f) except FileNotFoundError: print(error_msg_1) else: if database["available"]: selection = choice(database["available"]) print(f"The selected movie is: {repr(selection.title())}") print("\nAre you going to watch it? (y/n): ") while True: answer = input() if answer.lower() in ["y", "n"]: break if answer == "y": print(f"\nI'm glad you liked my selection!\nThis movie will be removed from {user}'s database.") rm_movie(selection, database, filename) else: print(f"\nI'm sorry you didn't like my selection!\n{user}, try to add better movies to your database.") else: print(error_msg_2) input("\nPress ENTER key to continue.")
StarcoderdataPython
9605291
<filename>src/configs/configure.py # First import the library import pyrealsense2 as rs import time import json from src.Globals import constants import numpy as np class CameraHandler: __instance = None @staticmethod def get_instance(): """ Static access method. """ if CameraHandler.__instance is None: CameraHandler() return CameraHandler.__instance def __init__(self): self.load() self.pipeline = rs.pipeline() self.config = rs.config() self.config.enable_stream(rs.stream.depth, constants.WIDTH, constants.HEIGHT, rs.format.z16, constants.FPS) self.config.enable_stream(rs.stream.color, constants.WIDTH, constants.HEIGHT, rs.format.bgr8, constants.FPS) self.profile = self.pipeline.start(self.config) if CameraHandler.__instance is not None: raise Exception("This class is a singleton!") else: CameraHandler.__instance = self @staticmethod def find_device_that_supports_advanced_mode(): ctx = rs.context() devices = ctx.query_devices() for dev in devices: if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) \ in constants.DS5_product_ids: return dev raise Exception("No device that supports advanced mode was found") def load(self): try: dev = self.find_device_that_supports_advanced_mode() advnc_mode = rs.rs400_advanced_mode(dev) print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled") # Loop until we successfully enable advanced mode while not advnc_mode.is_enabled(): print("Trying to enable advanced mode...") advnc_mode.toggle_advanced_mode(True) # At this point the device will disconnect and re-connect. 
print("Sleeping for 3 seconds...") time.sleep(3) # The 'dev' object will become invalid and we need to initialize it again dev = self.find_device_that_supports_advanced_mode() advnc_mode = rs.rs400_advanced_mode(dev) print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled") with open('src/configs/Hand.json') as f: json_dict = json.loads(f.read()) json_string = json.dumps(json_dict) advnc_mode.load_json(json_string) except Exception as e: print(e) pass def create_filters(self): spatial = rs.spatial_filter() spatial.set_option(rs.option.filter_magnitude, 5) spatial.set_option(rs.option.filter_smooth_alpha, 1) spatial.set_option(rs.option.filter_smooth_delta, 50) spatial.set_option(rs.option.holes_fill, 3) temporal = rs.temporal_filter() hole_filling = rs.hole_filling_filter() disparity_to_depth = rs.disparity_transform(False) depth_to_disparity = rs.disparity_transform(True) filters = {"S": spatial, "T": temporal, "H": hole_filling, "DZ": disparity_to_depth, "ZD": depth_to_disparity} return filters def fetch(self, pipeline): """To get next frame and align depth frame on RGB frame""" frames = pipeline.wait_for_frames() align = rs.align(rs.stream.depth) frames = align.process(frames) color_frame_preprocessing = frames.get_color_frame() depth_data = frames.get_depth_frame() # The commented lines are too remove unnecessary pixels in frames # depth_image = np.asanyarray(Depth_data.get_data()) color_frame = np.asanyarray(color_frame_preprocessing.get_data()) # grey_color = 153 # depth_image_3D = np.dstack((depth_image, depth_image, depth_image)) # bg_removed = np.where((depth_image_3D > constants.clipping_threshold) # | (depth_image_3D <= 0), grey_color, RGB_frame) return color_frame, depth_data def colorize_depth(self, depth_frame): """Generate the heat map""" colorizer = rs.colorizer() colorized_depth = np.asanyarray(colorizer.colorize(depth_frame).get_data()) return colorized_depth def post_processing(self, filters, depth_frame): """Ably different 
filters to decrease noisiness from frames""" depth_frame = filters["ZD"].process(depth_frame) depth_frame = filters["S"].process(depth_frame) depth_frame = filters["T"].process(depth_frame) depth_frame = filters["DZ"].process(depth_frame) return depth_frame def process_frames(self, filters): frame, depth = self.fetch(self.pipeline) depth = self.post_processing(filters, depth) colorized_depth = self.colorize_depth(depth) depth = np.asanyarray(depth.get_data()) return frame, depth, colorized_depth
StarcoderdataPython
12822839
""" .. _basic_plotting: Review of available plotting commands ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This example lists the different plotting commands available, shown with the arguments available. """ from ansys.dpf import core as dpf from ansys.dpf.core import examples # from ansys.dpf.core.plotter import plot_chart # Plot the bare mesh of a model model = dpf.Model(examples.multishells_rst) model.plot(color="w", show_edges=True, title='Model', text='Model plot') # # Additional PyVista kwargs are supported, such as: model.plot(off_screen=True, screenshot='model_plot.png', title='Model', text='Model plot') # Plot a field on its supporting mesh (field location must be Elemental or Nodal) stress = model.results.stress() stress.inputs.requested_location.connect("Nodal") fc = stress.outputs.fields_container() field = fc[0] field.plot(notebook=False, shell_layers=None, show_axes=True, title='Field', text='Field plot') # # Additional PyVista kwargs are supported, such as: field.plot(off_screen=True, screenshot='field_plot.png', title='Field', text='Field plot off') # # # Alternatively one can plot the MeshedRegion associated to the model mesh = model.metadata.meshed_region mesh.plot(field_or_fields_container=None, shell_layers=None, show_axes=True, title='Mesh fc None', text='Mesh plot') # Additional PyVista kwargs are supported, such as: mesh.plot(off_screen=True, screenshot='mesh_plot.png', title='Mesh', text='Mesh plot off') # A fields_container or a specific field can be given to plot on the mesh. mesh.plot(field_or_fields_container=fc, title='Mesh with fields container', text='Mesh fc plot') mesh.plot(field_or_fields_container=field, title='Mesh with field', text='Mesh field plot') # # One can also plot a MeshesContainer. Here our mesh is split by material. 
split_mesh_op = dpf.Operator("split_mesh") split_mesh_op.connect(7, mesh) split_mesh_op.connect(13, "mat") meshes_cont = split_mesh_op.get_output(0, dpf.types.meshes_container) meshes_cont.plot(title='Meshes Container', text='Meshes Container plot') # A fields_container can be given as input, with results on each part of our split mesh. disp_op = dpf.Operator("U") disp_op.connect(7, meshes_cont) ds = dpf.DataSources(examples.multishells_rst) disp_op.connect(4, ds) disp_fc = disp_op.outputs.fields_container() meshes_cont.plot(disp_fc, title='Meshes Container disp_fc', text='Meshes Container disp_fc plot') # Additional PyVista kwargs are supported, such as: meshes_cont.plot(off_screen=True, screenshot='meshes_cont_plot.png', title='Meshes Container', text='Meshes Container plot')
StarcoderdataPython
5049456
#!/usr/bin/python # -*- coding: utf-8 -*- ### # Copyright (2021) Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); # You may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ### from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = r""" --- module: get_snmpv3_users description: This module will get SNMPv3 users from the servers requirements: - "python >= 3.6" - "ansible >= 2.11" author: - "<NAME> (@Gayathirideviramasamy)" options: baseuri: description: - iLO IP address of the server type: str default: NONE required: true username: description: - Username of the server for authentication type: str default: NONE required: true password: description: - Password of the server for authentication type: str default: NONE required: true http_schema: description: - http or https Protocol type: str default: https required: false """ EXAMPLES = r""" - name: Get SNMPv3 users get_snmpv3_users: baseuri: "***.***.***" username: "abcxyz" password: "******" """ RETURN = r""" expected_result: description: SNMPv3 users from the server returned: List of SNMPv3 users present in the server type: list failure case 1: description: Redfish Package is not installed returned: Failed to import the required Python library (redfish) corrective_action: Install python3-redfish package type: str failure case 2: description: Incorrect/Unreachable server IP address(baseuri) is provided returned: RetriesExhaustedError corrective_action: Provide the correct IP address of the server type: 
str failure case 3: description: Credentials not valid returned: InvalidCredentialsError corrective_action: Validate the credentials type: str failure case 4: description: Getting managers data failed returned: GET on /redfish/v1/Managers/1/ Failed, Status <Status code>, Response <API response> corrective_action: Verify the response in the output message type: str failure case 5: description: Getting list of SNMPv3 users failed returned: GET on /redfish/v1/Managers/1/SnmpService/SNMPUsers/ Failed, Status <Status code>, Response <API response> corrective_action: Verify the response in the output message type: str failure case 6: description: Getting particular SNMPv3 user failed returned: GET on /redfish/v1/Managers/1/SnmpService/SNMPUsers/<SNMPv3 user ID>/ Failed, Status <Status code>, Response <API response> corrective_action: Verify the response in the output message type: str """ import json try: from redfish import redfish_client HAS_REDFISH = True except ImportError: HAS_REDFISH = False from ansible.module_utils.basic import AnsibleModule, missing_required_lib base_uri = "/redfish/v1/" manager_uri = "Managers/1/" def logout(redfishClient, module): redfishClient.logout() def error_msg(module, method, uri, status, response): # Print error message module.fail_json( msg="%s on %s Failed, Status: %s, Response: %s" % (str(method), str(uri), str(status), str(response)) ) def remove_odata(output): # Remove odata variables given in the list remove_list = ["@odata.context", "@odata.etag", "@odata.id", "@odata.type"] for key in remove_list: if key in output: output.pop(key) return output def get_snmpv3_users(redfishClient, module): # Get on Managers API snmpv3_users = [] uri = base_uri + manager_uri response = redfishClient.get(uri) if response.status != 200: error_msg(module, "GET", uri, response.status, response.text) snmp_res = redfishClient.get(uri + "SnmpService/SNMPUsers/") if snmp_res.status != 200: error_msg( module, "GET", uri + "SnmpService/SNMPUsers/", 
snmp_res.status, snmp_res.text, ) snmp_list = json.loads(snmp_res.text) for item in snmp_list["Members"]: item_rsp = redfishClient.get(item["@odata.id"]) if item_rsp.status != 200: error_msg(module, "GET", item["@odata.id"], item_rsp.status, item_rsp.text) # Remove odata details output = remove_odata(json.loads(item_rsp.text)) snmpv3_users.append(output) return snmpv3_users def main(): module = AnsibleModule( argument_spec=dict( baseuri=dict(required=True, type="str"), username=dict(required=True, type="str"), password=dict(required=True, type="str", no_log=True), http_schema=dict(required=False, default="https", type="str"), ) ) if not HAS_REDFISH: module.fail_json(msg=missing_required_lib("redfish")) baseuri = module.params["baseuri"] username = module.params["username"] password = module.params["password"] http_schema = module.params["http_schema"] base_url = "{0}://{1}".format(http_schema, baseuri) redfishClient = redfish_client( base_url=base_url, username=username, password=password ) redfishClient.login() snmpv3_users = get_snmpv3_users(redfishClient, module) logout(redfishClient, module) module.exit_json(changed=False, snmpv3_users=snmpv3_users) if __name__ == "__main__": main()
StarcoderdataPython
317833
<reponame>AhKiu69/hotpoor_autoclick_xhs from random import randrange title = 'asdfghjkl;' a = randrange(0 , (len(title)-1)) title = title.replace(title[a],'口红',1) print(a) print(title)
StarcoderdataPython
5081884
"""Validation tests for UserSchema and MessageSchema.

All error-path tests use ``pytest.raises`` so that a test FAILS when no
``ValidationError`` is raised. The previous ``try/except ValidationError``
pattern put the assertions inside the ``except`` block, so a schema that
wrongly accepted bad data made the test pass silently.
"""
from datetime import datetime, timedelta

import pytest
from marshmallow import ValidationError

from app.constants import STATUS_MAP
from app.schemas import MessageSchema, UserSchema


def test_user_empty_data():
    """Missing required "name" must be rejected."""
    with pytest.raises(ValidationError) as exc:
        UserSchema().load({})
    assert exc.value.messages == {"name": ["Missing data for required field."]}


def test_user_valid_data():
    """A fully populated payload round-trips unchanged."""
    data = {"name": "marco", "email": "<EMAIL>", "phone": "123456789"}
    result = UserSchema().load(data)
    assert result == data


@pytest.mark.parametrize(
    "email", ["a@", "aa.aa.aa", "@a", "aa@aa", "@aa.aa", "<EMAIL>"]
)
def test_user_email_validation(email):
    """Malformed e-mail addresses must be rejected."""
    data = {"name": "marco", "email": email}
    with pytest.raises(ValidationError) as exc:
        UserSchema().load(data)
    assert exc.value.messages == {"email": ["Not a valid email address."]}


def test_message_empty_data():
    """All required message fields must be reported when missing."""
    with pytest.raises(ValidationError) as exc:
        MessageSchema().load({})
    assert exc.value.messages == {
        "kind": ["Missing data for required field."],
        "scheduled": ["Missing data for required field."],
        "text": ["Missing data for required field."],
        "user_id": ["Missing data for required field."],
    }


def test_message_load_auto_fill_created_with_utc_and_microsecond_zero():
    """`created` is auto-filled with UTC now, microseconds zeroed."""
    data = {
        "scheduled": "2040-08-2T16:12:12Z",
        "text": "oijwqoid",
        "kind": "email",
        "status": "sended",
        "user_id": 1,
    }
    result = MessageSchema().load(data)
    expected_result = {
        # NOTE(review): comparing against utcnow() taken after the load can
        # flake across a second boundary — consider freezing time.
        "created": datetime.utcnow().replace(microsecond=0),
        "kind": 1,
        "scheduled": datetime(2040, 8, 2, 16, 12, 12),
        "status": 2,
        "text": "oijwqoid",
        "user_id": 1,
    }
    assert result == expected_result


def test_message_auto_fill_status_field():
    """Omitted `status` defaults to the "scheduled" status code."""
    data = {
        "scheduled": "2030-08-2T16:12:12Z",
        "text": "oijwqoid",
        "kind": "email",
        "user_id": 1,
    }
    result = MessageSchema().load(data)
    assert result["status"] == STATUS_MAP["scheduled"]


@pytest.mark.parametrize(
    "status_input, status_output", [("scheduled", 1), ("sended", 2)]
)
def test_message_status_mapping_str_to_int_on_load(status_input, status_output):
    data = {
        "scheduled": "2040-08-2T16:12:12Z",
        "text": "oijwqoid",
        "kind": "email",
        "status": status_input,
        "user_id": 1,
    }
    result = MessageSchema().load(data)
    assert result["status"] == status_output


@pytest.mark.parametrize(
    "status_input, status_output", [(1, "scheduled"), (2, "sended")]
)
def test_message_status_mapping_int_to_str_on_dump(status_input, status_output):
    data = {
        "scheduled": datetime(2040, 8, 2, 16, 12, 12),
        "text": "oijwqoid",
        "kind": 1,
        "status": status_input,
        "user_id": 1,
    }
    result = MessageSchema().dump(data)
    assert result["status"] == status_output


@pytest.mark.parametrize(
    "kind_input, kind_output",
    [("email", 1), ("sms", 2), ("push", 3), ("whatsapp", 4)],
)
def test_message_kind_mapping_str_to_int_on_load(kind_input, kind_output):
    data = {
        "scheduled": "2040-08-2T16:12:12Z",
        "text": "oijwqoid",
        "kind": kind_input,
        "user_id": 1,
    }
    result = MessageSchema().load(data)
    assert result["kind"] == kind_output


@pytest.mark.parametrize(
    "kind_input, kind_output",
    [(1, "email"), (2, "sms"), (3, "push"), (4, "whatsapp")],
)
def test_message_kind_mapping_int_to_str_on_dump(kind_input, kind_output):
    data = {
        "scheduled": datetime(2040, 8, 2, 16, 12, 12),
        "text": "oijwqoid",
        "kind": kind_input,
        "status": 1,
        "user_id": 1,
    }
    result = MessageSchema().dump(data)
    assert result["kind"] == kind_output


def test_message_options_fields_invalid_options():
    """Unknown `kind`/`status` values must both be reported."""
    data = {
        "scheduled": "2040-08-2T16:12:12Z",
        "text": "oijwqoid",
        "kind": "xxxx",
        "status": "xxxx",
        "user_id": 1,
    }
    with pytest.raises(ValidationError) as exc:
        MessageSchema().load(data)
    assert exc.value.messages == {
        "kind": ["Must be one of: email, sms, push, whatsapp."],
        "status": ["Must be one of: scheduled, sended."],
    }


@pytest.mark.parametrize("minutes_offset", [10, 0])
def test_message_schedule_cannot_be_older_than_utcnow_or_equal_to_utcnow(
    minutes_offset,
):
    """Scheduling in the past (or exactly now) must be rejected."""
    _scheduled = datetime.utcnow().replace(microsecond=0) - timedelta(
        minutes=minutes_offset
    )
    scheduled = _scheduled.strftime("%Y-%m-%dT%H:%M:%SZ")
    data = {
        "scheduled": scheduled,
        "text": "oijwqoid",
        "kind": "sms",
        "user_id": 1,
    }
    with pytest.raises(ValidationError) as exc:
        MessageSchema().load(data)
    assert exc.value.messages == {
        "scheduled": ["Cannot scheduled messages into the past"],
    }
StarcoderdataPython
8134137
import scrapy
import re
import time
import sys


class CondosSpider(scrapy.Spider):
    """Scrapy spider that crawls condo sale listings on livinginsider.com.

    Flow: start_requests -> parse_listing_page (paginated listing pages)
    -> parse_data (one item per listing detail page).
    """
    name = "condos"

    def start_requests(self):
        """Seed the crawl with a single listing-zone URL."""
        # url = "https://www.livinginsider.com/searchword/Condo/Buysell/1/%E0%B8%A3%E0%B8%A7%E0%B8%A1%E0%B8%9B%E0%B8%A3%E0%B8%B0%E0%B8%81%E0%B8%B2%E0%B8%A8-%E0%B8%82%E0%B8%B2%E0%B8%A2-%E0%B8%84%E0%B8%AD%E0%B8%99%E0%B9%82%E0%B8%94.html"
        url = 'https://www.livinginsider.com/living_zone/13/Condo/Buysell/1/%E0%B8%A3%E0%B8%B1%E0%B8%8A%E0%B8%94%E0%B8%B2-%E0%B8%AB%E0%B9%89%E0%B8%A7%E0%B8%A2%E0%B8%82%E0%B8%A7%E0%B8%B2%E0%B8%87.html'
        yield scrapy.Request(url=url, callback=self.parse_listing_page)

    def parse_listing_page(self, response):
        """Yield a request per listing, then follow the "Next" pagination link."""
        print("Scraping: ", response.url)
        # Non-sticky listing cards only; extract each card's detail-page href.
        list_selector = ".istock-list:not(.sticky):not(.sticky-banner) > div.item-desc a::attr(href)"
        urls = response.css(list_selector).getall()
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse_data)
        # Find the pagination anchor whose text contains "Next".
        next_page_url = ""
        paginates = response.css("ul.pagination > li > a")
        for anchor in paginates:
            anchor_text = anchor.css("*::text").get()
            # NOTE(review): anchor_text may be None if an anchor has no text
            # node, which would raise AttributeError here — confirm.
            if anchor_text.find("Next") != -1:
                next_page_url = anchor.css("*::attr(href)").get()
        if (next_page_url != ""):
            yield scrapy.Request(url=next_page_url, callback=self.parse_listing_page)

    def parse_data(self, response):
        """Yield one item for the first thumbnail of a listing detail page."""
        print("Parsing data", response.url)
        thumbs_list = response.css(
            'img.mbSlideDown::attr(src)').getall()
        if len(thumbs_list) < 1:
            print("Error no thumb list", response.url)
            # NOTE(review): sys.exit(1) kills the whole crawl on a single bad
            # page — consider logging and returning instead.
            sys.exit(1)
        # Only the first thumbnail is emitted; the loop over all thumbs was
        # deliberately disabled (commented out below).
        # for idx, thumb in enumerate(thumbs_list):
        idx = 0
        thumb = thumbs_list[idx]
        # The re.sub chains strip the leading markup (up to and including the
        # <img ...> tag) and the trailing </div> from each property cell.
        yield {
            "url": response.url,
            "description": response.css("#detail-topic h1.font-Sarabun::text").get().strip(),
            "price": response.css("#detail-topic p.price-detail::text").get().strip(),
            "area": re.sub("\<\/div\>", "", re.sub(r"^\<.+\<img.+\"\>", "", "".join(response.css("#property-inform>div.row>div:nth-last-child(2)").get().splitlines()))),
            "bedrooms": re.sub("\<\/div\>", "", re.sub(r"^\<.+\<img.+\"\>", "", "".join(response.css("#property-inform>div.row>div:nth-last-child(1)>div:nth-child(1)").get().splitlines()))),
            "restrooms": re.sub("\<\/div\>", "", re.sub(r"^\<.+\<img.+\"\>", "", "".join(response.css("#property-inform>div.row>div:nth-last-child(1)>div:nth-child(2)").get().splitlines()))),
            "floors": re.sub("\<\/div\>", "", re.sub(r"^\<.+\<img.+\"\>", "", "".join(response.css("#property-inform>div.row>div:nth-last-child(1)>div:nth-child(3)").get().splitlines()))),
            "images": thumb,
            "image_index": idx
        }
StarcoderdataPython
4901502
"""Neo4j-backed queries for project/artifact dependency analysis and
hierarchy construction. All Cypher is built with str.format and executed
through the driver transaction object `tx` or a session from utils.get_neo4j().
"""
import json
from collections import defaultdict

import utils

"""
createTreeFromEdges constructs a directed graph structure from a set of directed edges and a
set of vertices. The set of edges and vertices used must contain a single root vertex.
"""


def createTreeFromEdges(edges, vertices, group, project, sub_node_label, sub_node_id):
    """Turn (parent_id, child_id) edges plus neo4j vertex objects into a
    nested children tree, annotating each node with usage counts fetched
    via dependent_method_usage()."""
    nodes = {}
    forest = []
    driver = utils.get_neo4j()
    with driver.session() as session:
        # Fetch per-node usage statistics keyed by the node's 'id' property.
        result = session.read_transaction(dependent_method_usage, group, project, sub_node_label, sub_node_id)
        node_usages = {}
        for record in result:
            node = record.get('node')
            object_to_return = {}
            object_to_return['label'] = list(getattr(node, '_labels'))[0]
            object_to_return['id'] = getattr(node, '_properties').get('id')
            object_to_return['usage'] = record.get("usage")
            object_to_return['project'] = getattr(record.get("proj"), '_properties').get('id')
            object_to_return['distinct_usage'] = record.get("usage_dist")
            object_to_return['properties'] = getattr(node, '_properties')
            object_to_return['name'] = "{}: {}".format(
                object_to_return.get('label'), object_to_return.get('id'))
            node_usages[object_to_return['id']] = object_to_return
    # NOTE(review): this opens a NEW session and closes it; the session used
    # above is already closed by the `with` block — confirm intent.
    driver.session().close()
    # Build a flat dict of display nodes from the raw neo4j vertices,
    # enriched with the usage stats collected above.
    for node_id in vertices.keys():
        nodes[node_id] = {
            'id': node_id,
            "name": getattr(vertices[node_id], '_properties').get('id'),
            "properties": getattr(vertices[node_id], '_properties'),
            "label": list(getattr(vertices[node_id], '_labels'))[0],
            "size": 1,
            "children": []
        }
        id = nodes[node_id]["properties"]["id"]
        nodes[node_id]["id"] = getattr(vertices[node_id], '_properties').get('id')
        nodes[node_id]["name"] = nodes[node_id]["id"].split('.')[-1]
        nodes[node_id]["size"] = node_usages[id]["usage"]
        nodes[node_id]["value"] = node_usages[id]["usage"]
        nodes[node_id]["usage"] = node_usages[id]["usage"]
        nodes[node_id]["distinct_usage"] = node_usages[id]["distinct_usage"]
        nodes[node_id]["label"] = node_usages[id]["label"]
        nodes[node_id]["project"] = node_usages[id]["project"]
        forest.append(nodes[node_id])
    # Must remove shortest paths to ensure there is no duplication. Java hiearchy naming can be used to ensure only direct children are linked.
    # The issue is that project names at the top level don't follow this pattern, so extra work must be done to identify which children are direct children of the
    # project level.
    roots = []
    for i in edges:
        parent_id, child_id = i
        # A node whose "project" equals its own id is the project vertex;
        # its direct children are candidate roots.
        if nodes[parent_id]["project"] == nodes[parent_id]["id"]:
            roots.append(nodes[child_id]["id"])
    tmp = roots.copy()
    # Keep only the outermost roots (drop roots nested under another root).
    for outer_root in tmp:
        roots = [root for root in roots if not root.startswith(outer_root) or root == outer_root]
    for i in edges:
        parent_id, child_id = i
        # Link child under parent when either (a) parent is the project node
        # and child is a surviving root, or (b) the child's dotted name is
        # parent-name + "." + last segment (i.e. a direct Java-style child).
        if nodes[parent_id]["project"] == nodes[parent_id]["id"] and nodes[child_id]["id"] in roots or nodes[parent_id]["id"] + '.' + nodes[child_id]["id"].split(".")[-1] == nodes[child_id]["id"]:
            node = nodes[child_id]
            parent = nodes[parent_id]
            parent['children'].append(node)
            if (node in forest):
                forest.remove(node)
    # forest is now a graph, with a single root vertex
    return forest


"""Returns the total and distinct usage of methods for all dependents for given group and project"""


def dependent_method_usage(tx, group, project, sub_node_label, sub_node_id):
    """Run the usage-count Cypher query; returns the raw result cursor."""
    match = '''
    MATCH p = (proj:Project)-[r:Contains*0..]->(x)-[l:Contains*0..]->(i:Method)-[:Calls]->(m:Method)<-[:Contains*0..]-(y:{2} {{id: "{3}"}})<-[:Contains*0..]-(:Project {{id: "{0}/{1}"}})
    RETURN proj as proj, x as node, count(distinct m) as usage_dist, count(m) as usage
    UNION
    MATCH p = (proj:Project)-[r:Contains*0..]->(i:Method)-[:Calls]->(m:Method)<-[:Contains*0..]-(y:{2} {{id: "{3}"}})<-[:Contains*0..]-(:Project {{id: "{0}/{1}"}})
    RETURN proj as proj, i as node, count(distinct m) as usage_dist, count(m) as usage
    '''.format(group, project, sub_node_label, sub_node_id)
    print(match)
    return tx.run(match)


"""
ast_tree_dependent retrieves the complete AST tree of a specified dependent project,
which is dependent on a specified node of a project under analysis
"""


def ast_tree_dependent_new(tx, group, project, sub_node_label, sub_node_id):
    """Build one tree per dependent project; returns a synthetic root dict
    whose children are those per-project trees."""
    if (sub_node_label != None and sub_node_id != None):
        match = '''
        MATCH p = (proj:Project)-[r:Contains*]->(x)
        WHERE (x)-[:Calls]->(:Method)<-[:Contains*0..]-(:{} {{id: "{}"}})<-[:Contains*0..]-(:Project {{id: "{}/{}"}})
        UNWIND nodes(p) AS Vertex
        WITH proj as proj, collect(DISTINCT Vertex) as nodes, collect(relationships(p)) as paths
        UNWIND paths AS Edges
        UNWIND Edges as Edge
        WITH proj as proj, nodes, [r in collect(distinct Edge) | [id(startNode(r)),id(endNode(r))]] as rels
        RETURN proj, size(nodes),size(rels),nodes,rels
        '''.format(sub_node_label, sub_node_id, group, project)
        print(match)
    else:
        match = '''
        MATCH p = (proj:Project)-[r:Contains*0..]->(x)-[l:Contains*0..]->(i:Method)-[:Calls]->(:Method)<-[:Contains*0..]-(:Project {{id: "{}/{}"}})
        WITH proj as proj, collect(DISTINCT x) as nodes, collect(DISTINCT i) as other_nodes, [r in collect(distinct last(r)) | [id(startNode(r)),id(endNode(r))]] as rels, [l in collect(distinct last(l)) | [id(startNode(l)),id(endNode(l))]] as other_rels
        RETURN proj, size(nodes),size(rels), size(other_nodes), size(other_rels), nodes, rels, other_nodes, other_rels
        '''.format(group, project)
        print(match)
    result = tx.run(match)
    dependents = []
    for record in result:
        rels = []
        nodes = {}
        rels = rels + [x for x in record.get("rels") if x not in rels]
        for node in record.get("nodes"):
            nodes[node.id] = node
        # [0] takes the single root produced by createTreeFromEdges.
        dependents.append(createTreeFromEdges(rels, nodes, group, project, sub_node_label, sub_node_id)[0])
    return {"id": project, "name": project, "children": dependents, "size": 2, "value": 2}


"""
ast_tree_dependent retrieves the complete AST tree of a specified dependent project,
which is dependent on a specified node of a project under analysis
"""


def ast_tree_dependent(tx, group, project, dependent_group, dependent_project, sub_node_label, sub_node_id):
    """Older variant scoped to one named dependent project."""
    if (sub_node_label != None and sub_node_id != None):
        match = '''
        MATCH p = (:Project {{id: "{}/{}"}})-[r:Contains*0..]->(x)-[l:Contains*0..]->(i:Method)-[:Calls]->(:Method)<-[:Contains*0..]-(y:{} {{id: "{}"}})<-[:Contains*0..]-(:Project {{id: "{}/{}"}})
        WITH collect(DISTINCT x) as nodes, collect(DISTINCT i) as other_nodes, [r in collect(distinct last(r)) | [id(startNode(r)),id(endNode(r))]] as rels, [l in collect(distinct last(l)) | [id(startNode(l)),id(endNode(l))]] as other_rels
        RETURN size(nodes),size(rels), size(other_nodes), size(other_rels), nodes, rels, other_nodes, other_rels
        UNION
        MATCH p = (:Project {{id: "{}/{}"}})-[r:Contains*0..]->(x)-[l:Contains*0..]->(i:Method)-[:Calls]->(y:{} {{id: "{}"}})<-[:Contains*0..]-(:Project {{id: "{}/{}"}})
        WITH collect(DISTINCT x) as nodes, collect(DISTINCT i) as other_nodes, [r in collect(distinct last(r)) | [id(startNode(r)),id(endNode(r))]] as rels, [l in collect(distinct last(l)) | [id(startNode(l)),id(endNode(l))]] as other_rels
        RETURN size(nodes),size(rels), size(other_nodes), size(other_rels), nodes, rels, other_nodes, other_rels
        '''.format(dependent_group, dependent_project, sub_node_label, sub_node_id, group, project, dependent_group, dependent_project, sub_node_label, sub_node_id, group, project)
        print(match)
        result = tx.run(match)
    else:
        match = '''
        MATCH p = (:Project {{id: "{}/{}"}})-[r:Contains*0..]->(x)-[l:Contains*0..]->(i:Method)-[:Calls]->(:Method)<-[:Contains*0..]-(:Project {{id: "{}/{}"}})
        WITH collect(DISTINCT x) as nodes, collect(DISTINCT i) as other_nodes, [r in collect(distinct last(r)) | [id(startNode(r)),id(endNode(r))]] as rels, [l in collect(distinct last(l)) | [id(startNode(l)),id(endNode(l))]] as other_rels
        RETURN size(nodes),size(rels), size(other_nodes), size(other_rels), nodes, rels, other_nodes, other_rels
        '''.format(dependent_group, dependent_project, group, project)
        print(match)
        result = tx.run(match)
    to_return = []
    rels = []
    nodes = {}
    for record in result:
        rels = rels + [x for x in record.get("rels") if x not in rels]
        rels = rels + [x for x in record.get("other_rels") if x not in rels]
        for node in record.get("nodes"):
            nodes[node.id] = node
        for node in record.get("other_nodes"):
            nodes[node.id] = node
    # NOTE(review): createTreeFromEdges takes 6 positional args (see its def
    # above) — this 2-arg call will raise TypeError if ever executed. This
    # function looks superseded by ast_tree_dependent_new; confirm.
    return createTreeFromEdges(rels, nodes)
    # return to_return


# retrieves dependent projects of the specific node
def dependents_from_node(tx, group, project, node_label, node_id):
    """Return dependent Project dicts with the count of distinct methods used."""
    if (node_label != None and node_id != None):
        match = '''
        MATCH (:Project {{ id: '{}/{}' }})-[:Contains*]->(:{} {{id: '{}'}})-[:Contains*]->(m:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project)
        RETURN d, COUNT(DISTINCT m) as v
        UNION
        MATCH (:Project {{ id: '{}/{}' }})-[:Contains*]->(m:{} {{id: '{}'}})<-[:Calls]-(:Method)<-[:Contains*]-(d:Project)
        RETURN d, COUNT(DISTINCT m) as v
        '''.format(group, project, node_label, node_id, group, project, node_label, node_id)
        print(match)
        result = tx.run(match)
    else:
        match = '''
        MATCH (:Project {{ id: '{}/{}' }})-[:Contains*]->(m:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project)
        RETURN d, COUNT(DISTINCT m) as v
        '''.format(group, project)
        print(match)
        result = tx.run(match)
    to_return = []
    for record in result:
        node = record.get('d')
        object_to_return = {}
        object_to_return['label'] = list(getattr(node, '_labels'))[0]
        object_to_return['id'] = getattr(node, '_properties').get('id')
        object_to_return['value'] = record.get("v")
        object_to_return['properties'] = getattr(node, '_properties')
        object_to_return['name'] = "{}: {}".format(object_to_return.get('label'), object_to_return.get('id'))
        to_return.append(object_to_return)
    return to_return


def project_hierarchy(tx, group, project):
    """Returns the project hierarchy, excluding methods that are not used by any other projects.
    Useful for retrieving a filtered down representation of the analysed project that is relevent to dependent analysis."""
    dependent_project_string = ""
    match = '''
    MATCH (p:Project {{ id: '{}/{}' }})-[:Contains*]->(c)-[:Contains*]->(:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project {})
    RETURN c, COUNT(DISTINCT d) as v, COUNT(d) as w
    UNION
    MATCH (p:Project {{ id: '{}/{}' }})-[:Contains*]->(c:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project {})
    RETURN c, COUNT(DISTINCT d) as v, COUNT(d) as w
    '''.format(group, project, dependent_project_string, group, project, dependent_project_string)
    print(match)
    result = tx.run(match)
    tmp = []
    # Debug dumps of the raw result graph.
    for node in result.graph().nodes:
        print(node)
        print(node)
    print(result.graph())
    print(result.graph().nodes)
    print(result.graph().relationships)
    for record in result:
        print(record)
        node = record.get('c')
        object_to_return = {}
        object_to_return['label'] = list(getattr(node, '_labels'))[0]
        object_to_return['id'] = getattr(node, '_properties').get('id')
        object_to_return['value'] = record.get("w")
        object_to_return['size'] = record.get("w")
        object_to_return['dependent_projects'] = record.get("v")
        object_to_return['properties'] = getattr(node, '_properties')
        object_to_return['name'] = "{}: {}".format(
            object_to_return.get('label'), object_to_return.get('id'))
        object_to_return['children'] = []
        object_to_return[
            'retrieve_children_url'] = "{}/project/{}/{}/retrieve/children?label={}&id={}".format(
            utils.get_domain(), group, project, object_to_return.get('label'),
            object_to_return.get('id'))
        tmp.append(object_to_return)
    payload = {}
    current_layer = payload
    print(json.dumps(tmp))
    # NOTE(review): `found` is initialised False and never changed, so this
    # `while found:` body is dead code — looks like an abandoned first
    # attempt at the hierarchy build done below. Confirm and remove.
    for obj in tmp:
        found = False
        while found:
            if obj["id"].startswith(current_layer["name"]):
                current_layer["children"].append({
                    "id": obj["id"],
                    "value": obj["value"],
                    "size": obj["size"],
                    "label": obj["label"],
                    "children": [],
                    "dependent_projects": obj["dependent_projects"]
                })
    to_return = tmp.copy()
    # Query returns relevant artifacts in the project, but contains no inherent
    # hierarchical data. Construct this hierarchical data structure from the
    # artifact's name.
    output = {}
    for data in to_return:
        if not output.get("id", None):
            # First record becomes the root of the hierarchy.
            output = {
                "id": data["id"],
                "name": data["id"].split('.')[-1],
                "value": data["value"],
                "size": data["size"],
                "label": data["label"],
                "children": [],
                "dependent_projects": data["dependent_projects"]
            }
        else:
            print("did we even get here")
            not_placed = True
            current_layer = output
            # Descend into whichever child's dotted id prefixes this one;
            # when no child matches (for-else), attach here.
            while not_placed:
                for child in current_layer["children"]:
                    print(child)
                    if data["id"].startswith(child["id"]):
                        current_layer = child
                        break
                else:
                    current_layer["children"].append({
                        "id": data["id"],
                        "name": data["id"].split('.')[-1],
                        "value": data["value"],
                        "size": data["size"],
                        "label": data["label"],
                        "children": [],
                        "dependent_projects": data["dependent_projects"]
                    })
                    not_placed = False
    return output


def all_project_dependencies(tx, group, project):
    """Return """
    match = '''
    MATCH (proj:Project)-[:Contains*]->(i:Method)-[:Calls*0..]->(m:Method)<-[:Contains*]-(:Project {{id: "{}/{}"}})
    RETURN m, i, proj
    '''.format(group, project)
    print(match)
    result = tx.run(match)
    results_list = []
    for record in result:
        callee = record.get('m')
        caller = record.get('i')
        # NOTE(review): trailing comma makes caller_project a 1-tuple —
        # unwrapped below via caller_project[0].
        caller_project = record.get('proj'),
        results_list.append([{
            "id": getattr(callee, '_properties').get('id'),
            "name": getattr(callee, '_properties').get('name'),
            "project": "{}/{}".format(group, project),
        }, {
            "id": getattr(caller, '_properties').get('id'),
            "name": getattr(caller, '_properties').get('name'),
            "project": getattr(caller_project[0], '_properties').get('id'),
        }])
    # Fold caller/callee pairs into a d3-style "flare" import structure.
    output = defaultdict(lambda: defaultdict())
    for pair in results_list:
        output[pair[0]['id']]['name'] = 'root.' + pair[0]['id']
        output[pair[0]['id']]['project'] = pair[0]['project']
        output[pair[0]['id']]['size'] = 1
        output[pair[0]['id']]['imports'] = []
        output[pair[1]['id']]['name'] = 'root.' + pair[1]['id']
        output[pair[1]['id']]['project'] = pair[1]['project']
        output[pair[1]['id']]['size'] = 1
        if 'root.' + pair[0]['id'] not in output[pair[1]['id']].get(
                'imports', []):
            if not output[pair[1]['id']].get('imports', None):
                output[pair[1]['id']]['imports'] = []
            output[pair[1]['id']]['imports'].append(
                'root.' + pair[0]['id']),
    return list(output.values())


def contains_from_node_legacy(tx, group, project, node_label, node_id, dependent_project_group, dependent_project_repo):
    """Legacy child-listing query; filters results down to top-level entries
    by dropping records whose id is prefixed by another record's id."""
    if (dependent_project_group != None and dependent_project_repo != None):
        dependent_project_string = "{{ id: '{}/{}' }}".format(dependent_project_group, dependent_project_repo)
    else:
        dependent_project_string = ""
    if (node_label != None and node_id != None):
        match = '''
        MATCH (p:Project {{ id: '{}/{}' }})-[:Contains*]->(:{} {{id: '{}'}})-[:Contains]->(c)-[:Contains*]->(:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project {})
        RETURN c, COUNT(DISTINCT d) as v
        UNION
        MATCH (p:Project {{ id: '{}/{}' }})-[:Contains*]->(:{} {{id: '{}'}})-[:Contains]->(c:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project {})
        RETURN c, COUNT(DISTINCT d) as v
        '''.format(group, project, node_label, node_id, dependent_project_string, group, project, node_label, node_id, dependent_project_string)
        print(match)
        result = tx.run(match)
    else:
        match = '''
        MATCH (p:Project {{ id: '{}/{}' }})-[:Contains]->(c)-[:Contains*]->(:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project {})
        RETURN c, COUNT(DISTINCT d) as v
        UNION
        MATCH (p:Project {{ id: '{}/{}' }})-[:Contains]->(c:Method)<-[:Calls]-(:Method)<-[:Contains*]-(d:Project {})
        RETURN c, COUNT(DISTINCT d) as v
        '''.format(group, project, dependent_project_string, group, project, dependent_project_string)
        print(match)
        result = tx.run(match)
    tmp = []
    for record in result:
        node = record.get('c')
        object_to_return = {}
        object_to_return['label'] = list(getattr(node, '_labels'))[0]
        object_to_return['id'] = getattr(node, '_properties').get('id')
        object_to_return['value'] = record.get("v")
        object_to_return['properties'] = getattr(node, '_properties')
        object_to_return['name'] = "{}: {}".format(object_to_return.get('label'), object_to_return.get('id'))
        object_to_return['children'] = []
        object_to_return['retrieve_children_url'] = "{}/project/{}/{}/retrieve/children?label={}&id={}".format(utils.get_domain(), group, project, object_to_return.get('label'), object_to_return.get('id'))
        if (dependent_project_group != None and dependent_project_repo != None):
            object_to_return['retrieve_children_url'] = "{}&dependent_group={}&dependent_repo={}".format(object_to_return['retrieve_children_url'], dependent_project_group, dependent_project_repo)
        tmp.append(object_to_return)
    to_return = tmp.copy()
    for this_record in tmp:
        for other_record in tmp:
            if (other_record.get('id') != this_record.get('id')):
                # If this record is a child of another record
                if (this_record.get('id').startswith(other_record.get('id'))):
                    print(this_record.get('id') + "is child of " + other_record.get('id'))
                    # remove this record
                    try:
                        to_return.remove(this_record)
                    except:
                        pass
    return to_return


def is_package_parsed(tx, groupId, artifactId):
    """True if the artifact's 'parsed' attribute is the string "True"."""
    result = tx.run("MATCH (a:ArtifactAttribute {{id: '{}.{}.parsed', name: 'parsed'}}) RETURN a.value".format(groupId, artifactId))
    if (result == None):
        return False
    single = result.single()
    if (single == None):
        return False
    if (single[0] == "True"):
        return True
    return False


def project_exists(tx, group, project):
    """True if a Project node with id '<group>/<project>' exists."""
    result = tx.run("MATCH (p:Project {{id:'{}/{}'}}) RETURN p".format(group, project))
    if (result == None):
        return False
    single = result.single()
    if (single == None):
        return False
    return True


def artifact_exists(tx, groupId, artifactId):
    """True if an Artifact node with id '<groupId>.<artifactId>' exists."""
    result = tx.run("MATCH (a:Artifact {{id:'{}.{}'}}) RETURN a".format(groupId, artifactId))
    if (result == None):
        return False
    single = result.single()
    if (single == None):
        return False
    return True


def get_project_packages(tx, group, repo):
    """List {group, artifact} dicts for artifacts contained in a project."""
    query_string = "MATCH (p:Project {{id:'{}/{}'}})-[:Contains]->(a:Artifact) RETURN a.group, a.artifact".format(group, repo)
    print (query_string)
    to_return = []
    result = tx.run(query_string)
    for record in result:
        print (record)
        to_return.append({'group': record.get('a.group'), 'artifact': record.get('a.artifact')})
    return to_return


def get_project_dependents(tx, groupId, artifactId):
    """Return dependent projects; parsed ones carry a dependents_count sum,
    unparsed ones report "Unknown"."""
    query_string_parsed = "MATCH (p:Project {{id:'{}/{}'}})-[:Contains]->(:Artifact)<-[:Depends]-(:Artifact)<-[:Contains]-(d:Project)-[:Contains]->(:Artifact)-[:Attribute]-(a:ArtifactAttribute {{name:'total_count'}}) RETURN DISTINCT d.id, SUM(DISTINCT toInteger(a.value)) as v".format(groupId, artifactId)
    query_string_unparsed = "MATCH (p:Project {{id:'{}/{}'}})-[:Contains]->(:Artifact)<-[:Depends]-(:Artifact)<-[:Contains]-(d:Project) RETURN DISTINCT d.id".format(groupId, artifactId)
    to_return = []
    size = 0
    result_parsed = tx.run(query_string_parsed)
    result_unparsed = tx.run(query_string_unparsed)
    for record in result_parsed:
        print (record)
        to_return.append({'github_short_url': record.get('d.id'), 'dependents_count': record.get('v')})
        size = size + 1
    for record in result_unparsed:
        print (record)
        to_return.append({'github_short_url': record.get('d.id'), 'dependents_count': "Unknown"})
        size = size + 1
    return {'count': size, 'projects': to_return}


def get_project_dependencies(tx, groupId, repoId):
    """Return the artifacts a project's artifacts depend on."""
    query_string = "MATCH (p:Project {{id:'{}/{}'}})-[:Contains]->(:Artifact)-[:Depends]->(d:Artifact) RETURN d.group, d.artifact".format(groupId, repoId)
    print (query_string)
    to_return = []
    size = 0
    result = tx.run(query_string)
    for record in result:
        print (record)
        to_return.append({'group': record.get('d.group'), 'artifact': record.get('d.artifact')})
        size = size + 1
    return {'count': size, 'artifacts': to_return}


def get_project_dependents_total_cached(tx, groupId, artifactId):
    """Sum of cached 'total_count' attributes across a project's artifacts."""
    query_string = "MATCH (p:Project {{id:'{}/{}'}})-[:Contains]->(:Artifact)-[:Attribute]-(d:ArtifactAttribute {{name:'total_count'}}) RETURN SUM(toInteger(d.value))".format(groupId, artifactId)
    print (query_string)
    result = tx.run(query_string)
    if (result == None):
        return 0
    single = result.single()
    if (single == None):
        return 0
    return single[0]


def get_artifact_dependents(tx, groupId, artifactId):
    """Artifacts that directly depend on the given artifact."""
    query_string = "MATCH (a:Artifact {{id:'{}.{}'}})<-[:Depends]-(d:Artifact) RETURN d.group, d.artifact".format(groupId, artifactId)
    print (query_string)
    to_return = []
    size = 0
    result = tx.run(query_string)
    for record in result:
        print (record)
        to_return.append({'group': record.get('d.group'), 'artifact': record.get('d.artifact')})
        size = size + 1
    return {'count': size, 'artifacts': to_return}


def get_artifact_dependents_count(tx, groupId, artifactId):
    """Count of distinct artifacts directly depending on the artifact."""
    query_string = "MATCH (a:Artifact {{id:'{}.{}'}})<-[:Depends]-(d:Artifact) RETURN COUNT (DISTINCT d)".format(groupId, artifactId)
    print (query_string)
    result = tx.run(query_string)
    if (result == None):
        return 0
    single = result.single()
    if (single == None):
        return 0
    return single[0]


def get_transitive_artifact_dependents(tx, groupId, artifactId):
    """Artifacts exactly two Depends hops away from the artifact."""
    query_string = "MATCH (a:Artifact {{id:'{}.{}'}})<-[:Depends*2]-(d:Artifact) RETURN d.group, d.artifact".format(groupId, artifactId)
    print (query_string)
    to_return = []
    size = 0
    result = tx.run(query_string)
    for record in result:
        print (record)
        to_return.append({'group': record.get('d.group'), 'artifact': record.get('d.artifact')})
        size = size + 1
    return {'count': size, 'artifacts': to_return}


def get_artifact_dependents_total_cached(tx, groupId, artifactId):
    """Cached 'total_count' attribute value for a single artifact, or 0."""
    query_string = "MATCH (a:Artifact {{id:'{}.{}'}})-[:Attribute]-(d:ArtifactAttribute {{name:'total_count'}}) RETURN d.value".format(groupId, artifactId)
    result = tx.run(query_string)
    if (result == None):
        return 0
    single = result.single()
    if (single == None):
        return 0
    return single[0]


def retrieve_project_attribute_value(tx, github_short_url, attribute_name):
    """Value of a ProjectAttribute node, or None if absent."""
    result = tx.run("MATCH (a:ProjectAttribute {{id: '{}.{}'}}) RETURN a.value AS value;".format(github_short_url, attribute_name))
    if (result == None):
        return None
    single = result.single()
    if (single == None):
        return None
    return single[0]


def retrieve_artifact_attribute_value(tx, groupId, artifactId, attribute_name):
    """Value of an ArtifactAttribute node, or None if absent."""
    result = tx.run("MATCH (a:ArtifactAttribute {{id: '{}.{}.{}'}}) RETURN a.value AS value;".format(groupId, artifactId, attribute_name))
    if (result == None):
        return None
    single = result.single()
    if (single == None):
        return None
    return single[0]
11318895
<gh_stars>1-10
import os

import jinja2
from docx.shared import Mm
from docxtpl import DocxTemplate, InlineImage

from app.utils.docx.report_utils import li_multiple_plot, get_result, get_multiple_iback, sand_area_contraction
from config import Config


def set_sand_docxtpl(dict_data, location=''):
    """Render the sand-experiment report from tpl.docx and save it.

    dict_data: report payload with 'device', 'experiment' and 'tests' keys
    (assumed shape — confirm against caller). location: '' / 'index' /
    None / ' ' selects the video-directory branch, anything else the
    ipcConfig.txt branch; only the resulting count feeds the plots.
    """
    ites = dict_data['tests']['samples']
    path_in = './app/static/video/'
    path_out = '../static/video/'
    print(dict_data)
    print(dict_data['experiment'])
    print('\n')
    print(dict_data['tests'])
    print('dict_dat')
    if location == '' or location == 'index' or location is None or location == ' ':
        # Count .mp4 files under path_in; names are remapped to path_out.
        video_names = []
        for dirpath, dirnames, filenames in os.walk(path_in):
            for filename in filenames:
                # dir_file_name = os.path.join(dirpath, filename)
                dir_file_name = filename
                if os.path.splitext(dir_file_name)[1] == '.mp4':
                    # (('./app/static/movie', '.mp4'))
                    print(dir_file_name)
                    video_names.append(path_out + dir_file_name)
        length = len(video_names)
    else:
        # Otherwise the count is the number of lines in ipcConfig.txt.
        ips = []
        document_path = Config.SAVE_DOCUMENT_PATH
        with open(document_path + "ipcConfig.txt", "r+") as f:
            a = f.readlines()
            for i in a:
                ips.append(i)
        length = len(ips)
    print("一共有多少个数据")
    doc = DocxTemplate("tpl.docx")
    imge_file_location = Config.UPLOAD_IMAGE_PATH
    document_file_location = Config.SAVE_DOCUMENT_PATH
    multiplt_lines = li_multiple_plot(length, document_file_location)
    results_frame = get_result(ites, imge_file_location)
    name_list = ['v_vx', 'v_vy', 'scale_vx', 'scale_vy', 'density_vx', 'density_vy', 'viscosity_vx', 'viscosity_vy']
    # run_name mutates results_frame in place; the returned alias is unused.
    results_done = run_name(name_list, doc, results_frame)
    li_result = get_multiple_iback(length)
    i = 0
    for item in li_result:
        print(item['area']['areas'])
        # Generate the comparison charts, then wrap the image paths as
        # InlineImage objects for the template.
        item['area_plt'] = sand_area_contraction('曲线各部分面积对比#' + str(i), '面积(m^2)',
                                                 imge_file_location, item['area']['areas'])
        item['height_plt'] = sand_area_contraction('各部分高度对比#' + str(i), '高度(m)',
                                                   imge_file_location, item['height']['heights'])
        i += 1
        item['area_plt'] = InlineImage(doc, item['area_plt'], Mm(70))
        item['height_plt'] = InlineImage(doc, item['height_plt'], Mm(70))
    context = {
        'device': dict_data['device'],
        'experiment': dict_data['experiment'],
        'tests': dict_data['tests'],
        'line_relations': results_frame,
        'multiple_lines': InlineImage(doc, multiplt_lines, Mm(100)),
        'contrast': li_result,
        'li_test': [0, 1, 2, 3, 6]
    }
    # NOTE(review): jinja_env is created but not passed to doc.render —
    # autoescape therefore has no effect here. Confirm intent.
    jinja_env = jinja2.Environment(autoescape=True)
    doc.render(context)
    file_location = dict_data['experiment']['file_location']
    print("file location===>" + file_location)
    if (os.path.exists(file_location)):
        print("rush")
        doc.save(file_location + "/generated_doc.docx")
    else:
        print("cant")
        doc.save("generated_doc.docx")
    # A copy is always saved into the configured document directory as well.
    doc.save(document_file_location + "generated_doc.docx")


def run_name(name_list, tpl, results_frame):
    """Round each series' fit coefficients and wrap its plot as an
    InlineImage; mutates and returns results_frame."""
    for name in name_list:
        results_frame[name]['a'] = round(results_frame[name]['a'][0][0], 2)
        results_frame[name]['b'] = round(results_frame[name]['b'][0], 2)
        results_frame[name]['file_name'] = InlineImage(tpl, results_frame[name]['file_name'], Mm(100))
    return results_frame
StarcoderdataPython
1959279
# repo: aditya-prasad-projects/Deep-Semantic-Code-Search
import ast
import sys


class ASTVisitor(ast.NodeVisitor):
    """Walk a Python AST and collect a flat "API sequence" of keywords,
    names, attributes and literals in ``self.api_seq``.

    Nodes without an explicit ``visit_*`` handler fall back to
    :meth:`ast.NodeVisitor.generic_visit`, which simply recurses into
    children, so identifiers nested in unhandled statements are still
    collected.
    """

    def __init__(self):
        # Ordered token sequence accumulated while visiting.
        self.api_seq = []

    def visit(self, node):
        """Dispatch to the matching handler; tolerate ``None`` children
        (e.g. the ``value`` of a bare ``return``)."""
        if node is None:
            return
        return super().visit(node)

    def visit_Assign(self, node):
        # Collect targets first, then the assigned expression.
        for target in node.targets:
            self.visit(target)
        self.visit(node.value)

    def visit_If(self, node):
        self.append("if")
        self.visit(node.test)
        for body in node.body:
            self.visit(body)
        # FIX: only emit "else" when an orelse block actually exists;
        # the original appended it unconditionally, unlike visit_For /
        # visit_While which guard on node.orelse.
        if node.orelse:
            self.append("else")
            for orelse in node.orelse:
                self.visit(orelse)

    def visit_For(self, node, is_async=False):
        if is_async:
            self.append("async")
        self.append("for")
        self.visit(node.target)
        self.visit(node.iter)
        # FIX: the loop body was never visited in the original, so all
        # identifiers inside a for-loop were silently dropped.
        for body in node.body:
            self.visit(body)
        if node.orelse:
            self.append("else")
            for orelse in node.orelse:
                self.visit(orelse)

    def visit_While(self, node):
        self.append("while")
        self.visit(node.test)
        for body in node.body:
            self.visit(body)
        if node.orelse:
            self.append("else")
            for orelse in node.orelse:
                self.visit(orelse)

    def visit_NameConstant(self, node):
        # True / False / None (pre-3.8 node type; ast.Constant elsewhere).
        self.append(node.value)

    def visit_Pass(self, node):
        self.append('pass')

    def visit_Return(self, node):
        self.append("return")
        self.visit(node.value)

    def visit_Break(self, node):
        self.append("break")

    def visit_Continue(self, node):
        self.append("continue")

    def visit_Raise(self, node):
        self.append("raise")
        self.visit(node.exc)

    # Expressions

    def visit_Attribute(self, node):
        # e.g. ``a.b`` -> visit ``a`` then record ``b``.
        self.visit(node.value)
        self.append(node.attr)

    def visit_Name(self, node):
        self.append(node.id)

    def visit_Str(self, node, is_joined=False):
        self.append(node.s)

    def visit_Bytes(self, node):
        self.append(repr(node.s))

    def visit_Num(self, node,
                  # constants
                  new=sys.version_info >= (3, 0)):
        self.append(node.n)

    def append(self, value):
        """Record *value*; strings are stripped and dropped when empty,
        non-strings (numbers, True/False/None) are recorded verbatim."""
        if isinstance(value, str):
            value = value.strip()
            if value:
                self.api_seq.append(value)
        else:
            self.api_seq.append(value)
StarcoderdataPython
4955850
import os.path as osp import os import scipy.io as scio import cv2 from PIL import Image from tqdm import tqdm def parse_pascal_voc_aug(pth): out_pth = osp.join(pth, 'VOC_AUG') ds_pth = osp.join(pth, 'benchmark_RELEASE/dataset') labels_pth = osp.join(ds_pth, 'cls') lbmats = os.listdir(labels_pth) print('converting mat files to png images') for lbmat in tqdm(lbmats): mat_pth = osp.join(labels_pth, lbmat) mat = scio.loadmat(mat_pth, mat_dtype = True, squeeze_me = True, struct_as_record = False) lb_arr = mat['GTcls'].Segmentation lb_name = osp.splitext(lbmat)[0] lb_fn = '{}.png'.format(lb_name) lb_save_pth = osp.join(out_pth, 'labels', lb_fn) lb = Image.fromarray(lb_arr) lb.save(lb_save_pth) if __name__ == '__main__': parse_pascal_voc_aug('./data')
StarcoderdataPython
12852817
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.

import sgtk
from sgtk.platform.qt import QtCore

from .search_result_delegate import SearchResultDelegate

# import the shotgun_model and view modules from the shotgun utils framework
shotgun_model = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_model")
shotgun_globals = sgtk.platform.import_framework("tk-framework-shotgunutils", "shotgun_globals")

views = sgtk.platform.current_bundle().import_module("views")


class GlobalSearchResultDelegate(SearchResultDelegate):
    """
    Delegate which renders search match entries in the global search completer.
    """

    def _render_result(self, widget, model_index):
        """
        Renders a result from the model into the provided widget.

        :param widget: Widget used to render the result.
        :type widget: ``SearchResultWidget``
        :param model_index: Index of the item to render.
        :type model_index: :class:`~PySide.QtCore.QModelIndex`
        """
        from .global_search_completer import GlobalSearchCompleter

        # Thumbnail: use the decoration icon when present, otherwise the
        # stock "no thumbnail" placeholder.
        decoration = shotgun_model.get_sanitized_data(model_index, QtCore.Qt.DecorationRole)
        if decoration:
            widget.set_thumbnail(decoration.pixmap(512))
        else:
            widget.set_thumbnail(self._pixmaps.no_thumbnail)

        # Example of data stored in the data role:
        # {'status': 'vwd',
        #  'name': 'bunny_010_0050_comp_v001',
        #  'links': ['Shot', 'bunny_010_0050'],
        #  'image': 'https://xxx',
        #  'project_id': 65,
        #  'type': 'Version',
        #  'id': 99}
        sg_data = shotgun_model.get_sanitized_data(model_index, GlobalSearchCompleter.SG_DATA_ROLE)

        type_display = shotgun_globals.get_type_display_name(sg_data["type"])
        highlighted_name = self._underline_search_term(sg_data["name"])
        type_icon = shotgun_globals.get_entity_type_icon_url(sg_data["type"])

        # Assemble the HTML body as a list of fragments joined at the end.
        fragments = []
        if type_icon:
            # entity-type icon followed by the highlighted name
            fragments.append(
                "<img src='%s'/>&nbsp;&nbsp;<b style='color: rgb(48, 167, 227)';>%s</b>"
                % (type_icon, highlighted_name)
            )
        else:
            fragments.append("%s" % highlighted_name)
        fragments.append("<br>%s" % type_display)

        links = sg_data["links"]
        # note users return weird data so ignore it.
        if links and links[0] not in ("", "HumanUser", "ClientUser"):
            # there is a referenced entity
            highlighted_link = self._underline_search_term(links[1])
            link_icon = shotgun_globals.get_entity_type_icon_url(links[0])
            if link_icon:
                fragments.append(" on <img align=absmiddle src='%s'/> %s" % (link_icon, highlighted_link))
            else:
                fragments.append(
                    " on %s %s"
                    % (shotgun_globals.get_type_display_name(links[0]), highlighted_link)
                )

        widget.set_text("".join(fragments))
StarcoderdataPython
3443470
<reponame>ihaywood3/twsmb<filename>ntlm.py # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """Implement the NT Lan Manager (NTLMv2) challenge/response authentication protocol """ from __future__ import absolute_import, division from zope.interface import implementer, Interface import struct import time import socket import hmac import hashlib import base import twisted.cred.credentials from twisted.python.randbytes import secureRandom from twisted.internet.defer import maybeDeferred from twisted.logger import Logger log = Logger() NTLM_MESSAGES = ['invalid', 'negotiate', 'challenge', 'auth'] FLAGS = { 'NegotiateUnicode' : 0x00000001, 'NegotiateOEM' : 0x00000002, 'RequestTarget' : 0x00000004, 'Unknown9' : 0x00000008, 'NegotiateSign' : 0x00000010, 'NegotiateSeal' : 0x00000020, 'NegotiateDatagram' : 0x00000040, 'NegotiateLanManagerKey' : 0x00000080, 'Unknown8' : 0x00000100, 'NegotiateNTLM' : 0x00000200, 'NegotiateNTOnly' : 0x00000400, 'Anonymous' : 0x00000800, 'NegotiateOemDomainSupplied' : 0x00001000, 'NegotiateOemWorkstationSupplied' : 0x00002000, 'Unknown6' : 0x00004000, 'NegotiateAlwaysSign' : 0x00008000, 'TargetTypeDomain' : 0x00010000, 'TargetTypeServer' : 0x00020000, 'TargetTypeShare' : 0x00040000, 'NegotiateExtendedSecurity' : 0x00080000, 'NegotiateIdentify' : 0x00100000, 'Unknown5' : 0x00200000, 'RequestNonNTSessionKey' : 0x00400000, 'NegotiateTargetInfo' : 0x00800000, 'Unknown4' : 0x01000000, 'NegotiateVersion' : 0x02000000, 'Unknown3' : 0x04000000, 'Unknown2' : 0x08000000, 'Unknown1' : 0x10000000, 'Negotiate128' : 0x20000000, 'NegotiateKeyExchange' : 0x40000000, 'Negotiate56' : 0x80000000 } DEFAULT_FLAGS={"NegotiateUnicode", "NegotiateSign", "RequestTarget", "NegotiateNTLM", "NegotiateAlwaysSign", "NegotiateExtendedSecurity", "NegotiateTargetInfo", "NegotiateVersion", "Negotiate128", "NegotiateKeyExchange", "Negotiate56"} def flags2set(flags): """ convert C-style flags to Python set @param flags: the flags @type flags: L{int} @rtype: 
L{set} of L{str} """ r = set() for k, v in FLAGS.items(): if v | flags > 0: r.add(k) return r def set2flags(s): """ convert set to C-style flags @rtype: L{int} @type s: L{set} of L{str} """ flags = 0 for i in s: flags |= FLAGS[i] return flags def avpair(code, data): """make an AVPAIR structure @param code: the attribute ID @type code: L{int} @param data: the value @type value: L{bytes}, or L{str} which is converted UTF-16 @rtype: L{bytes} """ if type(data) is str: data = data.encode("utf-16le") elif len(data) % 2 > 0: data += b'\0' return struct.pack("<HH", code, len(data)) + data AV_EOL=0x0000 AV_COMPUTER_NAME=0x0001 AV_DOMAIN_NAME=0x0002 # only first three are required AV_DNS_COMPUTER_NAME=0x0003 AV_DNS_DOMAIN_NAME=0x0004 AV_TREE_NAME=0x0005 AV_FLAGS=0x0006 AV_TIMESTAMP=0x0007 AV_SINGLE_HOST=0x0008 AV_TARGET_NAME=0x0009 AV_CHANNEL_BINDINGS=0x000A SERVER_VERSION=(6, 1, 1) # major version 6.1 = Vista, roughly speaking what this emulates class NTLMManager(object): """ manage the NTLM subprotocol @ivar credential: the user cred, available after the AUTH token received None prior to this @type credential: L{IUsernameHashedPassword} """ def __init__(self, domain): """ @param domain: the server NetBIOS domain @type domain: L{str} """ self.credential = None self.flags = DEFAULT_FLAGS self.server_domain = domain def receiveToken(self, token): """ receive client token once unpacked from overlying protocol @type token: L{bytes} """ self.token = token if len(token) < 36: log.debug(token) raise base.SMBError("token too small") sig, msg_id, rem = base.unpack("<8sL", token) if sig != b'NTLMSSP\0': log.debug(repr(token[:16])) raise base.SMBError("No valid NTLM token header") try: getattr (self, 'ntlm_'+NTLM_MESSAGES[msg_id]) (rem) except IndexError: raise base.SMBError("invalid message id %d" % msg_id) def ntlm_invalid(self, data): raise base.SMBError("invalid message id 0") def ntlm_challenge(self, data): raise base.SMBError("invalid to send NTLM challenge to a server") def 
ntlm_negotiate(self, data): (flags, domain_len, domain_max_len, domain_offset, workstation_len, workstation_max_len, workstation_offset, v_major, v_minor, v_build, v_protocol, _ ) = base.unpack("<LHHLHHLBBHxxxB", data) flags = flags2set(flags) log.info("NTLM NEGOTIATE") log.debug("--------------") log.debug("Flags %r" % list(flags)) if 'NegotiateVersion' in flags: log.debug("Version %d.%d (%d) 0x%02x" % ( v_major, v_minor, v_build, v_protocol)) if not 'NegotiateUnicode' in flags: raise base.SMBError("clients must use Unicode") if 'NegotiateOemDomainSupplied' in flags and domain_len > 0: self.client_domain = \ self.token[domain_len:domain_len+domain_offset].decode('utf-16le') log.debug("Client domain %r" % self.client_domain) else: self.client_domain = None if 'NegotiateOemWorkstationSupplied' in flags and workstation_len > 0: self.workstation = self.token[workstation_len:workstation_len+workstation_offset].decode('utf-16le') log.debug("Workstation %r" % self.workstation) else: self.workstation = None self.flags = DEFAULT_FLAGS & flags if 'NegotiateAlwaysSign' not in self.flags and 'NegotiateSign' not in self.flags: self.flags -= {'Negotiate128', 'Negotiate56'} if 'RequestTarget' in self.flags: self.flags.add('TargetTypeServer') def getChallengeToken(self): """generate NTLM CHALLENGE token @rtype: L{bytes} """ FORMAT= '<8sIHHII8s8xHHIBBHxxxB' header_len=struct.calcsize(FORMAT) if 'RequestTarget' in self.flags: target = socket.gethostname().upper().encode('utf-16le') else: target = b'' if 'NegotiateTargetInfo' in self.flags: targetinfo = avpair(AV_COMPUTER_NAME, socket.gethostname().upper()) + \ avpair(AV_DOMAIN_NAME, self.server_domain) + \ avpair(AV_DNS_COMPUTER_NAME, socket.getfqdn()) + \ avpair(AV_DNS_DOMAIN_NAME, b'\0\0') + \ avpair(AV_TIMESTAMP, struct.pack("<Q", base.u2nt_time(time.time()))) + \ avpair(AV_EOL, b'') else: targetinfo = b'' if 'NegotiateVersion' in self.flags: v_protocol = 0x0F v_major, v_minor, v_build = SERVER_VERSION else: v_major = v_minor = 
v_build = v_protocol = 0 self.challenge = secureRandom(8) header = struct.pack(FORMAT, b"NTLMSSP\0", 0x0002, len(target), len(target), header_len, set2flags(self.flags), self.challenge, len(targetinfo), len(targetinfo), header_len+len(target), v_major, v_minor, v_build, v_protocol) return header+target+targetinfo def ntlm_auth(self, data): # note authentication isn't checked here, it's just unpacked and # loaded into the credential object (lmc_len, lmc_maxlen, lmc_offset, ntc_len, ntc_maxlen, ntc_offset, domain_len, domain_maxlen, domain_offset, user_len, user_maxlen, user_offset, workstation_len, workstation_max_len, workstation_offset, ersk_len, ersk_maxlen, ersk_offset, # Encrypted Random Session Key flags, v_major, v_minor, v_build, v_protocol, mic, _) = base.unpack("<HHIHHIHHIHHIHHIHHIIBBHxxxB16s", data) flags = flags2set(flags) lm = {} if lmc_len > 0: raw_lm_response = self.token[lmc_offset:lmc_offset+lmc_len] lm['response'], lm['client_challenge'] = struct.unpack("16s8s", raw_lm_response) nt = {} if ntc_len > 0: raw_nt_response = self.token[ntc_offset:ntc_offset+ntc_len] nt['temp'] = raw_nt_response[16:] (nt['response'], resp_type, hi_resp_type, nt['time'], nt['client_challenge'], nt['avpairs'] ) = base.unpack("<16sBB6xQ8s4x", raw_nt_response) if resp_type != 0x01: log.warn("NT response not valid type") if not nt and not lm: raise smb.SMBError("one of LM challenge or NT challenge must be provided") if domain_len > 0: client_domain = self.token[domain_offset:domain_offset+domain_len] client_domain = client_domain.decode('utf-16le') else: client_domain = None if user_len > 0: user = self.token[user_offset:user_offset+user_len] user = user.decode('utf-16le') else: raise smb.SMBError("username is required") if workstation_len > 0: workstation = self.token[workstation_offset:workstation_offset+workstation_len] workstation = workstation.decode('utf-16le') else: workstation = None if ersk_len > 0 and 'NegotiateKeyExchange' in flags: ersk = 
self.token[ersk_offset:ersk_offset+ersk_len] else: ersk = None self.ersk = ersk log.debug("NTLM AUTH") log.debug("---------") if 'NegotiateVersion' in flags: log.debug("Version %d.%d (%d) 0x%02x" % ( v_major, v_minor, v_build, v_protocol)) log.debug("Flags %r" % flags) log.debug("User %r" % user) log.debug("Workstation %r" % workstation) log.debug("Client domain %r" % client_domain) log.debug("LM response %r" % lm) log.debug("NT response %r" % nt) log.debug("ERSK %r" % ersk) self.credential = NTLMCredential(user, client_domain, lm, nt, self.challenge) @implementer(twisted.cred.credentials.IUsernameHashedPassword) class NTLMCredential(object): """ A NTLM credential, unverified initially """ def __init__(self, user, domain, lm, nt, challenge): self.username = user self.domain = domain self.lm = lm self.nt = nt self.challenge = challenge def __repr__(self): return "%s/%s" % (self.username, self.domain) def checkPassword(self, password): # code adapted from pysmb ntlm.py d = hashlib.new("md4") d.update(password.encode('UTF-16LE')) ntlm_hash = d.digest() # The NT password hash response_key = hmac.new(ntlm_hash, (self.username.upper() + self.domain).encode('UTF-16LE'), 'md5').digest() # The NTLMv2 password hash. In [MS-NLMP], this is the result of NTOWFv2 and LMOWFv2 functions if self.lm and self.lm['response'] != b'\0'*16: new_resp = hmac.new(response_key, self.challenge + self.lm['client_challenge'], 'md5').digest() if new_resp != self.lm['response']: return False if self.nt: new_resp = hmac.new(response_key, self.challenge + self.nt['temp'], 'md5').digest() if new_resp != self.nt['response']: return False assert self.nt or self.lm return True
StarcoderdataPython
11383896
# /* === 🌌 WELCOME TO ORBIT NEXT FRAMEWORK 🌌 === # * # * By : # * # * ██████╗ ██████╗ ██████╗ ██╗████████╗ ████████╗██╗ ██╗██████╗ ███╗ ██╗███████╗██████╗ # * ██╔═══██╗██╔══██╗██╔══██╗██║╚══██╔══╝ ╚══██╔══╝██║ ██║██╔══██╗████╗ ██║██╔════╝██╔══██╗ # * ██║ ██║██████╔╝██████╔╝██║ ██║ ██║ ██║ ██║██████╔╝██╔██╗ ██║█████╗ ██████╔╝ # * ██║ ██║██╔══██╗██╔══██╗██║ ██║ ██║ ██║ ██║██╔══██╗██║╚██╗██║██╔══╝ ██╔══██╗ # * ╚██████╔╝██║ ██║██████╔╝██║ ██║ ██║ ╚██████╔╝██║ ██║██║ ╚████║███████╗██║ ██║ # * ╚═════╝ ╚═╝ ╚═╝╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═══╝╚══════╝╚═╝ ╚═╝ # * # * AUTHOR : <NAME> [Orbit Turner] - Email: <EMAIL> - Country: Senegal # */ # THIS PROGRAM WILL JUST COUNT ALL THE FOLDERS IN THE GIVEN DIRECTORY import os input_path = "C:\\laragon\\www\\TPDEVWEB_SIMPLON_P3" # type: str # LENGTH OF THE DIRECTORY - 1(.git folder) print(len(next(os.walk(input_path))[1])-1)
StarcoderdataPython
9614522
<reponame>J0sueTM/Competitive-Programming<filename>Implementation/Mathematics/BasicMath/python/gcd.py import math def gcdpf(a, b): if a == 0: return b if b == 0: return a if a == b: return a if a > b: return gcdpf(a - b, b) else: return gcdpf(a, b - a) def gcdea(a, b): if b == 0: return a return gcdea(a, a % b) a = int(input()) b = int(input()) print(gcdpf(a, b)) print(gcdea(a, b)) print(math.gcd(a, b))
StarcoderdataPython
8012327
import numpy as np
import torch
import torch.nn as nn
from model.backboneModel import EfficientNet, YOLOLayer, BasicBlock


class AutoNet(nn.Module):
    """Multi-camera scene model: an EfficientNet encoder per camera view,
    two LSTM branches over time steps, then two decoder heads -- a
    road-map segmentation head and a two-scale YOLO detection head.

    NOTE(review): the forward pass assumes 6 camera views per sample and
    128x160 input images (hard-coded reshapes below) -- confirm against
    the data loader.
    """

    def __init__(self, batch_size, step_size, anchors, detection_classes, freeze=False, device=None):
        # latent: size of the per-image embedding produced by the encoder;
        # fc_num: hidden size of each LSTM branch.
        self.latent = 1000
        self.fc_num = 400
        self.batch_size = batch_size
        self.step_size = step_size
        self.device = device
        self.anchors = anchors
        # first anchor goes to the fine-scale YOLO head, the rest to the
        # coarse-scale head
        self.anchors1 = np.reshape(anchors[0], [1, 2])
        self.anchors2 = anchors[1:]
        self.detection_classes = detection_classes
        super(AutoNet, self).__init__()
        self.efficientNet = EfficientNet.from_name('efficientnet-b3', freeze=freeze)
        feature = self.efficientNet._fc.in_features
        # replace the classifier with a 2*latent projection (mu, logvar)
        self.efficientNet._fc = nn.Sequential(
            nn.Linear(in_features=feature, out_features=2 * self.latent),
        )
        # branch 1: road-map head
        self.rnn1 = nn.LSTM(self.latent, self.fc_num, 2, batch_first=True, dropout=0.25)
        self.fc2 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 32, bias=False),
            nn.BatchNorm1d(25 * 25 * 32),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # branch 2: detection head
        self.rnn1_1 = nn.LSTM(self.latent, self.fc_num, 2, batch_first=True, dropout=0.25)
        self.fc2_1 = nn.Sequential(
            nn.Linear(self.fc_num * 6, 25 * 25 * 64, bias=False),
            nn.BatchNorm1d(25 * 25 * 64),
            nn.ReLU(inplace=True),
            nn.Dropout(0.25),
        )
        # road-map decoder: 25x25x32 -> upsampled 2-channel map
        # (self.inplanes is consumed by _make_layer for each stage)
        self.inplanes = 32
        self.conv0 = self._make_layer(BasicBlock, 32, 2)
        self.deconv0 = self._make_deconv_layer(32, 16)
        self.inplanes = 16
        self.conv1 = self._make_layer(BasicBlock, 16, 2)
        self.deconv1 = self._make_deconv_layer(16, 8)
        self.inplanes = 8
        self.conv2 = self._make_layer(BasicBlock, 8, 2)
        self.deconv2 = self._make_deconv_layer(8, 4)
        self.inplanes = 4
        self.conv3 = self._make_layer(BasicBlock, 4, 2)
        self.deconv3 = self._make_deconv_layer(4, 2)
        self.convfinal = nn.Conv2d(2, 2, 1)
        # detection decoder: coarse YOLO head at 25x25, fine head after
        # one upsampling stage
        self.inplanes = 64
        self.conv0_1 = self._make_layer(BasicBlock, 64, 2)
        self.deconv0_1 = self._make_deconv_layer(64, 16)
        self.conv0_1_detect = self._make_layer(BasicBlock, 64, 2)
        self.convfinal_0 = nn.Conv2d(64, len(self.anchors2) * (self.detection_classes + 5), 1)
        self.yolo0 = YOLOLayer(self.anchors2, self.detection_classes, 800, device=device)
        self.inplanes = 16
        self.conv1_1_detect = self._make_layer(BasicBlock, 16, 2)
        self.convfinal_1 = nn.Conv2d(16, len(self.anchors1) * (self.detection_classes + 5), 1)
        self.yolo1 = YOLOLayer(self.anchors1, self.detection_classes, 800, device=device)
        self.conv1_1 = self._make_layer(BasicBlock, 16, 2)

        # weight initialisation for every submodule
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.LSTM):
                # initialise input-hidden and hidden-hidden weights of
                # both LSTM layers
                nn.init.xavier_normal_(m.all_weights[0][0])
                nn.init.xavier_normal_(m.all_weights[0][1])
                nn.init.xavier_normal_(m.all_weights[1][0])
                nn.init.xavier_normal_(m.all_weights[1][1])

    def _make_layer(self, block, planes, blocks):
        """Stack ``blocks`` residual blocks from ``self.inplanes`` to
        ``planes`` channels (self.inplanes is set by the caller)."""
        layers = []
        for i in range(blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _make_deconv_layer(self, inplanes, outplanes):
        """One 2x-upsampling stage: ConvTranspose2d + BN + ReLU."""
        layers = []
        layers.append(
            nn.ConvTranspose2d(inplanes, outplanes, 3, stride=2, padding=1, output_padding=1, bias=False))
        layers.append(nn.BatchNorm2d(outplanes))
        layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers)

    def reparameterise(self, mu, logvar):
        # NOTE(review): logvar is ignored -- this degenerates the VAE
        # reparameterisation trick to a plain autoencoder; presumably
        # intentional (inference-style behaviour), confirm with training
        # code.
        return mu

    def batch_lstm(self, x, scene, step, branch):
        """Run one LSTM branch over the time dimension and regroup the
        output so the 6 camera views of a (scene, step) pair are
        concatenated on the feature axis.

        :param x: (scene*6, step, latent) embedding sequence
        :param branch: 1 -> self.rnn1 (road map), else self.rnn1_1 (YOLO)
        :return: (scene*step, fc_num*6) tensor
        """
        # fresh zero hidden/cell states per call (2 layers, 6 views/scene)
        h0 = torch.zeros((2, 6 * scene, self.fc_num)).to(self.device)
        c0 = torch.zeros((2, 6 * scene, self.fc_num)).to(self.device)
        if branch == 1:
            x_lstm_out, (ht, ct) = self.rnn1(x, (h0, c0))
        else:
            x_lstm_out, (ht, ct) = self.rnn1_1(x, (h0, c0))
        x = x_lstm_out
        # (scene*6, step, fc) -> (scene, 6, step, fc) -> (scene, step, 6, fc)
        x = x.view(scene, 6, step, self.fc_num)
        x = x.transpose(1, 2).contiguous()
        # flatten views into one feature vector per (scene, step)
        x = x.view(scene * step, self.fc_num * 6)
        return x

    def forward(self, x, detection_target=None):
        # x: (scene, step, 18, H, W) -- 6 views x 3 channels per step
        scene = x.size(0)
        step = x.size(1)
        # split the stacked views into individual 3x128x160 images
        x = x.view(-1, 3, 128, 160)
        output_list = self.efficientNet(x)
        # output_list[3] carries the (mu, logvar) projection
        x = output_list[3].view(output_list[3].size(0), 2, -1)
        x = x.view(x.size(0), 2, -1)
        mu = x[:, 0, :]
        logvar = x[:, 1, :]
        x = self.reparameterise(mu, logvar)
        # regroup to (scene*6, step, latent) sequences for the LSTMs
        x = x.view(scene, step, 6, self.latent)
        x = x.transpose(1, 2).contiguous()
        x = x.view(-1, step, self.latent)
        # --- branch 1: road-map segmentation ---
        x1 = self.batch_lstm(x, scene, step, 1)
        x1 = self.fc2(x1)
        x1 = x1.view(x1.size(0), -1, 25, 25)
        x1 = self.conv0(x1)
        x1 = self.deconv0(x1)
        x1 = self.conv1(x1)
        x1 = self.deconv1(x1)
        x1 = self.conv2(x1)
        x1 = self.deconv2(x1)
        x1 = self.conv3(x1)
        x1 = self.deconv3(x1)
        x1 = self.convfinal(x1)
        # --- branch 2: two-scale YOLO detection ---
        x2 = self.batch_lstm(x, scene, step, 2)
        x2 = self.fc2_1(x2)
        x2 = x2.view(x2.size(0), -1, 25, 25)
        x2 = self.conv0_1(x2)
        detect_output0 = self.conv0_1_detect(x2)
        detect_output0 = self.convfinal_0(detect_output0)
        detect_output0, detect_loss0 = self.yolo0(detect_output0, detection_target, 800)
        x2 = self.deconv0_1(x2)
        x2 = self.conv1_1(x2)
        detect_output1 = self.conv1_1_detect(x2)
        detect_output1 = self.convfinal_1(detect_output1)
        detect_output1, detect_loss1 = self.yolo1(detect_output1, detection_target, 800)
        # weighted combination of the two detection-scale losses
        total_loss = 0.6 * detect_loss0 + 0.4 * detect_loss1
        return nn.LogSoftmax(dim=1)(x1), detect_output0, detect_output1, total_loss


def trainModel(device, anchors, detection_classes=9, batch_size=2, step_size=2, freeze=False):
    """Factory wrapper used by the training script to build an AutoNet."""
    return AutoNet(batch_size, step_size, anchors, detection_classes, freeze=freeze, device=device)
StarcoderdataPython
1975379
# repo: rdius/Corpus_builder_app -- app_collect.py
# Core pkgs
import streamlit as st
import altair as alt
## EDA Pkgs
import base64
import json
import jsonlines
import pandas as pd
import os
import numpy as np
import sys
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from wordcloud import WordCloud
# from container_ import main_proto


##############
#
def select_useful_cols(corpus):
    """
    Keep only the columns used for filtering and visualisation, and
    expand the 'SNE'/'TNE' dict columns into one column per key.
    """
    columns = ['name', 'text', 'title', 'SNE', 'TNE', 'pertinence', 'thematic']
    new_corpus = pd.DataFrame(corpus, columns=columns)
    # flatten the nested SNE/TNE structures into flat columns
    corpus_data = pd.concat([new_corpus.drop(['SNE', 'TNE'], axis=1),
                             new_corpus['SNE'].apply(pd.Series),
                             new_corpus['TNE'].apply(pd.Series)], axis=1)
    return corpus_data


#
def read_corpus(file):
    """
    Parse the database (a jsonlines corpus of thematic documents) into a
    DataFrame restricted to the useful columns.
    """
    long_list = []
    with jsonlines.open(file) as f:
        for line in f.iter():
            long_list.append(line)
    my_corpus = pd.DataFrame(long_list, copy=True)
    corpus = select_useful_cols(my_corpus)
    return corpus


def look_for_thematic_data(file, thematic):
    '''
    Retrieve rows of a given thematic from the 3M database.
    Three thematics are supported: agriculture, hydrologie, urbanisation.

    file : input file, the database in jsonl, in this case
    '''
    my_corpus = read_corpus(file)
    # NOTE(review): the three branches are identical; a single
    # ``my_corpus[my_corpus['thematic'] == thematic]`` would do, and an
    # unknown thematic currently raises UnboundLocalError on query_data.
    if thematic == 'agriculture':
        them_mask = my_corpus['thematic'] == thematic
        query_data = my_corpus[them_mask]
    if thematic == 'hydrologie':
        them_mask = my_corpus['thematic'] == thematic
        query_data = my_corpus[them_mask]
    if thematic == 'urbanisation':
        them_mask = my_corpus['thematic'] == thematic
        query_data = my_corpus[them_mask]
    return query_data


def get_table_download_link_csv(df):
    """
    Build an HTML download link embedding the DataFrame as a
    base64-encoded (tab-separated) CSV.
    """
    csv = df.to_csv(index=False, sep="\t").encode()
    b64 = base64.b64encode(csv).decode()
    href = f'<a href="data:file/csv;base64,{b64}" download="data.csv" target="_blank">Download csv file</a>'
    return href


def process_date(corpus_data):
    """
    Split rows with a parseable 'date' into three age buckets used by
    the temporal pie chart (related to tne_color() below):
    range_1 >= 2019, range_2 in (2015, 2019], range_3 <= 2015.
    """
    corpus_data['date'] = pd.to_datetime(corpus_data['date'])
    # keep only rows carrying a temporal named entity (non-null date)
    corpus_data_tne = corpus_data[corpus_data['date'].notnull()]
    range_1 = corpus_data_tne[corpus_data_tne['date'] >= '2019-01-01']
    mask = (corpus_data_tne['date'] > '2015-01-01') & (corpus_data_tne['date'] <= '2019-01-01')
    range_2 = corpus_data_tne[mask]
    range_3 = corpus_data_tne[corpus_data_tne['date'] <= '2015-01-01']
    return corpus_data_tne, range_1, range_2, range_3


def temporal_filter(corpus_data):
    """
    TO DO : take into account temporal filter (range for data query)
    as it doesn't work yet in the UI.
    """
    # NOTE(review): ``filtered_data`` is never assigned -- calling this
    # raises NameError. Left as-is since the UI does not invoke it yet.
    return filtered_data


def sne_color(df):
    """
    Pick pie-slice colors for the spatial-named-entity (SNE) diagram,
    keyed on the node labels.
    """
    colors = []
    for p in df["node_labels"]:
        if p in ["", 'Data Spatiality<br>']:
            colors.append("white")
        elif p in ['With_SNE']:
            colors.append("green")
        elif p in ["WithOut_SNE"]:
            colors.append("blue")
    return colors


###
def tne_color(df):
    """
    Pick pie-slice colors for the temporal-named-entity (TNE) diagram,
    keyed on the node labels.
    """
    colors = []
    for p in df["node_labels"]:
        if p in ["", 'Data Temporality<br>']:
            colors.append("white")
        elif p in ["<1 an"]:
            colors.append("blue")
        elif p in ["1 à 5 ans"]:
            colors.append("brown")
        else:
            colors.append("red")
    return colors


###
def drw_pie(df, colors):
    """
    Draw a plotly Sunburst ("pie") diagram from the node table built in
    run(). ``colors`` comes from sne_color()/tne_color().
    """
    # NOTE(review): plotly expects ``texttemplate`` to be a string (or
    # per-trace list), not this tuple of templates -- verify rendering.
    fig = go.Figure(
        data=go.Sunburst(
            ids=df["node_names"],
            labels=df["node_labels"],
            parents=df["node_parent"],
            marker=dict(colors=colors),
            values=df["node_counts"],
            branchvalues="total",
            texttemplate=('%{label}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}',
                          '%{label}<br>%{percentParent:.1%}'),),)
    fig.show()


def run(corpus_data):
    """
    Build the two node tables (temporal and spatial coverage of the
    corpus) and their color lists for the pie diagrams.
    """
    corpus_data_tne, range_1, range_2, range_3 = process_date(corpus_data)
    # NOTE(review): 'With_SNE' is counted with isna().sum() (rows WITHOUT
    # a spatial entity) -- the two SNE counts look swapped; confirm.
    SNE_NODE = {'node_names': ['Corpus', 'With_SNE', 'WithOut_SNE'],
                'node_parent': ["", "Corpus", "Corpus"],
                'node_labels': ['Data Spatiality<br>', 'With_SNE', 'WithOut_SNE'],
                'node_counts': [len(corpus_data), corpus_data['ent0'].isna().sum(), len(corpus_data) - corpus_data['ent0'].isna().sum()]
                }

    TNE_NODE = {'node_names': ['Corpus', "WithOut_TNE", 'With_TNE', "<1 an", "1 à 5 ans", "> 5 ans"],
                'node_parent': ["", "Corpus", "Corpus", "With_TNE", 'With_TNE', 'With_TNE'],
                'node_labels': ['Data Temporality<br>', "WithOut_TNE", 'With_TNE', "<1 an", "1 à 5 ans", "> 5 ans"],
                'node_counts': [len(corpus_data), len(corpus_data) - len(corpus_data_tne), len(corpus_data_tne),
                                len(range_1), len(range_2), len(range_3)]
                }
    df1 = pd.DataFrame(TNE_NODE)
    df2 = pd.DataFrame(SNE_NODE)
    colors1 = tne_color(df1)
    colors2 = sne_color(df2)
    return df1, colors1, df2, colors2


def file_selector(folder_path='.'):
    """
    Streamlit widget: let the user pick the database file from a local
    folder; returns the selected path.
    """
    filenames = os.listdir(folder_path)
    selected_filename = st.selectbox('Select a file', filenames)
    return os.path.join(folder_path, selected_filename)


def main():
    """
    main Fxn: build the Streamlit UI.
    """
    # df_main = pd.DataFrame()
    cols = ['name', 'title', 'text', 'pertinence', 'ent0', 'date', 'thematic']
    st.title('3M Thematic Corpus Builder')
    # menu = ["Home", "Demo Data", "Data"]  # full menu, currently reduced to Demo Data
    menu = ["Demo Data"]
    choice = st.sidebar.selectbox("Menu", menu)
    if choice == "Demo Data":
        # we are using the Demo Data menu in this section
        st.subheader("Parametres de la Recherche")
        txt, start_date, end_date = st.beta_columns([2, 1, 1])
        them_option = txt.selectbox('Choisir une thématique', ('agriculture', 'hydrologie', 'urbanisation'))
        # start_date.success("start_date")
        # 
st.write('Thématique :',them_option)# themat) start_date = start_date.number_input("start_date",1995,2040) # st.write('Date Initale :', start_date) # end_date.success("end_date") end_date = end_date.number_input("end_date",1996,2040) st.write('Thématique :',them_option, '\t Date Initale :', start_date, '\t Date Finale :', end_date) st.subheader("Base de données") # thematique = st.text_area("Nom de thematique --> agriculture or hydrologie or urbanisation") filename = file_selector() st.write('You selected `%s`' % filename) # filename = st.file_uploader("Select an existing DataBase",type=['jsonl']) Scrap_button = st.button("Start Retriving") # st.form_submit_button(label = 'submit') df_main = '' if Scrap_button: # file_details = {"Filename":filename.name,"FileType":filename.type,"FileSize":filename.size} # st.write(file_details) # col1,col2 = st.beta_columns(2) df = look_for_thematic_data(filename,them_option)#themat)#thematique) df = pd.DataFrame(df, columns=cols) df_main = df if them_option is not None: st.write('Paramètres de la recherche :', them_option ,start_date,end_date) # df['ent0'] = st.success('TOP@10 of the Corpus DataFrame') st.dataframe(df.head(10)) st.write(repr(len(df)) + ' documents dans le corpus') # st.write('Size of query corpus : ',df.memory_usage(index=True).sum() ) # st.write('Size of query corpus : ', df.info(memory_usage='deep')) st.write('Size of query corpus : ', repr(round(sys.getsizeof(df)/1000000,2))+ ' '+'Mb') # st.int(len(df)) : >>> sys.getsizeof(df) # st.dataframe(df) # st.markdown(get_table_download_link(df), unsafe_allow_html=True) st.markdown(get_table_download_link_csv(df), unsafe_allow_html=True) # Distribution = st.button("Vizualise Data Distribution") # st.form_submit_button(label = 'submit') st.markdown("<h1 style='text-align: center; color: blue;'>Data Spatio-Temporal Distribution</h1>", unsafe_allow_html=True) # col1= st.beta_container() """ divided in two cols, in the first, we display Spatiality diagram et temporality in 
the second """ col1,col2 = st.beta_columns(2) # col1,col2 = st.beta_ # st.beta_columns(2) # if Distribution: # st.success('Quantitative Distribution') with col1: st.success('Spatiality Coverage') df1, colors1, df2, colors2 = run(df_main) ########### fig2=go.Figure( data=go.Sunburst( ids=df2["node_names"],labels=df2["node_labels"], parents=df2["node_parent"], marker=dict(colors=colors2), values=df2["node_counts"], branchvalues="total", texttemplate = ('%{label}', '%{label}<br>%{percentParent:.1%}', '%{label}<br>%{percentParent:.1%}'),),) # fig2.update_layout(margin = dict(t=0, l=0, r=0, b=0)) fig2.update_layout(width=250, height=250, autosize=True, margin=dict(t=0, b=0, l=0, r=0), template="plotly_white",) # fig2.update_layout(autosize=False, width=500,height=500,) # st.plotly_chart(fig2, use_container_width = True) st.plotly_chart(fig2, use_container_width = True) ######################## with col2: st.success("Temporality Coverage") fig1=go.Figure( data=go.Sunburst( ids=df1["node_names"],labels=df1["node_labels"], parents=df1["node_parent"], marker=dict(colors=colors1), values=df1["node_counts"], branchvalues="total", texttemplate = ('%{label}', '%{label}<br>%{percentParent:.1%}', '%{label}<br>%{percentParent:.1%}', '%{label}<br>%{percentParent:.1%}', '%{label}<br>%{percentParent:.1%}', '%{label}<br>%{percentParent:.1%}'),),) # fig1.update_layout(margin = dict(t=0, l=0, r=0, b=0)) fig1.update_layout(width=250, height=250, autosize=True, margin=dict(t=0, b=0, l=0, r=0), template="plotly_white",) st.plotly_chart(fig1, use_container_width = True) """ make two cols for wordcloud viz, spatial and temporal """ col11,col22 = st.beta_columns(2) with col11: st.success('Spatiality WordCloud') #Final word cloud after all the cleaning and pre-processing wordcloud = WordCloud(background_color='black').generate(' '.join(df['ent0'][df['ent0'].notnull()].astype(str) )) plt.imshow(wordcloud) plt.axis("off") # plt.show() st.pyplot(plt) with col22: st.success('Temporality 
WordCloud') counts = df['date'].dt.year.value_counts() counts.index = counts.index.map(str) wordcloud = WordCloud().generate_from_frequencies(counts) plt.figure() plt.imshow(wordcloud, interpolation="bilinear") plt.axis("off") # plt.show() st.pyplot(plt) if __name__ == '__main__': main()
StarcoderdataPython
6529574
"""Distutils packaging script for pyvsc (VSC Losses Electrothermal Model)."""
from distutils.core import setup

# The long description shown on package indexes comes straight from the
# project README.
with open('README.rst') as readme_file:
    long_description = readme_file.read()

setup(
    name='pyvsc',
    version='0.1',
    description='VSC Losses Electrothermal Model',
    long_description=long_description,
    author='<NAME>',
    author_email='<EMAIL>',
    packages=['pyvsc', 'pyvsc.tests'],
    classifiers=[
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3.6',
    ],
)
StarcoderdataPython