Dataset columns (ranges as reported by the viewer):

  content            string    lengths 1 to 1.05M characters
  input_ids          list      lengths 1 to 883k items
  ratio_char_token   float64   values  1 to 22.9
  token_count        int64     values  1 to 883k
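The derived columns follow directly from `content` and `input_ids`: `token_count` is the length of `input_ids`, and `ratio_char_token` is the character length of `content` divided by that count. Below is a minimal sketch of that relationship. It assumes a GPT-2-style BPE tokenizer — the sample IDs are consistent with GPT-2's vocabulary (e.g. 198 decodes to a newline there), but the exact tokenizer is not stated in this dump, so treat it as an assumption. Under that assumption, the short `import torch` row below (ratio 2.916667, count 12) is reproduced exactly.

# Sketch of how each row's derived fields relate to its content.
# Assumption: GPT-2 BPE tokenizer; the true tokenizer is not documented here.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

def derive_fields(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)                    # int64 column
    ratio_char_token = len(content) / token_count   # float64 column
    return {"input_ids": input_ids,
            "ratio_char_token": ratio_char_token,
            "token_count": token_count}

# Example: round-trip one of the short samples from the rows below.
fields = derive_fields("import torch\n\n__author__ = 'Andres'")
assert tokenizer.decode(fields["input_ids"]).startswith("import torch")
print(fields["ratio_char_token"], fields["token_count"])

The rows below are shown with their original line structure restored; the `input_ids` previews remain truncated as in the source.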
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[kda-python-datagenerator-stockticker.py demonstrates how to generate sample data for Amazon Kinesis Data Analytics SQL applications.]
# snippet-service:[kinesisanalytics]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon Kinesis Data Analytics]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-01-29]
# snippet-sourceauthor:[fletpatr (AWS)]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
#    http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# snippet-start:[kinesisanalytics.python.datagenerator.stockticker]

import json
import boto3
import random
import datetime

kinesis = boto3.client('kinesis')

while True:
    data = json.dumps(getReferrer())
    print(data)
    kinesis.put_record(
        StreamName="ExampleInputStream",
        Data=data,
        PartitionKey="partitionkey")

# snippet-end:[kinesisanalytics.python.datagenerator.stockticker]
[ 2, 39442, 12, 23893, 33250, 4711, 389, 15940, 329, 262, 30865, 2205, 1074, 338, 6291, 18388, 13, 2141, 407, 4781, 8183, 198, 2, 39442, 12, 82, 30555, 7260, 33250, 74, 6814, 12, 29412, 12, 19608, 363, 877, 1352, 12, 13578, 83, 15799, ...
3.051081
509
import face_recognition
from flask import Flask, request, redirect, Response
import camera
import firestore as db

# You can change this to any folder on your system
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}

app = Flask(__name__)

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=5001, debug=True)
[ 11748, 1986, 62, 26243, 653, 198, 6738, 42903, 1330, 46947, 11, 2581, 11, 18941, 11, 18261, 198, 11748, 4676, 198, 11748, 2046, 8095, 355, 20613, 198, 198, 2, 921, 460, 1487, 220, 220, 220, 220, 428, 284, 597, 9483, 319, 534, 1080, ...
2.762712
118
import numpy as np
import logging
import numbers
import torch
import math
import json
import sys

from torch.optim.lr_scheduler import LambdaLR
from torchvision.transforms.functional import pad


def get_padding(image):
    w, h = image.size
    max_wh = np.max([w, h])
    h_padding = (max_wh - w) / 2
    v_padding = (max_wh - h) / 2
    l_pad = h_padding if h_padding % 1 == 0 else h_padding + 0.5
    t_pad = v_padding if v_padding % 1 == 0 else v_padding + 0.5
    r_pad = h_padding if h_padding % 1 == 0 else h_padding - 0.5
    b_pad = v_padding if v_padding % 1 == 0 else v_padding - 0.5
    padding = (int(l_pad), int(t_pad), int(r_pad), int(b_pad))
    return padding


def adjust_learning_rate(optimizer, epoch, lr):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
[ 11748, 299, 32152, 355, 45941, 198, 11748, 18931, 198, 11748, 3146, 198, 11748, 28034, 198, 11748, 10688, 198, 11748, 33918, 198, 11748, 25064, 198, 198, 6738, 28034, 13, 40085, 13, 14050, 62, 1416, 704, 18173, 1330, 21114, 6814, 35972, 1...
2.52139
374
import pytest

from tests.pylint_plugins.utils import create_message, extract_node, skip_if_pylint_unavailable

pytestmark = skip_if_pylint_unavailable()
[ 11748, 12972, 9288, 198, 198, 6738, 5254, 13, 79, 2645, 600, 62, 37390, 13, 26791, 1330, 2251, 62, 20500, 11, 7925, 62, 17440, 11, 14267, 62, 361, 62, 79, 2645, 600, 62, 403, 15182, 198, 198, 9078, 9288, 4102, 796, 14267, 62, 361, ...
2.943396
53
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors
import csv
from scipy.stats import mode
import math as m
import os
import collections

#set working directory
#os.chdir("/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")

#bkpt_name = "1"

#example: plot_bcs_bkpt("1", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr", "/mnt/ix1/Projects/M002_131217_gastric/P00526/P00526_WG10_150722_gastric/A20_170516_hmw_maps/metr")
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 10786, 46384, 11537, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2603, 2...
2.160714
280
# tests
[ 198, 198, 2, 5254 ]
2.25
4
import logging

from flask import Blueprint
from flask import Flask, render_template, request, flash
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, BooleanField
from wtforms.fields.html5 import IntegerRangeField
from wtforms.widgets import TextArea
import langid

from utils.utils import templated

blueprint_langid = Blueprint('langid', __name__)
[ 11748, 18931, 198, 198, 6738, 42903, 1330, 39932, 198, 198, 6738, 42903, 1330, 46947, 11, 8543, 62, 28243, 11, 2581, 11, 7644, 198, 6738, 42903, 62, 86, 27110, 1330, 46947, 8479, 198, 6738, 266, 83, 23914, 1330, 10903, 15878, 11, 4938, ...
3.666667
108
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
[ 2, 15069, 2211, 12, 1238, 2481, 13914, 45036, 3549, 2351, 4765, 11, 11419, 290, 584, 198, 2, 1338, 441, 4935, 34152, 13, 4091, 262, 1353, 12, 5715, 27975, 38162, 9947, 2393, 329, 3307, 13, 198, 2, 198, 2, 30628, 55, 12, 34156, 12, ...
3.421875
64
# Copyright (C) July 2018: TF TECH NV in Belgium see https://www.threefold.tech/
# In case TF TECH NV ceases to exist (e.g. because of bankruptcy)
# then Incubaid NV also in Belgium will get the Copyright & Authorship for all changes made since July 2018
# and the license will automatically become Apache v2 for all code related to Jumpscale & DigitalMe
# This file is part of jumpscale at <https://github.com/threefoldtech>.
# jumpscale is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jumpscale is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v3 for more details.
#
# You should have received a copy of the GNU General Public License
# along with jumpscale or jumpscale derived works. If not, see <http://www.gnu.org/licenses/>.
# LICENSE END

from Jumpscale import j

from .JSConfigBCDBBase import JSConfigBCDBBase
[ 2, 15069, 357, 34, 8, 2901, 2864, 25, 220, 24958, 44999, 23973, 287, 15664, 766, 3740, 1378, 2503, 13, 15542, 11379, 13, 13670, 14, 198, 2, 554, 1339, 24958, 44999, 23973, 42217, 284, 2152, 357, 68, 13, 70, 13, 780, 286, 17235, 8, ...
3.790323
310
import utils as utl
import error_measures as err

# Regression Tree Node


# Regression Tree
[ 11748, 3384, 4487, 355, 3384, 75, 198, 11748, 4049, 62, 47336, 355, 11454, 198, 198, 2, 3310, 2234, 12200, 19081, 628, 198, 2, 3310, 2234, 12200, 198 ]
3.444444
27
from starlette.applications import Starlette
from starlette.middleware.gzip import GZipMiddleware
from starlette.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles

app = Starlette(debug=False, template_directory='src/site/templates')
app.add_middleware(GZipMiddleware, minimum_size=500)
app.add_middleware(CORSMiddleware, allow_origins=['*'])
app.mount('/static', StaticFiles(directory='src/site/media'), name='static')
[ 6738, 3491, 21348, 13, 1324, 677, 602, 1330, 2907, 21348, 198, 6738, 3491, 21348, 13, 27171, 1574, 13, 70, 13344, 1330, 402, 41729, 34621, 1574, 198, 6738, 3491, 21348, 13, 27171, 1574, 13, 66, 669, 1330, 23929, 12310, 2509, 1574, 198, ...
3.313869
137
#! /usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Contains core logic for Rainman2
"""

__author__ = 'Ari Saha (arisaha@icloud.com), Mingyang Liu(liux3941@umn.edu)'
__date__ = 'Wednesday, February 14th 2018, 11:42:09 am'
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 198, 4264, 1299, 4755, 9156, 329, 10301, 805, 17, 198, 37811, 628, 198, 834, 9800, 834, 796, 705, ...
2.445652
92
""" BespokeFit Creating bespoke parameters for individual molecules. """ import logging import sys from ._version import get_versions versions = get_versions() __version__ = versions["version"] __git_revision__ = versions["full-revisionid"] del get_versions, versions # Silence verbose messages when running the CLI otherwise you can't read the output # without seeing tens of 'Unable to load AmberTools' or don't import simtk warnings... if sys.argv[0].endswith("openff-bespoke"): from openff.bespokefit.utilities.logging import DeprecationWarningFilter # if "openff-bespoke" logging.getLogger("openff.toolkit").setLevel(logging.ERROR) logging.getLogger().addFilter(DeprecationWarningFilter())
[ 37811, 198, 33, 9774, 2088, 31805, 198, 32071, 7284, 35924, 10007, 329, 1981, 17745, 13, 198, 37811, 198, 11748, 18931, 198, 11748, 25064, 198, 198, 6738, 47540, 9641, 1330, 651, 62, 47178, 198, 198, 47178, 796, 651, 62, 47178, 3419, 19...
3.334884
215
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 03 11:06:37 2018

@author: vmg
"""

import sdf
import numpy as np

# Load 2006 LUT for interpolation
# 2006 Groeneveld Look-Up Table as presented in
# "2006 CHF Look-Up Table", Nuclear Engineering and Design 237, pp. 190-1922.
# This file requires the file 2006LUTdata.txt

# Pressure range [MPa] from 2006 LUT, convert to [Pa]
P = np.array((0.10,0.30,0.50,1.0,2.0,3.0,5.0,7.0,10.0,12.0,14.0,16.0,18.0,20.0,21.0))*1e6

# Mass Flux range [kg/m^2-s] from 2006 .LUT.
G = np.array((0.,50.,100.,300.,500.,750.,1000.,1500.,2000.,2500.,3000.,3500.,4000.,4500.,5000.,5500.,6000.,6500.,7000.,7500.,8000.))

# Quality range from 2006 LUT
x = np.array((-0.50,-0.40,-0.30,-0.20,-0.15,-0.10,-0.05,0.00,0.05,0.10,0.15,0.20,0.25,0.30,0.35,0.40,0.45,0.50,0.60,0.70,0.80,0.90,1.00))

# Critical heat flux [kW/m^2] from 2006 LUT, convert to [W/m^2]
q_raw=np.loadtxt('../Data/2006LUTdata.txt')*1e3

# Convert the imported array into a (MxNxQ) where:
#   M is number of mass flux divisions
#   N is number of quality divisions
#   Q is number of pressure divisions
lenG = len(G)
lenx = len(x)
lenP = len(P)
q = np.zeros((lenG,lenx,lenP))
for i in xrange(lenG):
    for j in xrange(lenx):
        for k in xrange(lenP):
            q[i,j,k] = q_raw[i + k*lenG,j]

# Create the datasets:
ds_G = sdf.Dataset('G', data=G, unit='kg/(m2.s)', is_scale=True, display_name='Mass Flux')
ds_x = sdf.Dataset('x', data=x, unit='1', is_scale=True, display_name='Quality')
ds_P = sdf.Dataset('P', data=P, unit='Pa', is_scale=True, display_name='Pressure')
ds_q = sdf.Dataset('q', data=q, unit='W/m2', scales=[ds_G,ds_x,ds_P])

# Create the root group and write the file:
g = sdf.Group('/', comment='2006 CHF LUT', datasets=[ds_G,ds_x,ds_P,ds_q])
sdf.save('../Data/2006LUT.sdf', g)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 30030, 2758, 7643, 1367, 25, 3312, 25, 2718, 2864, 198, 198, 31, 9800, 25, 410, 11296, 198, 37811, 198, 198, 11748, 264, 7568, 198, 11748, 299, ...
2.149644
842
import unittest
from pathlib import Path
from pprint import pprint
from vint.compat.itertools import zip_longest
from vint.linting.linter import Linter
from vint.linting.config.config_default_source import ConfigDefaultSource
[ 11748, 555, 715, 395, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 279, 4798, 1330, 279, 4798, 198, 6738, 410, 600, 13, 5589, 265, 13, 270, 861, 10141, 1330, 19974, 62, 6511, 395, 198, 6738, 410, 600, 13, 75, 600, 278, 13, 2815, ...
3.304348
69
import copy
import json
import logging
import os
import sys
import time
from collections import defaultdict

import numpy as np
import tensorflow as tf
from sklearn import decomposition

from .. import dp_logging
from . import labeler_utils
from .base_model import AutoSubRegistrationMeta, BaseModel, BaseTrainableModel

_file_dir = os.path.dirname(os.path.abspath(__file__))

logger = dp_logging.get_child_logger(__name__)

tf_logger = logging.getLogger('tensorflow')
tf_logger.addFilter(NoV1ResourceMessageFilter())


def build_embd_dictionary(filename):
    """
    Returns a numpy embedding dictionary from embed file with GloVe-like format

    :param filename: Path to the embed file for loading
    :type filename: str
    """
    embd_table = dict()
    with open(filename, 'r') as embds:
        for line in embds:
            line = line.strip().split()
            embd_table[line[0]] = np.asarray(line[1:])

    return embd_table


def create_glove_char(n_dims, source_file=None):
    """
    Embeds GloVe chars embeddings from source file to n_dims principal
    components in a new file

    :param n_dims: Final number of principal component dims of the embeddings
    :type n_dims: int
    :param source_file: Location of original embeddings to factor down
    :type source_file: str
    """
    if source_file is None:
        source_file = os.path.join(_file_dir,
                                   "embeddings/glove.840B.300d-char.txt")

    # get embedding table first and vectors as array
    embd_table = build_embd_dictionary(source_file)
    embd_words, embd_matrix = [
        np.asarray(ls) if i > 0 else list(ls)
        for i, ls in enumerate(zip(*embd_table.items()))]

    # get PCA embedder
    pca = decomposition.PCA(n_components=n_dims)
    reduced_embds = pca.fit_transform(embd_matrix)

    # write to file
    dir_name = os.path.dirname(source_file)
    embd_file_name = os.path.join(dir_name,
                                  'glove-reduced-{}D.txt'.format(n_dims))
    with open(embd_file_name, 'w') as file:
        for word, embd in zip(embd_words, reduced_embds):
            file.write(word + " " + ' '.join(str(num) for num in embd) + "\n")


class CharacterLevelCnnModel(BaseTrainableModel,
                             metaclass=AutoSubRegistrationMeta):

    # boolean if the label mapping requires the mapping for index 0 reserved
    requires_zero_mapping = True

    def __init__(self, label_mapping=None, parameters=None):
        """
        CNN Model Initializer. initialize epoch_id

        :param label_mapping: maps labels to their encoded integers
        :type label_mapping: dict
        :param parameters: Contains all the appropriate parameters for the
            model. Must contain num_labels. Other possible parameters are:
                max_length, max_char_encoding_id, dim_embed, size_fc
                dropout, size_conv, num_fil, optimizer, default_label
        :type parameters: dict
        :return: None
        """
        # parameter initialization
        if not parameters:
            parameters = {}
        parameters.setdefault('max_length', 3400)
        parameters.setdefault('max_char_encoding_id', 127)
        parameters.setdefault('dim_embed', 64)
        parameters.setdefault('size_fc', [96, 96])
        parameters.setdefault('dropout', 0.073)
        parameters.setdefault('size_conv', 13)
        parameters.setdefault('default_label', "UNKNOWN")
        parameters.setdefault('num_fil', [48 for _ in range(4)])
        parameters['pad_label'] = 'PAD'
        self._epoch_id = 0

        # reconstruct flags for model
        self._model_num_labels = 0
        self._model_default_ind = -1

        BaseModel.__init__(self, label_mapping, parameters)

    def __eq__(self, other):
        """
        Checks if two models are equal with one another, may only check
        important variables, i.e. may not check model itself.

        :param self: a model
        :param other: a model
        :type self: BaseModel
        :type other: BaseModel
        :return: Whether or not self and other are equal
        :rtype: bool
        """
        if self._parameters != other._parameters \
                or self._label_mapping != other._label_mapping:
            return False
        return True

    def _validate_parameters(self, parameters):
        """
        Validate the parameters sent in. Raise error if invalid parameters are
        present.

        :param parameters: parameter dict containing the following parameters:
            max_length: Maximum char length in a sample
            max_char_encoding_id: Maximum integer value for encoding the input
            dim_embed: Number of embedded dimensions
            size_fc: Size of each fully connected layers
            dropout: Ratio of dropout in the model
            size_conv: Convolution kernel size
            default_label: Key for label_mapping that is the default label
            pad_label: Key for entities_dict that is the pad label
            num_fil: Number of filters in each convolution layer
        :type parameters: dict
        :return: None
        """
        errors = []
        list_of_necessary_params = ['max_length', 'max_char_encoding_id',
                                    'dim_embed', 'size_fc', 'dropout',
                                    'size_conv', 'default_label', 'pad_label',
                                    'num_fil']
        # Make sure the necessary parameters are present and valid.
        for param in parameters:
            if param in ['max_length', 'max_char_encoding_id', 'dim_embed',
                         'size_conv']:
                if not isinstance(parameters[param], (int, float)) \
                        or parameters[param] < 0:
                    errors.append(param + " must be a valid integer or float "
                                          "greater than 0.")
            elif param == 'dropout':
                if not isinstance(parameters[param], (int, float)) \
                        or parameters[param] < 0 or parameters[param] > 1:
                    errors.append(param + " must be a valid integer or float "
                                          "from 0 to 1.")
            elif param == 'size_fc' or param == 'num_fil':
                if not isinstance(parameters[param], list) \
                        or len(parameters[param]) == 0:
                    errors.append(param + " must be a non-empty list of "
                                          "integers.")
                else:
                    for item in parameters[param]:
                        if not isinstance(item, int):
                            errors.append(param + " must be a non-empty "
                                                  "list of integers.")
                            break
            elif param == 'default_label':
                if not isinstance(parameters[param], str):
                    error = str(param) + " must be a string."
                    errors.append(error)

        # Error if there are extra parameters thrown in
        for param in parameters:
            if param not in list_of_necessary_params:
                errors.append(param + " is not an accepted parameter.")
        if errors:
            raise ValueError('\n'.join(errors))

    def set_label_mapping(self, label_mapping):
        """
        Sets the labels for the model

        :param label_mapping: label mapping of the model
        :type label_mapping: dict
        :return: None
        """
        if not isinstance(label_mapping, (list, dict)):
            raise TypeError("Labels must either be a non-empty encoding dict "
                            "which maps labels to index encodings or a list.")

        label_mapping = copy.deepcopy(label_mapping)
        if 'PAD' not in label_mapping:
            if isinstance(label_mapping, list):  # if list missing PAD
                label_mapping = ['PAD'] + label_mapping
            elif 0 not in label_mapping.values():  # if dict missing PAD and 0
                label_mapping.update({'PAD': 0})
        if (isinstance(label_mapping, dict)
                and label_mapping.get('PAD', None) != 0):  # dict with bad PAD
            raise ValueError("`PAD` must map to index zero.")
        if self._parameters['default_label'] not in label_mapping:
            raise ValueError("The `default_label` of {} must exist in the "
                             "label mapping.".format(
                                 self._parameters['default_label']))
        super().set_label_mapping(label_mapping)

    def _need_to_reconstruct_model(self):
        """
        Determines whether or not the model needs to be reconstructed.

        :return: bool of whether or not the model needs to reconstruct.
        """
        if not self._model:
            return False
        default_ind = self.label_mapping[self._parameters['default_label']]
        return self.num_labels != self._model_num_labels or \
            default_ind != self._model_default_ind

    def save_to_disk(self, dirpath):
        """
        Saves whole model to disk with weights

        :param dirpath: directory path where you want to save the model to
        :type dirpath: str
        :return: None
        """
        if not self._model:
            self._construct_model()
        elif self._need_to_reconstruct_model():
            self._reconstruct_model()

        model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
        with open(model_param_dirpath, 'w') as fp:
            json.dump(self._parameters, fp)
        labels_dirpath = os.path.join(dirpath, "label_mapping.json")
        with open(labels_dirpath, 'w') as fp:
            json.dump(self.label_mapping, fp)
        self._model.save(os.path.join(dirpath))

    def _construct_model(self):
        """
        Model constructor for the data labeler. This also serves as a weight
        reset.

        :return: None
        """
        num_labels = self.num_labels
        default_ind = self.label_mapping[self._parameters['default_label']]

        # Reset model
        tf.keras.backend.clear_session()

        # generate glove embedding
        create_glove_char(self._parameters['dim_embed'])

        # generate model
        self._model = tf.keras.models.Sequential()

        # default parameters
        max_length = self._parameters['max_length']
        max_char_encoding_id = self._parameters['max_char_encoding_id']

        # Encoding layer
        self._model.add(tf.keras.layers.Input(shape=(None,), dtype=tf.string))
        self._model.add(
            tf.keras.layers.Lambda(encoding_function,
                                   output_shape=tuple([max_length])))

        # Create a pre-trained weight matrix
        # character encoding indices range from 0 to max_char_encoding_id,
        # we add one extra index for out-of-vocabulary character
        embed_file = os.path.join(
            _file_dir, "embeddings/glove-reduced-{}D.txt".format(
                self._parameters['dim_embed']))
        embedding_matrix = np.zeros((max_char_encoding_id + 2,
                                     self._parameters['dim_embed']))
        embedding_dict = build_embd_dictionary(embed_file)

        input_shape = tuple([max_length])
        # Fill in the weight matrix: let pad and space be 0s
        for ascii_num in range(max_char_encoding_id):
            if chr(ascii_num) in embedding_dict:
                embedding_matrix[ascii_num + 1] = embedding_dict[chr(ascii_num)]

        self._model.add(tf.keras.layers.Embedding(
            max_char_encoding_id + 2,
            self._parameters['dim_embed'],
            weights=[embedding_matrix],
            input_length=input_shape[0],
            trainable=True))

        # Add the convolutional layers
        for fil in self._parameters['num_fil']:
            self._model.add(tf.keras.layers.Conv1D(
                filters=fil, kernel_size=self._parameters['size_conv'],
                activation='relu', padding='same'))
            if self._parameters['dropout']:
                self._model.add(
                    tf.keras.layers.Dropout(self._parameters['dropout']))
            # Add batch normalization, set fused = True for compactness
            self._model.add(
                tf.keras.layers.BatchNormalization(fused=False, scale=True))

        # Add the fully connected layers
        for size in self._parameters['size_fc']:
            self._model.add(
                tf.keras.layers.Dense(units=size, activation='relu'))
            if self._parameters['dropout']:
                self._model.add(
                    tf.keras.layers.Dropout(self._parameters['dropout']))

        # Add the final Softmax layer
        self._model.add(
            tf.keras.layers.Dense(num_labels, activation='softmax'))

        # Output the model into a .pb file for TensorFlow
        argmax_layer = tf.keras.backend.argmax(self._model.output)

        # Create confidence layers
        final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
            num_labels, threshold=0.0, default_ind=default_ind)

        argmax_outputs = self._model.outputs + \
            [argmax_layer,
             final_predicted_layer(argmax_layer, self._model.output)]
        self._model = tf.keras.Model(self._model.inputs, argmax_outputs)

        # Compile the model
        softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
        losses = {softmax_output_layer_name: "categorical_crossentropy"}

        # use f1 score metric
        f1_score_training = F1Score(num_classes=num_labels, average='micro')
        metrics = {softmax_output_layer_name: ['acc', f1_score_training]}

        self._model.compile(loss=losses, optimizer="adam", metrics=metrics)

        self._epoch_id = 0
        self._model_num_labels = num_labels
        self._model_default_ind = default_ind

    def reset_weights(self):
        """
        Reset the weights of the model.

        :return: None
        """
        self._construct_model()

    def _reconstruct_model(self):
        """
        Reconstruct the appropriate layers if the number of number of labels
        is altered

        :return: None
        """
        # Reset model
        tf.keras.backend.clear_session()

        num_labels = self.num_labels
        default_ind = self.label_mapping[self._parameters['default_label']]

        # Remove the 3 output layers (dense_2', 'tf_op_layer_ArgMax',
        # 'thresh_arg_max_layer')
        for _ in range(3):
            self._model.layers.pop()

        # Add the final Softmax layer to the previous spot
        final_softmax_layer = tf.keras.layers.Dense(
            num_labels, activation='softmax', name="dense_2")(
            self._model.layers[-4].output)

        # Output the model into a .pb file for TensorFlow
        argmax_layer = tf.keras.backend.argmax(final_softmax_layer)

        # Create confidence layers
        final_predicted_layer = CharacterLevelCnnModel._argmax_threshold_layer(
            num_labels, threshold=0.0, default_ind=default_ind)

        argmax_outputs = [final_softmax_layer] + \
            [argmax_layer,
             final_predicted_layer(argmax_layer, final_softmax_layer)]
        self._model = tf.keras.Model(self._model.inputs, argmax_outputs)

        # Compile the model
        softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]
        losses = {softmax_output_layer_name: "categorical_crossentropy"}

        # use f1 score metric
        f1_score_training = F1Score(num_classes=num_labels, average='micro')
        metrics = {softmax_output_layer_name: ['acc', f1_score_training]}

        self._model.compile(loss=losses, optimizer="adam", metrics=metrics)

        self._epoch_id = 0
        self._model_num_labels = num_labels
        self._model_default_ind = default_ind

    def fit(self, train_data, val_data=None, batch_size=32,
            label_mapping=None, reset_weights=False, verbose=True):
        """
        Train the current model with the training data and validation data

        :param train_data: Training data used to train model
        :type train_data: Union[list, np.ndarray]
        :param val_data: Validation data used to validate the training
        :type val_data: Union[list, np.ndarray]
        :param batch_size: Used to determine number of samples in each batch
        :type batch_size: int
        :param label_mapping: maps labels to their encoded integers
        :type label_mapping: Union[dict, None]
        :param reset_weights: Flag to determine whether to reset the weights
            or not
        :type reset_weights: bool
        :param verbose: Flag to determine whether to print status or not
        :type verbose: bool
        :return: None
        """
        if label_mapping is not None:
            self.set_label_mapping(label_mapping)

        if not self._model:
            self._construct_model()
        else:
            if self._need_to_reconstruct_model():
                self._reconstruct_model()
            if reset_weights:
                self.reset_weights()

        history = defaultdict()
        f1 = None
        f1_report = []

        self._model.reset_metrics()
        softmax_output_layer_name = self._model.outputs[0].name.split('/')[0]

        start_time = time.time()
        batch_id = 0
        for x_train, y_train in train_data:
            model_results = self._model.train_on_batch(
                x_train, {softmax_output_layer_name: y_train})
            sys.stdout.flush()
            if verbose:
                sys.stdout.write(
                    "\rEPOCH %d, batch_id %d: loss: %f - acc: %f - "
                    "f1_score %f" %
                    (self._epoch_id, batch_id, *model_results[1:]))
            batch_id += 1

        for i, metric_label in enumerate(self._model.metrics_names):
            history[metric_label] = model_results[i]

        if val_data:
            f1, f1_report = self._validate_training(val_data)
            history['f1_report'] = f1_report

            val_f1 = f1_report['weighted avg']['f1-score'] \
                if f1_report else np.NAN
            val_precision = f1_report['weighted avg']['precision'] \
                if f1_report else np.NAN
            val_recall = f1_report['weighted avg']['recall'] \
                if f1_report else np.NAN
            epoch_time = time.time() - start_time
            logger.info("\rEPOCH %d (%ds), loss: %f - acc: %f - f1_score %f -- "
                        "val_f1: %f - val_precision: %f - val_recall %f" %
                        (self._epoch_id, epoch_time, *model_results[1:],
                         val_f1, val_precision, val_recall))

        self._epoch_id += 1

        return history, f1, f1_report

    def _validate_training(self, val_data, batch_size_test=32,
                           verbose_log=True, verbose_keras=False):
        """
        Validate the model on the test set and return the evaluation metrics.

        :param val_data: data generator for the validation
        :type val_data: iterator
        :param batch_size_test: Number of samples to process in testing
        :type batch_size_test: int
        :param verbose_log: whether or not to print out scores for training,
            etc.
        :type verbose_log: bool
        :param verbose_keras: whether or not to print out scores for training,
            from keras.
        :type verbose_keras: bool
        return (f1-score, f1 report).
        """
        f1 = None
        f1_report = None

        if val_data is None:
            return f1, f1_report

        # Predict on the test set
        batch_id = 0
        y_val_pred = []
        y_val_test = []
        for x_val, y_val in val_data:
            y_val_pred.append(self._model.predict(
                x_val, batch_size=batch_size_test, verbose=verbose_keras)[1])
            y_val_test.append(np.argmax(y_val, axis=-1))
            batch_id += 1
            sys.stdout.flush()
            if verbose_log:
                sys.stdout.write("\rEPOCH %g, validation_batch_id %d" %
                                 (self._epoch_id, batch_id))

        tf.keras.backend.set_floatx('float32')
        # Clean the predicted entities and the actual entities
        f1, f1_report = labeler_utils.evaluate_accuracy(
            np.concatenate(y_val_pred, axis=0),
            np.concatenate(y_val_test, axis=0),
            self.num_labels,
            self.reverse_label_mapping,
            verbose=verbose_keras)

        return f1, f1_report

    def predict(self, data, batch_size=32, show_confidences=False,
                verbose=True):
        """
        Run model and get predictions

        :param data: text input
        :type data: Union[list, numpy.ndarray]
        :param batch_size: number of samples in the batch of data
        :type batch_size: int
        :param show_confidences: whether user wants prediction confidences
        :type show_confidences:
        :param verbose: Flag to determine whether to print status or not
        :type verbose: bool
        :return: char level predictions and confidences
        :rtype: dict
        """
        if not self._model:
            raise ValueError("You are trying to predict without a model. "
                             "Construct/Load a model before predicting.")
        elif self._need_to_reconstruct_model():
            raise RuntimeError("The model label mapping definitions have been "
                               "altered without additional training. Please "
                               "train the model or reset the label mapping to "
                               "predict.")
        # Pre-allocate space for predictions
        confidences = []
        sentence_lengths = np.zeros((batch_size,), dtype=int)
        predictions = np.zeros((batch_size, self._parameters['max_length']))
        if show_confidences:
            confidences = np.zeros((batch_size,
                                    self._parameters['max_length'],
                                    self.num_labels))

        # Run model with batching
        allocation_index = 0
        for batch_id, batch_data in enumerate(data):
            model_output = self._model(
                tf.convert_to_tensor(batch_data)
            )
            # Count number of samples in batch to prevent array mismatch
            num_samples_in_batch = len(batch_data)
            allocation_index = batch_id * batch_size

            # Double array size
            if len(predictions) <= allocation_index:
                predictions = np.pad(predictions, ((0, len(predictions)),
                                                   (0, 0)), mode='constant')
                sentence_lengths = np.pad(
                    sentence_lengths, pad_width=((0, len(sentence_lengths)),),
                    mode='constant')
                if show_confidences:
                    confidences = np.pad(confidences,
                                         ((0, len(predictions)),
                                          (0, 0), (0, 0)), mode='constant')

            if show_confidences:
                confidences[allocation_index:allocation_index + num_samples_in_batch] = model_output[0].numpy()
            predictions[allocation_index:allocation_index + num_samples_in_batch] = model_output[1].numpy()
            sentence_lengths[allocation_index:allocation_index + num_samples_in_batch] = list(map(lambda x: len(x[0]), batch_data))

            allocation_index += num_samples_in_batch

        # Convert predictions, confidences to lists from numpy
        predictions_list = [i for i in range(0, allocation_index)]
        confidences_list = None
        if show_confidences:
            confidences_list = [i for i in range(0, allocation_index)]

        # Append slices of predictions to return prediction & confidence matrices
        for index, sentence_length \
                in enumerate(sentence_lengths[:allocation_index]):
            predictions_list[index] = list(predictions[index][:sentence_length])
            if show_confidences:
                confidences_list[index] = list(confidences[index][:sentence_length])

        if show_confidences:
            return {'pred': predictions_list, 'conf': confidences_list}
        return {'pred': predictions_list}

    def details(self):
        """
        Prints the relevant details of the model (summary, parameters, label
        mapping)
        """
        print("\n###### Model Details ######\n")
        self._model.summary()
        print("\nModel Parameters:")
        for key, value in self._parameters.items():
            print("{}: {}".format(key, value))
        print("\nModel Label Mapping:")
        for key, value in self.label_mapping.items():
            print("{}: {}".format(key, value))
[ 11748, 4866, 198, 11748, 33918, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 640, 198, 6738, 17268, 1330, 4277, 11600, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 6738, ...
2.129375
11857
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from datetime import datetime
import json

from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd

from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
from airflow.www.decorators import gzipped

METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000

# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)

# Creating a flask admin BaseView
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")

# Creating a flask blueprint to intergrate the templates and static folder
bp = Blueprint(
    "metastore_browser", __name__,
    template_folder='templates',
    static_folder='static',
    static_url_path='/static/metastore_browser')

# Defining the plugin class
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 9387, 351,...
3.298157
597
#!/usr/bin/env python3

# SPDX-FileCopyrightText: 2021 Aaron Dewes <aaron.dewes@protonmail.com>
#
# SPDX-License-Identifier: MIT

import stat
import tempfile
import threading
from typing import List
from sys import argv
import os
import requests
import shutil
import json
import yaml
import subprocess

from lib.composegenerator.v0.generate import createComposeConfigFromV0
from lib.composegenerator.v1.generate import createComposeConfigFromV1
from lib.appymlgenerator import convertComposeYMLToAppYML
from lib.validate import findAndValidateApps
from lib.metadata import getAppRegistry, getSimpleAppRegistry
from lib.entropy import deriveEntropy

# For an array of threads, join them and wait for them to finish

# The directory with this script
scriptDir = os.path.dirname(os.path.realpath(__file__))
nodeRoot = os.path.join(scriptDir, "..", "..")
appsDir = os.path.join(nodeRoot, "apps")
appSystemDir = os.path.join(nodeRoot, "app-system")
sourcesList = os.path.join(appSystemDir, "sources.list")
appDataDir = os.path.join(nodeRoot, "app-data")
userFile = os.path.join(nodeRoot, "db", "user.json")
legacyScript = os.path.join(nodeRoot, "scripts", "app")

# Returns a list of every argument after the second one in sys.argv joined into a string by spaces

# Loads an app.yml and converts it to a docker-compose.yml

# Parse the sources.list repo file, which contains a list of sources in the format
# <git-url> <branch>
# For every line, clone the repo to a temporary dir and checkout the branch
# Then, check that repos apps in the temporary dir/apps and for every app,
# overwrite the current app dir with the contents of the temporary dir/apps/app
# Also, keep a list of apps from every repo, a repo later in the file may not overwrite an app from a repo earlier in the file
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 30628, 55, 12, 8979, 15269, 8206, 25, 33448, 12139, 25075, 274, 1279, 64, 8045, 13, 67, 413, 274, 31, 1676, 1122, 4529, 13, 785, 29, 198, 2, 198, 2, 30628, 55, 12, 34...
3.270417
551
from query.base import BaseQuery
[ 6738, 12405, 13, 8692, 1330, 7308, 20746, 628, 628, 628, 628, 198 ]
3.416667
12
from kim import Mapper, field

from example.models import Planet, Character
[ 6738, 479, 320, 1330, 337, 11463, 11, 2214, 198, 198, 6738, 1672, 13, 27530, 1330, 11397, 11, 15684, 628, 198 ]
3.9
20
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# Based on func.py
# (c) 2014, Michael Scherer <misc@zarb.org>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = '''
    author: Michael Scherer (@mscherer) <misc@zarb.org>
    name: saltstack
    short_description: Allow ansible to piggyback on salt minions
    description:
        - This allows you to use existing Saltstack infrastructure to connect to targets.
'''

import os
import base64
from ansible import errors
from ansible.plugins.connection import ConnectionBase

HAVE_SALTSTACK = False
try:
    import salt.client as sc
    HAVE_SALTSTACK = True
except ImportError:
    pass
[ 2, 13403, 319, 1957, 13, 9078, 357, 66, 8, 2321, 11, 3899, 1024, 23303, 272, 1279, 76, 40302, 13, 2934, 3099, 272, 31, 14816, 13, 785, 29, 198, 2, 13403, 319, 442, 15763, 13, 9078, 357, 66, 8, 2211, 11, 1737, 7750, 337, 23790, 1...
2.894569
313
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from servers.models import Compute
from create.models import Flavor
from instance.models import Instance
from libvirt import libvirtError
from vrtManager.create import wvmCreate
from vrtManager import util
from create.forms import FlavorAddForm, NewVMForm


def create(request, host_id):
    """
    Create new instance.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login')

    errors = []
    compute = Compute.objects.get(id=host_id)
    flavors = Flavor.objects.filter().order_by('id')

    try:
        conn = wvmCreate(compute.hostname,
                         compute.login,
                         compute.password,
                         compute.type)

        storages = sorted(conn.get_storages())
        networks = sorted(conn.get_networks())
        instances = conn.get_instances()
        get_images = sorted(conn.get_storages_images())
        mac_auto = util.randomMAC()
    except libvirtError as err:
        errors.append(err.message)

    if not storages:
        msg = _("You haven't defined have any storage pools")
        errors.append(msg)
    if not networks:
        msg = _("You haven't defined have any network pools")
        errors.append(msg)

    if request.method == 'POST':
        if 'create_flavor' in request.POST:
            form = FlavorAddForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                create_flavor = Flavor(label=data['label'],
                                       vcpu=data['vcpu'],
                                       memory=data['memory'],
                                       disk=data['disk'])
                create_flavor.save()
                return HttpResponseRedirect(request.get_full_path())
        if 'delete_flavor' in request.POST:
            flavor_id = request.POST.get('flavor', '')
            delete_flavor = Flavor.objects.get(id=flavor_id)
            delete_flavor.delete()
            return HttpResponseRedirect(request.get_full_path())
        if 'create' in request.POST:
            volumes = {}
            form = NewVMForm(request.POST)
            if form.is_valid():
                data = form.cleaned_data
                if instances:
                    if data['name'] in instances:
                        msg = _("A virtual machine with this name already exists")
                        errors.append(msg)
                if not errors:
                    if data['hdd_size']:
                        if not data['mac']:
                            msg = _("No Virtual Machine MAC has been entered")
                            errors.append(msg)
                        else:
                            try:
                                path = conn.create_volume(data['storage'], data['name'], data['hdd_size'])
                                volumes[path] = conn.get_volume_type(path)
                            except libvirtError as msg_error:
                                errors.append(msg_error.message)
                    elif data['template']:
                        templ_path = conn.get_volume_path(data['template'])
                        clone_path = conn.clone_from_template(data['name'], templ_path)
                        volumes[clone_path] = conn.get_volume_type(clone_path)
                    else:
                        if not data['images']:
                            msg = _("First you need to create or select an image")
                            errors.append(msg)
                        else:
                            for vol in data['images'].split(','):
                                try:
                                    path = conn.get_volume_path(vol)
                                    volumes[path] = conn.get_volume_type(path)
                                except libvirtError as msg_error:
                                    errors.append(msg_error.message)
                    if not errors:
                        uuid = util.randomUUID()
                        try:
                            conn.create_instance(data['name'], data['memory'], data['vcpu'],
                                                 data['host_model'], uuid, volumes,
                                                 data['networks'], data['virtio'], data['mac'])
                            create_instance = Instance(compute_id=host_id, name=data['name'], uuid=uuid)
                            create_instance.save()
                            return HttpResponseRedirect('/instance/%s/%s/' % (host_id, data['name']))
                        except libvirtError as msg_error:
                            if data['hdd_size']:
                                conn.delete_volume(volumes.keys()[0])
                            errors.append(msg_error.message)
    conn.close()

    return render_to_response('create.html', locals(), context_instance=RequestContext(request))
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 62, 1462, 62, 26209, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 7738, 1060, 198, 6738, 42625, 14208, 13, 28243, 1330, 19390, 21947, 198, 6738, 42625, 14208, 13, 26791, 13,...
1.867501
2717
import torch

__author__ = 'Andres'
[ 11748, 28034, 198, 198, 834, 9800, 834, 796, 705, 1870, 411, 6 ]
2.916667
12
import a_file
[ 11748, 257, 62, 7753, 198 ]
2.8
5
import streamlit as st
from ui.session_state import SessionState, get_state
from infer import ModelStage
[ 11748, 4269, 18250, 355, 336, 198, 6738, 334, 72, 13, 29891, 62, 5219, 1330, 23575, 9012, 11, 651, 62, 5219, 198, 6738, 13249, 1330, 9104, 29391, 628 ]
3.925926
27
'''
MIT License

Copyright (c) 2020 Rashid Lafraie

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''

import os
import ctypes
import numpy as np
from .TrainDataLoader import TrainDataLoader
[ 7061, 6, 198, 36393, 13789, 198, 198, 15269, 357, 66, 8, 12131, 27620, 312, 31528, 430, 494, 198, 198, 5990, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 1659, 428, 3788, 290, 3917, 10314, ...
3.820261
306
import os
import sys
import unittest

import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

if __name__ == "__main__":
    raise RuntimeError(
        "This test file is not meant to be run directly, use:\n\n"
        "\tpython test/test_jit.py TESTNAME\n\n"
        "instead."
    )

"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
[ 11748, 28686, 198, 11748, 25064, 198, 11748, 555, 715, 395, 198, 198, 11748, 28034, 198, 11748, 28034, 13557, 34, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 1332, 62, 20471, 15042, 1330, 6208, 6144, 17614, 198, 6738, 28034, 13, 33407,...
2.932153
339
train_data_path = "../data/no_cycle/train.data"
dev_data_path = "../data/no_cycle/dev.data"
test_data_path = "../data/no_cycle/test.data"
word_idx_file_path = "../data/word.idx"

word_embedding_dim = 100
train_batch_size = 32
dev_batch_size = 500
test_batch_size = 500
l2_lambda = 0.000001
learning_rate = 0.001
epochs = 100
encoder_hidden_dim = 200
num_layers_decode = 1
word_size_max = 1
dropout = 0.0
path_embed_method = "lstm"  # cnn or lstm or bi-lstm

unknown_word = "<unk>"
PAD = "<PAD>"
GO = "<GO>"
EOS = "<EOS>"
deal_unknown_words = True

seq_max_len = 11
decoder_type = "greedy"  # greedy, beam
beam_width = 4
attention = True
num_layers = 1  # 1 or 2

# the following are for the graph encoding method
weight_decay = 0.0000
sample_size_per_layer = 4
sample_layer_size = 4
hidden_layer_dim = 100
feature_max_len = 1
feature_encode_type = "uni"
# graph_encode_method = "max-pooling" # "lstm" or "max-pooling"
graph_encode_direction = "bi"  # "single" or "bi"
concat = True

encoder = "gated_gcn"  # "gated_gcn" "gcn" "seq"

lstm_in_gcn = "none"  # before, after, none
[ 27432, 62, 7890, 62, 6978, 796, 366, 40720, 7890, 14, 3919, 62, 13696, 14, 27432, 13, 7890, 1, 198, 7959, 62, 7890, 62, 6978, 796, 366, 40720, 7890, 14, 3919, 62, 13696, 14, 7959, 13, 7890, 1, 198, 9288, 62, 7890, 62, 6978, 796, ...
2.40625
448
from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
[ 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 33986, 11, 1195, 38300, 11, 1195, 41339, 32517, 11, 1195, 9787, 14253, 11, 1195, 13247, 14253, 198, 6738, 554, 19628, 25628, 4561, 259, 14253, 1330, 554, 19628, 25628, 45...
2.75
76
import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')

import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue


def clear_queue(self):
    q = self.im_queue
    while 1:
        try:
            q.get_nowait()
        except queue.Empty:
            break


class SimultaneousCameraRunner(_Runner):


class SequentialCameraRunner(_Runner):
[ 11748, 686, 6649, 571, 198, 4951, 8019, 13, 2220, 62, 805, 8409, 10786, 82, 22854, 62, 907, 14542, 11537, 198, 4951, 8019, 13, 2220, 62, 805, 8409, 10786, 67, 28995, 62, 260, 11250, 495, 11537, 198, 198, 11748, 686, 2777, 88, 198, 1...
2.368664
217
import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess

from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

from hfta.hfht import (tune_hyperparameters, attach_common_args,
                       rearrange_algorithm_kwargs, handle_integers,
                       generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts


if __name__ == '__main__':
    args = attach_args().parse_args()
    rearrange_algorithm_kwargs(args)
    logging.basicConfig(level=extract_logging_level(args))
    args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
    args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
    main(args)
[ 11748, 1822, 29572, 198, 11748, 18931, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 4738, 198, 11748, 850, 14681, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 6738, 8718, 8738, ...
2.599359
312
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals

from django.db import migrations, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 19, 319, 2864, 12, 3070, 12, 3312, 8702, 25, 405, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.736842
57
#
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
[ 2, 198, 2, 15069, 7, 66, 8, 12131, 8180, 10501, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 347, 10305, 12, 18, 12, 2601, 682, 12, 19856, 198, 2, 198, 198, 6738, 4755, 13, 9288, 62, 5143, 62, 26791, 1330, 6208, 10987, 198...
2.990196
102
from models import Song
from random import choice
[ 6738, 4981, 1330, 10940, 198, 6738, 4738, 1330, 3572, 198 ]
5
10
#!/usr/bin/python

# custom_dialect.py

import csv

csv.register_dialect("hashes", delimiter="#")

f = open('items3.csv', 'w')

with f:
    writer = csv.writer(f, dialect="hashes")
    writer.writerow(("pencils", 2))
    writer.writerow(("plates", 1))
    writer.writerow(("books", 4))
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 2, 2183, 62, 38969, 478, 13, 9078, 198, 198, 11748, 269, 21370, 198, 198, 40664, 13, 30238, 62, 38969, 478, 7203, 71, 7465, 1600, 46728, 2676, 25698, 4943, 198, 198, 69, 796, 1280, 107...
2.4
120
from typing import Optional

from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email

from servicex.models import UserModel
[ 6738, 19720, 1330, 32233, 198, 198, 6738, 42903, 62, 86, 27110, 1330, 46947, 8479, 198, 6738, 266, 83, 23914, 1330, 10903, 15878, 11, 9683, 15878, 11, 39900, 15878, 198, 6738, 266, 83, 23914, 13, 12102, 2024, 1330, 6060, 37374, 11, 2231...
4.037037
54
# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, division

import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph

sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network

CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = ''
    export_frozenPB()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 3601, 62, 8818, 11, 7297, 198, 198, 11748, 28686, 11, 25064, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 117...
2.666667
252
""" Adapter Removal templates """ # AdapterRemoval # # {0}: executable # {1}: fastq1 abs # {2}: fastq2 abs # {3}: fastq1 # {4}: fastq2 # {5}: minimum length # {6}: mismatch_rate # {7}: min base uality # {8}: min merge_length __ADAPTER_REMOVAL__=""" {0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities """ import os from ngadnap.dependency_graph.graph import CommandNode
[ 37811, 198, 220, 220, 220, 43721, 46209, 24019, 198, 37811, 198, 198, 2, 43721, 8413, 8325, 220, 198, 2, 198, 2, 1391, 15, 38362, 28883, 198, 2, 1391, 16, 38362, 3049, 80, 16, 2352, 198, 2, 1391, 17, 38362, 3049, 80, 17, 2352, 198...
2.527273
220
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import copy

from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet


def resource_mapping():
    return {
        'OS::Neutron::Net': ImmutableNet,
        'OS::Neutron::Port': ImmutablePort,
        'OS::Neutron::Subnet': ImmutableSubnet,
    }
[ 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 921, 743, 7330, 198, 2, ...
3.045161
310
from os.path import join, abspath, dirname, exists
from slybot import __version__
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist


install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema',
                    'dateparser', 'scrapyjs', 'page_finder', 'six']
extras = {
    'tests': ['nose', 'nose-timer'],
    'clustering': ['page_clustering']
}


setup(name='slybot',
      version=__version__,
      license='BSD',
      description='Slybot crawler',
      author='Scrapy project',
      author_email='info@scrapy.org',
      url='http://github.com/scrapinghub/portia',
      packages=find_packages(exclude=('tests', 'tests.*')),
      platforms=['Any'],
      scripts=['bin/slybot', 'bin/portiacrawl'],
      install_requires=install_requires,
      extras_require=extras,
      package_data={'': ['slybot/splash-script-combined.js']},
      include_package_data=True,
      cmdclass={
          'bdist_egg': bdist_egg_command,
          'sdist': sdist_command
      },
      classifiers=[
          'Development Status :: 4 - Beta',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7'
      ])
[ 6738, 28686, 13, 6978, 1330, 4654, 11, 2352, 6978, 11, 26672, 3672, 11, 7160, 198, 6738, 49822, 13645, 1330, 11593, 9641, 834, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 198, 6738, 900, 37623, 10141, 13, 21812, 13, ...
2.382003
589
import torch
import torch.nn as nn

from .yolo_layer import *
from .yolov3_base import *

###################################################################
## Backbone and helper modules
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 198, 6738, 764, 88, 14057, 62, 29289, 1330, 1635, 198, 6738, 764, 88, 349, 709, 18, 62, 8692, 1330, 1635, 628, 198, 29113, 29113, 21017, 198, 2235, 5157, 15992, 290, 31904...
4.288889
45
import os

import pytest

from modelkit.assets import errors
from tests.conftest import skip_unless
[ 11748, 28686, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 2746, 15813, 13, 19668, 1330, 8563, 198, 6738, 5254, 13, 1102, 701, 395, 1330, 14267, 62, 25252, 628, 628, 198 ]
3.466667
30
from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page


# Create your tests here.
def test_detail_page(self):
    """ Test to see if slug generated when saving a Page."""
    # Create a user and save to the database
    user = User.objects.create()
    user.save()

    # Create a page and save to the database
    page = Page(title="My Detail Test Page", content="details_test", author=user)
    page.save()

    # Slug is generated matches with what we expect
    slug = page.slug
    response = self.client.get(f'/{slug}/')
    self.assertEqual(response.status_code, 200)

    info = self.client.get('/')
    self.assertContains(info, 'makewiki', html=True)


def test_edit_page(self):
    """Test edit page."""
    # Test data that will be displayed on the screen
    user = User.objects.create()
    user.save()

    page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
    page.save()

    # Make a GET request to the MakeWiki homepage that will get a response back
    post_data = {
        'title': 'Who',
        'content': 'Are you?',
        'author': user.id,
    }

    response = self.client.post('/form/', data=post_data)

    # Check if response is 200
    self.assertEqual(response.status_code, 200)

    # Check the number of pages passed to the template matches the number of pages in the database
    end = self.client.get('/')
    result = end.context['pages']
    self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)
[ 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 22719, 13, 27530, 1330, 7873, 628, 198, 2, 13610, 534, 5254, 994, 13, 198, 4299, 1332, 62, 49170, 6...
2.835443
553
from .multiarmedbandit import MultiArmedBandit

from .eps_greedy_constant_stepsize import EpsilonGreedyConstantStepsize
from .greedy_constant_stepsize import GreedyConstantStepsize
from .epsilon_greedy_average_step import EpsilonGreedyAverageStep
from .greedy_average_step import GreedyAverageStep
from .greedy_bayes_update import GreedyBayesianUpdate
from .eps_greedy_bayes_update import EpsilonGreedyBayesianUpdate
[ 6738, 764, 41684, 12026, 3903, 270, 1330, 15237, 3163, 1150, 31407, 270, 198, 198, 6738, 764, 25386, 62, 16694, 4716, 62, 9979, 415, 62, 9662, 7857, 1330, 43427, 33576, 43887, 4716, 3103, 18797, 8600, 7857, 198, 6738, 764, 16694, 4716, ...
3.352
125
from datetime import datetime

from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup

from .models import Author, Item, ObjectC, Ranking
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 7663, 12331, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 3178, 15878, 11, 376, 11, 1195, 198, 6738, 42625, 14208, 13, 9945, 13, ...
3.394118
170
# This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
    set_start_method('spawn')
except RuntimeError:
    pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt

use_cuda = False

payoff_values = []
payoff_values.append(torch.tensor([  # payoff values
    [5, 0, 0, 2, 0],
    [0, 1, 2, 4, 2],
    [0, 0, 0, 2, 0],
    [0, 0, 0, 1, 0],
    [0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(torch.tensor([  # payoff values
    [0, 0, 1, 0, 5],
    [0, 0, 2, 0, 0],
    [1, 2, 4, 2, 1],
    [0, 0, 2, 0, 0],
    [0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)

n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3

p_observation = 0.5
p_ck_noise = [0.0]

# Number of gradient steps
t_max = 202

# We'll be using a high learning rate, since we have exact gradients
lr = 0.05  # DEBUG: 0.05 if exact gradients!
optim = 'adam'

# You can reduce this number if you are short on time. (Eg. n_trials = 20)
# n_trials = 100  # 30
n_trials = 20  # 15 # 100
std_val = 1.0

# These are the 3 settings we run: MACKRL, Joint-action-learner (always uses CK),
# Independent Actor-Critic (always uses decentralised actions selection)
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]

final_res = []

# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)

# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)

# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)

if __name__ == "__main__":
    freeze_support()
    main()
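# NOTE: main() is called above but never defined in this excerpt; its body
# lives elsewhere in the original notebook.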
[ 2, 770, 20922, 23986, 257, 6617, 12, 1659, 12, 1050, 1939, 2480, 329, 198, 2, 15237, 12, 36772, 8070, 20414, 22299, 13442, 18252, 357, 44, 8120, 7836, 8, 198, 2, 383, 2104, 20922, 460, 307, 10945, 2691, 11, 645, 761, 284, 4321, 1997...
2.473502
868
import sys
import os
import glob
import json
from robot import rebot
from robot.api import TestSuite
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))


if __name__ == "__main__":
    main_suite = TestSuite('School Bus Scenario')
    main_suite.resource.imports.library('lib/simulation.py')
    testcase_paths = glob.glob('data/testdata/04_school_bus/*.json')
    testcase_paths.sort()
    for testcase_path in testcase_paths[110:113]:
        with open(testcase_path) as f:
            testdata = json.load(f)
        tags = list(testdata['testcase']['context'].values()) +\
               list(testdata['testcase']['input'].values())
        school_bus_test = main_suite.tests.create(testdata['testcase']['name'], tags=tags)
        school_bus_test.setup.config(name='Setup Scenario', args=[testcase_path])
        school_bus_test.body.create_keyword('Start Simulation')
        school_bus_test.body.create_keyword('Validate Result')
        school_bus_test.teardown.config(name='Test Case Teardown')

    main_suite.run(output='results/04_school_bus/output.xml')

    rebot('results/04_school_bus/output.xml',
          log="results/04_school_bus/log.html",
          report="results/04_school_bus/report.html")

"""
rebot --tagstatcombine "8:00AMANDSunny:8AM and Sunny(C1)"
      --tagstatcombine "8:00AMANDCloudy:8AM and Cloudy(C2)"
      --tagstatcombine "8:00AMANDRainning:8AM and Rainning(C3)"
      --tagstatcombine "8:00AMANDFoggy:8AM and Foggy(C4)"
      --tagstatcombine "12:00PMANDSunny:12PM and Sunny(C5)"
      --tagstatcombine "12:00PMANDCloudy:12PM and Cloudy(C6)"
      --tagstatcombine "12:00PMANDRainning:12PM and Rainning(C7)"
      --tagstatcombine "12:00PMANDFoggy:12PM and Foggy(C8)"
      --tagstatcombine "3:00PMANDSunny:3PM and Sunny(C9)"
      --tagstatcombine "3:00PMANDCloudy:3PM and Cloudy(C10)"
      --tagstatcombine "3:00PMANDRainning:3PM and Rainning(C11)"
      --tagstatcombine "3:00PMANDFoggy:3PM and Foggy(C12)"
      --tagstatcombine "5:00PMANDSunny:5PM and Sunny(C13)"
      --tagstatcombine "5:00PMANDCloudy:5PM and Cloudy(C14)"
      --tagstatcombine "5:00PMANDRainning:5PM and Rainning(C15)"
      --tagstatcombine "5:00PMANDFoggy:5PM and Foggy(C16)"
      --tagstatcombine "7:00PMANDSunny:7PM and Sunny(C17)"
      --tagstatcombine "7:00PMANDCloudy:7PM and Cloudy(C18)"
      --tagstatcombine "7:00PMANDRainning:7PM and Rainning(C19)"
      --tagstatcombine "7:00PMANDFoggy:7PM and Foggy(C20)"
      --tagstatcombine MovingANDBackward_lane:Moving\ and\ Backward\ lane\(I12\)
      --tagstatcombine MovingANDForward_lane:Moving\ and\ Forward\ lane\(I9\)
      --tagstatcombine LoadingANDBackward_lane:Loading\ and\ Backward\ lane\(I6\)
      --tagstatcombine LoadingANDForward_lane:Loading\ and\ Forward\ lane\(I3\)
      --tagstatcombine StopANDBackward_lane:Stop\ and\ Backward\ lane\(I18\)
      --tagstatcombine StopANDForward_lane:Stop\ and\ Forward\ lane\(I15\)
      --tagstatexclude Forward_lane --tagstatexclude Backward_lane
      --tagstatexclude Moving --tagstatexclude Loading --tagstatexclude Stop
      --tagstatexclude 8\:00AM --tagstatexclude 12\:00PM --tagstatexclude 3\:00PM
      --tagstatexclude 5\:00PM --tagstatexclude 7\:00PM
      --tagstatexclude Sunny --tagstatexclude Foggy --tagstatexclude Rainning
      --tagstatexclude Cloudy
      -r combined_report.html -l combined_log.html output.xml
"""
[ 11748, 25064, 198, 11748, 28686, 198, 11748, 15095, 198, 11748, 33918, 198, 6738, 9379, 1330, 3405, 313, 198, 6738, 9379, 13, 15042, 1330, 6208, 5606, 578, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 6978, 13, 22179, 7, 418, 13, 6978...
2.560193
1,246
# The purpose of this script is to check all the maintenance branches of the
# given repository, and find which pull requests are included in which
# branches. The output is a JSON file that contains for each pull request the
# list of all branches in which it is included. We look specifically for the
# message "Merge pull request #xxxx " in commit messages, so this is not
# completely foolproof, but seems to work for now.

import os
import sys
import json
import re
import subprocess
import tempfile
from collections import defaultdict

from astropy.utils.console import color_print

from common import get_branches

if sys.argv[1:]:
    REPOSITORY_NAME = sys.argv[1]
else:
    REPOSITORY_NAME = 'astropy/astropy'

print("The repository this script currently works with is '{}'.\n"
      .format(REPOSITORY_NAME))

REPOSITORY = f'git://github.com/{REPOSITORY_NAME}.git'
NAME = os.path.basename(REPOSITORY_NAME)

DIRTOCLONEIN = tempfile.mkdtemp()  # set this to a non-temp directory to retain the clone between runs
ORIGIN = 'origin'  # set this to None to not fetch anything but rather use the directory as-is.
STARTDIR = os.path.abspath('.')

# The branches we are interested in
BRANCHES = get_branches(REPOSITORY_NAME)

# Read in a list of all the PRs
with open(f'merged_pull_requests_{NAME}.json') as merged:
    merged_prs = json.load(merged)

# Set up a dictionary where each key will be a PR and each value will be a list
# of branches in which the PR is present
pr_branches = defaultdict(list)

try:
    # Set up repository
    color_print(f'Cloning {REPOSITORY}', 'green')
    os.chdir(DIRTOCLONEIN)
    if os.path.isdir(NAME):
        # already exists... assume its the right thing
        color_print('"{}" directory already exists - assuming it is an already '
                    'existing clone'.format(NAME), 'yellow')
        os.chdir(NAME)
        if ORIGIN:
            subprocess.call(f'git fetch {ORIGIN}', shell=True)
    else:
        subprocess.call(f'git clone {REPOSITORY}', shell=True)
        os.chdir(NAME)

    # Loop over branches and find all PRs in the branch
    for branch in BRANCHES:

        # Change branch
        color_print(f'Switching to branch {branch}', 'green')
        subprocess.call('git reset --hard', shell=True)
        subprocess.call('git clean -fxd', shell=True)
        subprocess.call(f'git checkout {branch}', shell=True)
        if ORIGIN:
            subprocess.call(f'git reset --hard {ORIGIN}/{branch}', shell=True)

        # Extract log:
        log = subprocess.check_output('git log', shell=True).decode('utf-8')

        # Check for the presence of the PR in the log
        for pr in (re.findall(r'Merge pull request #(\d+) ', log) +
                   re.findall(r'Backport PR #(\d+):', log)):
            pr_branches[pr].append(branch)

finally:
    os.chdir(STARTDIR)

with open(f'pull_requests_branches_{NAME}.json', 'w') as f:
    json.dump(pr_branches, f, sort_keys=True, indent=2)
[ 2, 383, 4007, 286, 428, 4226, 318, 284, 2198, 477, 262, 9262, 13737, 286, 262, 198, 2, 1813, 16099, 11, 290, 1064, 543, 2834, 7007, 389, 3017, 287, 543, 198, 2, 13737, 13, 383, 5072, 318, 257, 19449, 2393, 326, 4909, 329, 1123, 28...
2.649374
1,118
from datetime import datetime as dt
import os

import numpy as np

import settings
[ 6738, 4818, 8079, 1330, 4818, 8079, 355, 288, 83, 198, 11748, 28686, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 11748, 6460, 628, 198 ]
3.4
25
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------

import os

from PyInstaller.utils.hooks import qt_menu_nib_dir
from PyInstaller.compat import getsitepackages, is_darwin, is_win

# On Windows system PATH has to be extended to point to the PyQt4 directory.
# The PySide directory contains Qt dlls. We need to avoid including a different
# version of the Qt libraries when another application (e.g. QtCreator) is installed.
if is_win:
    from PyInstaller.utils.win32.winutils import extend_system_path
    extend_system_path([os.path.join(x, 'PyQt4') for x in getsitepackages()])

hiddenimports = ['sip']

# For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
    datas = [
        (qt_menu_nib_dir('PyQt4'), 'qt_menu.nib'),
    ]
[ 2, 10097, 32501, 198, 2, 15069, 357, 66, 8, 2211, 12, 42334, 11, 9485, 15798, 263, 7712, 4816, 13, 198, 2, 198, 2, 4307, 6169, 739, 262, 2846, 286, 262, 22961, 3611, 5094, 13789, 357, 9641, 362, 198, 2, 393, 1568, 8, 351, 6631, ...
3.445026
382
import inspect


def get_default_args(func):
    """Get default arguments of a function.
    """
    signature = inspect.signature(func)
    return {
        k: v.default
        for k, v in signature.parameters.items()
        if v.default is not inspect.Parameter.empty
    }
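# Usage sketch (hypothetical function, not part of the original sample):
#   def f(a, b=2, c='x'): ...
#   get_default_args(f)  # -> {'b': 2, 'c': 'x'}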
[ 11748, 10104, 628, 198, 4299, 651, 62, 12286, 62, 22046, 7, 20786, 2599, 198, 220, 220, 220, 37227, 3855, 4277, 7159, 286, 257, 2163, 13, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 9877, 796, 10104, 13, 12683, 1300, 7, 20786, 8,...
2.598131
107
""" Utilizando Lambdas Conhecidas por Expresses Lambdas, ou simplesmente Lambdas, so funes sem nome, ou seja, funes annimas. # Funo em Python def funcao(x): return 3 * x + 1 print(funcao(4)) print(funcao(7)) # Expresso Lambda lambda x: 3 * x + 1 # Como utlizar a expresso lambda? calc = lambda x: 3 * x + 1 print(calc(4)) print(calc(7)) # Podemos ter expresses lambdas com mltiplas entradas nome_compelto = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title() print(nome_compelto(' paulo', ' SILVA ')) print(nome_compelto(' MARIA ', ' albertina ')) # Em funes Python podemos ter nenhuma ou vrias entradas. Em Lambdas tambm hello = lambda: 'Hello World!' uma = lambda x: 3 * x + 1 duas = lambda x, y: (x * y) ** 0.5 tres = lambda x, y, z: 3 / (1 / x + 1 / 7 + 1 / z) # n = lambda x1, x2, ..., xn: <expresso> print(hello()) print(uma(6)) print(duas(5, 7)) print(tres(3, 6, 9)) # OBS: Se passarmos mais argumentos do que parmetros esperados teremos TypeError # Exemplo autores = ['Paulo Silva', 'Maria Albertina', 'Luis Marques Nunes', 'Carlos Nunes', 'Ana S. Leito', 'Ins Garcia', 'Claudia Sofia', 'I. L. Antunes', 'Amrico Silva'] print(autores) # ['Paulo Silva', 'Maria Albertina', 'Luis Marques Nunes', 'Carlos Nunes', # 'Ana S. Leito', 'Ins Garcia', 'Claudia Sofia', 'I. L. Antunes', 'Amrico Silva'] # Ordenar pelo sobrenome autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower()) print(autores) # ['Maria Albertina', 'I. L. Antunes', 'Ins Garcia', 'Ana S. Leito', # 'Luis Marques Nunes', 'Carlos Nunes', 'Paulo Silva', 'Amrico Silva', 'Claudia Sofia'] """ # Funo Quadrtica # f(x) = a * x ** 2 + b * x + c # Definindo a funo def geradora_funcao_quadratica(a, b, c): """ Retorna a funo f(x) = a * x ** 2 + b * x + c """ return lambda x: a * x ** 2 + b * x + c teste = geradora_funcao_quadratica(2, 3, -5) print(teste(0)) print(teste(1)) print(teste(2)) print(geradora_funcao_quadratica(3, 0, 1)(2))
[ 37811, 198, 18274, 346, 528, 25440, 21114, 67, 292, 198, 198, 3103, 258, 66, 24496, 16964, 10604, 274, 21114, 67, 292, 11, 267, 84, 985, 2374, 434, 68, 21114, 67, 292, 11, 523, 1257, 274, 5026, 299, 462, 11, 267, 84, 384, 6592, 11...
2.307868
877
# Lista dentro de dicionário
campeonato = dict()
gol = []
aux = 0

campeonato['Jogador'] = str(input('Digite o nome do jogador: '))
print()
partidas = int(input('Quantas partidas ele jogou? '))
print()
for i in range(0, partidas):
    aux = int(input(f'Quantos gols na partida {i + 1}? '))
    gol.append(aux)
    print()
campeonato['Gols'] = gol[:]
campeonato['Total'] = sum(gol)
print('=' * 55)
print()
print(campeonato)
print()
print('=' * 55)
print()
for k, v in campeonato.items():
    print(f'O campo {k} tem o valor: {v}')
print()
print('=' * 55)
print(f'O jogador {campeonato["Jogador"]} jogou {partidas} partidas.')
print()
for i in range(0, partidas):
    print(f'Na partida {i + 1} ele fez {gol[i]} gol(s).')
print()
print(f'No total ele fez {campeonato["Total"]} gols.')
print('=' * 55)
[ 2, 7343, 64, 18794, 305, 390, 288, 47430, 27250, 201, 198, 20991, 431, 261, 5549, 796, 8633, 3419, 201, 198, 70, 349, 796, 17635, 201, 198, 14644, 796, 657, 201, 198, 20991, 431, 261, 5549, 17816, 41, 519, 7079, 20520, 796, 965, 7, ...
2.233062
369
import numpy
[ 11748, 299, 32152, 628 ]
3.5
4
import helpers
import json
import re

datfilepath = "../github-data/labRepos_CreationHistory.json"
allData = {}

# Check for and read existing data file
allData = helpers.read_existing(datfilepath)

# Read repo info data file (to use as repo list)
dataObj = helpers.read_json("../github-data/labReposInfo.json")

# Populate repo list
repolist = []
print("Getting internal repos ...")
repolist = sorted(dataObj["data"].keys())
print("Repo list complete. Found %d repos." % (len(repolist)))

# Read pretty GraphQL query
query_in = helpers.read_gql("../queries/repo-CreationDate.gql")

# Rest endpoint query
query_commits_in = "/repos/OWNNAME/REPONAME/commits?until=CREATETIME&per_page=100"
query_commits_in2 = "/repos/OWNNAME/REPONAME/commits?per_page=100"

# Retrieve authorization token
authhead = helpers.get_gitauth()

# Iterate through internal repos
print("Gathering data across multiple paginated queries...")
collective = {u'data': {}}
tab = " "
for repo in repolist:
    # History doesn't change, only update new repos or those that had no previous commits
    if "data" in allData.keys() and repo in allData["data"].keys():
        if allData["data"][repo]["firstCommitAt"]:
            print(tab + "Already recorded data for '%s'" % (repo))
            continue

    pageNum = 1
    print("\n'%s'" % (repo))
    print(tab + "page %d" % (pageNum))

    repoSplit = repo.split("/")

    # Query 1
    print(tab + "Get creation date and default branch")
    print(tab + "Modifying query...")
    newquery = re.sub('OWNNAME', repoSplit[0], query_in)
    newquery = re.sub('REPONAME', repoSplit[1], newquery)
    gitquery = json.dumps({'query': newquery})
    print(tab + "Query ready!")

    # Actual query exchange
    outObj = helpers.query_github(authhead, gitquery)
    if outObj["errors"]:
        print(tab + "Could not complete '%s'" % (repo))
        collective["data"].pop(repo, None)
        continue

    # Update collective data
    collective["data"][repo] = outObj["data"]["repository"]

    # Query 2
    print(tab + "Get pre-GitHub commit timestamps")
    print(tab + "Modifying query...")
    gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in)
    gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
    gitquery = re.sub('CREATETIME', collective["data"][repo]["createdAt"], gitquery)
    print(tab + "Query ready!")

    # Actual query exchange
    outObj = helpers.query_githubrest(authhead, gitquery)
    if outObj["errors"]:
        print(tab + "Could not get pre-GitHub commits for '%s'" % (repo))
        outObj["data"] = []

    # Update collective data
    collective["data"][repo]["commitTimestamps"] = []
    for commit in outObj["data"]:
        collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])

    # If no pre-GitHub commits, check the greater commit history
    if len(collective["data"][repo]["commitTimestamps"]) > 0 and collective["data"][repo]["commitTimestamps"][0]:
        collective["data"][repo]["initBeforeGitHubRepo"] = True
    else:
        print(tab + "No pre-GitHub commits found, getting full history")
        collective["data"][repo]["initBeforeGitHubRepo"] = False

        # Query 3
        print(tab + "Modifying query...")
        gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in2)
        gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
        print(tab + "Query ready!")

        # Actual query exchange
        outObj = helpers.query_githubrest(authhead, gitquery)
        if outObj["errors"]:
            print(tab + "Could not complete '%s'" % (repo))
            collective["data"].pop(repo, None)
            continue

        # Update collective data
        for commit in outObj["data"]:
            collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])

        # Paginate if needed
        hasNext = ("next" in outObj)
        while hasNext:
            pageNum += 1
            print(tab + "page %d" % (pageNum))
            print(tab + "Modifying query...")
            newquery = gitquery + "&page=" + str(pageNum)
            print(tab + "Query ready!")

            # Actual query exchange
            outObj = helpers.query_githubrest(authhead, newquery)
            if outObj["errors"]:
                print(tab + "Could not complete '%s'" % (repo))
                collective["data"].pop(repo, None)
                continue

            # Update collective data
            for commit in outObj["data"]:
                collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])

            hasNext = ("next" in outObj)

    # Sort dates
    collective["data"][repo]["commitTimestamps"].sort()

    # Save earliest commit date
    firstdate = None
    if len(collective["data"][repo]["commitTimestamps"]) > 0:
        firstdate = collective["data"][repo]["commitTimestamps"][0]
    collective["data"][repo]["firstCommitAt"] = firstdate
    del collective["data"][repo]["commitTimestamps"]

    print("'%s' Done!" % (repo))

print("\nCollective data gathering complete!")

# Combine new data with existing data
if "data" not in allData.keys():
    allData["data"] = {}
for repo in collective["data"].keys():
    allData["data"][repo] = collective["data"][repo]
allDataString = json.dumps(allData, indent=4, sort_keys=True)

# Write output file
print("\nWriting file '%s'" % (datfilepath))
with open(datfilepath, "w") as fileout:
    fileout.write(allDataString)
print("Wrote file!")

print("\nDone!\n")
[ 11748, 49385, 198, 11748, 33918, 198, 11748, 302, 198, 198, 19608, 7753, 6978, 796, 366, 40720, 12567, 12, 7890, 14, 23912, 6207, 418, 62, 12443, 341, 18122, 13, 17752, 1, 198, 439, 6601, 796, 23884, 198, 198, 2, 6822, 329, 290, 1100,...
2.765256
1,819
#! /usr/bin/env python2.7


from __future__ import print_function

import sys
sys.path.append("../../include")

import PyBool_public_interface as Bool

if __name__ == "__main__":

    expr = Bool.parse_std("input.txt")
    expr = expr["main_expr"]

    expr = Bool.simplify(expr)

    expr = Bool.nne(expr)

    print(Bool.print_expr(expr))
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 17, 13, 22, 628, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 7203, 40720, 40720, 17256, 4943, 198, 198, 11748, 9485, 33, ...
2.507353
136
from tkinter import * import random import time from PIL import Image from datetime import datetime from tinydb import * import os import pickle #from database1 import * from random import randint root = Tk() root.geometry("1600x800+0+0") root.title("Suman_dai_ko_DHOKAN") root.configure(bg="goldenrod4") text_Input = StringVar() operator ="" yes ="" no="" Tops = Frame(root, width=1600 ,height=50,bg="goldenrod4", relief=RIDGE) Tops.pack(side=TOP) f1 = Frame(root, width = 800 ,height=500,bg="goldenrod4",relief=SUNKEN) f1.pack(side=LEFT) f2 = Frame(root, width = 300,height = 700,bg="dark slate blue",relief=SUNKEN) f2.pack(side=RIGHT) #f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom) #==========================================================Time======================================= localtime=time.asctime(time.localtime(time.time())) #datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack() #====================================debugged======================== shirt = IntVar() pant = IntVar() sale = IntVar() buy = IntVar() deposite = IntVar() withdraw = IntVar() coat = IntVar() order = IntVar() total = IntVar() out = IntVar() before = IntVar() #order before the 60 stock = IntVar() delivery = IntVar() #########################main_gate###################### #after wards set the total from here total.set #++++++++++++++++++++++++++++++Varibales_inset+++++++++++++++++++++++++++++++++ order_bef = IntVar() stock_full = IntVar() shrting = IntVar() pant = IntVar() sari = IntVar() order_info = IntVar() delivery_report = IntVar() daily_info = IntVar() sales = IntVar() buy = IntVar() total_bank = IntVar() bank_deposite = IntVar() bank_withdraw = IntVar() due_amount = IntVar() order_info = IntVar() daily_cash = IntVar() cus_name = IntVar() cus_no = IntVar() employee = IntVar() ###############################class of algoriths######################### #++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++ #db = TinyDB("/databse/d4ta.json") #db.insert({"cus_number":"98938232", "cus_name":"rupen"}) #def no_y(): # lis = db.all() ################Info=============== lblInfo = Label(Tops, font=("arial",60, "italic bold"),text="Botique Management Systewm",fg="white", bg="dark slate blue", bd=10, anchor="w", relief=RIDGE) lblInfo.pack() lblInfo = Label(Tops, font=("arial",30, "bold"),text=localtime,fg="white",bg="black", bd=10, anchor="w", relief=RIDGE) lblInfo.pack() #===========================================================Calculator================================== """def current_dir(): import os import sys DIR = os.getcwd() print(DIR) lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W") lblInfo.pack() #DIR = dir #return dir """ #randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP) '''def malware_activate(): global cmd_active if "rupen" in cmd_active: if "rupen" in cmd_active[1]: if "ronley" in cmd_active[2]:''' #==============================another windows about me===================== #=============================getting all the infos ======================== ###########################sending emails############################ def __send_email(): '''import smtplib gmail = smtplib.SMTP("smtp.gmail.com", 587) gmail.starttls() _file = open("/root/Desktop/Desktop/python/") gmail.login("username", "password") msg = "YOUR MESSAGE" gmail.sendmail("your email 
adress", "the") gmail.quit()''' dialog = Tk() dialog.title("Send emails") dialog.geometry("800x800") dframe = Frame(dialog,width=800,height=800,bg="white",relief=SUNKEN).pack() email = StringVar() password = StringVar() semail = StringVar() spassword = StringVar() label = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="your_email").pack(side=LEFT) entry1 = Entry(dframe, font=("arial",16,"bold"), fg="white",bg="black", textvariable=email,insertwidth=1,bd=5).pack(side=RIGHT) label1 = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="password", relief=SUNKEN).pack() entry2 = Entry(dframe,font=("arial", 16 ,"bold"),textvariable=password, insertwidth=1,bd=5).pack(side=RIGHT) Label2 =Label(dframe,font=("arial",16, "bold"),fg="white",bg="black", text="sender_email",relief=SUNKEN).pack(side=LEFT) entry2 = Entry(dframe,font=("arial",16, "bold"),bd=5,fg="white",bg="black",textvariable=semail,insertwidth=1).pack(side=LEFT) label3 = Label(dframe,font=("arial",16,"bold"),fg="white",bg="black",text="sender_password", relief=SUNKEN).pack(side=LEFT) entry3= Entry(dframe,font=("arial",16,"bold"),fg="white",textvariable=spassword,insertwidth=1,relief=SUNKEN).pack() dialog.mainloop() #btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack() #================================next section=========================== fix = Button(root, bd=10,bg="black",fg="white",command=_price_inputs,relief=GROOVE).pack(side=BOTTOM) btnru = Button(root, font=("arial 20 bold"),bd=20, bg="black",fg="white",text="click",command=ano_win1,relief=GROOVE).pack(side=BOTTOM) #fucking mazing yr coding #def yes_y(): # rupe = Toplevel(root) # rupe.title("this is second window") # return #def no_y(): #nos = Toplevel(root) #nos.title("this is nos window") #return a = Entry(f2,font=("arial", 20,"bold"), textvariable=text_Input, bd=30, insertwidth=4, bg="dark slate blue",fg="white", justify="right").grid(columnspan=4) btn7=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"), text="7",bg="dim gray", command=lambda: btnClick(7)).grid(row=2,column=0) btn8=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"), text="8",bg="dim gray", command=lambda: btnClick(8)).grid(row=2,column=1) btn9=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"), text="9",bg="dim gray", command=lambda: btnClick(9)).grid(row=2,column=2) #!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
Addition=Button(f2,padx=16,pady=16,bd=8,text="+",fg="black",bg="dim gray", command=lambda: btnClick("+")).grid(row=2,column=3) btn6=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="4", bg="dim gray", command=lambda: btnClick(4)).grid(row=3,column=0) btn5=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="5", bg="dim gray", command=lambda: btnClick(5)).grid(row=3,column=1) btn4=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="6",bg="dim gray", command=lambda: btnClick(6)).grid(row=3,column=2) Subtract=Button(f2,padx=16,pady=16,bd=8,text="-", bg="dim gray", command=lambda: btnClick("-")).grid(row=3,column=3) btn3=Button(f2,padx=16,pady=16,bd=8,text="3",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(3)).grid(row=4,column=0) btn2=Button(f2,padx=16,pady=16,bd=8,text="2",font=("arial", 20, "bold"), bg="dim gray", command=lambda: btnClick(2)).grid(row=4,column=1) btn1=Button(f2,padx=16,pady=16,bd=8,text="1",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(1)).grid(row=4,column=2) Multiply=Button(f2,padx=16,pady=16,bd=8,text="*", bg="dim gray", command=lambda: btnClick("X")).grid(row=4,column=3) #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ btn0=Button(f2,padx=16,pady=16,bd=8,bg="dim gray",text="0",fg="black",font=("arial", 20, "bold"), command=lambda: btnClick(0)).grid(row=5,column=0) btnClear=Button(f2,pady=16,padx=16,bd=8, fg="black",font=("arial", 20, "bold"),text="C",bg="dim gray", command=btnClearDisplay).grid(row=5,column=1) btnEquals=Button(f2,padx=16,pady=16,fg="black",bd=8,text="=",bg="dim gray", font=("arial", 20,"bold"), command=btnEqualsInput).grid(row=5,column=2) #btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3) division=Button(f2,padx=16,pady=16,bd=8,fg="black", text="/", bg="dim gray", command=lambda: btnClick("/")).grid(row=5,column=3) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! rand = StringVar() #lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0) #txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="white",bg="green",anchor="w", relief=RIDGE) lblReference.grid(row=0,column=0) b=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,fg="white",bg="black", justify = "left") b.grid(row=0,column=1) #img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg" #root.ima = Image.open(img) #Label (root,bg="white",width=120,height=120, image=ima).pack() bill_in = StringVar() bill_out = StringVar() shrting=Label(f1,font=("arial", 20, "bold"), text="Shirting:",bg="powder blue", fg="black",anchor="w",relief=GROOVE).grid(row=1,column=0) shirts=Entry(f1,font=("arial", 16, "italic"), bd=10, textvariable=shirt, insertwidth=1,bg="black",fg="white", justify="left").grid(row=2,column=0) owner=Button(root,padx=16,pady=16, font=("arial",12, "bold"),text="info", bd=8,bg="black",command=ano_win1,fg="white",relief=RAISED).pack(side=LEFT) yes=Button(root,padx=16,pady=16,font=("arial",12, "bold"),text="Done",bd=8,bg="black", fg="white", command=_calculation(),relief=RAISED).pack(side=RIGHT) panting=Label(f1,font=("arial",20, "bold"), text="pant_mm:", bg="powder blue",fg="black",anchor="w",relief=GROOVE).grid(row=1,column=1) pantx=Entry(f1,font=("arial",16, "bold"), textvariable=pant, insertwidth=1, bd=10,bg="black",fg="white", justify="left").grid(row=2,column=1) sales=Label(f1,font=("arial",16, "bold"), text="sales_total:",bg="powder blue",fg="black",anchor="w",bd=8,relief=GROOVE).grid(row=1,column=2) salex=Entry(f1,font=("arial",16, "bold"),bg="black",fg="white",textvariable=sale,insertwidth=1,bd=10,justify="left").grid(row=2,column=2) buying=Label(f1,font=("arial",16, "bold"), text="buying_something: ",bg="powder blue",fg="black", anchor="e", relief=GROOVE).grid(row=3,column=0) buyx=Entry(f1,font=("arial", 16, "bold"), textvariable=buy, insertwidth=1, bd=10,bg="black", fg="white", justify="left").grid(row=4,column=0) Bank_Total=Label(f1,font=("arial",16,"bold"),text="Bank_Deposite: ", bg="powder blue", fg="black", anchor="e",relief=GROOVE).grid(row=3, column=1) depositex=Entry(f1,font=("arial",16,"bold"),bd=10, textvariable=deposite, bg="black", fg="white", justify="left").grid(row=4, column=1) lblBankwith=Label(f1, font=("arial", 16, "bold"),fg="black",bg="powder blue",text="Bank_Withdraw", anchor="e",relief=GROOVE).grid(row=3,column=2) withdrawx=Entry(f1,font=("arial",16, "bold"),bd=10, fg="white",bg="black", textvariable=withdraw, insertwidth=1).grid(row=4,column=2) coating=Label(f1, font=("arial", 16, "bold"),text="coat_mm:", bg="powder blue",fg="black",anchor="e").grid(row=5,column=0) coatx=Entry(f1, font=("arial", 16, "bold"), bg="black", fg="white", textvariable=coat, insertwidth=1, justify="left",bd=10).grid(row=6,column=0) lablsari=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="sari mm:", fg="black",anchor="e",relief=GROOVE).grid(row=5,column=1) sarix=Entry(f1, font=("arial", 16, "bold"), bg="black",bd=10, fg="white",textvariable=sari, insertwidth=1).grid(row=6,column=1) buying=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="buy_info:",fg="black",anchor="e",relief=GROOVE).grid(row=7,column=0) buyx=Entry(f1,font=("arial",16, "bold"),bd=8, fg="white",bg="black",textvariable=buy,insertwidth=1).grid(row=8,column=0) outgoing =Label(f1, font=("arial", 16, "bold"), bg="powder blue", text="outgoing:", fg="black",anchor="e",relief=GROOVE).grid(row=7,column=1) outx=Entry(f1,font=("arial", 16, "bold"),textvariable=out, bd=8,fg="white",bg="black",insertwidth=1).grid(row=8,column=1) 
ordering=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="order_info:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=0) orderx=Entry(f1,font=("arial",16,"bold"),insertwidth=1, textvariable=order,bd=8,fg="white",bg="black").grid(row=10,column=0) lblcustomer=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="cus_name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=1) no=Entry(f1,font=("arial",16, "bold"),bd=8,bg="black",fg="white",insertwidth=1, textvariable=cus_name).grid(row=10,column=1) lblmonthly=Label(f1, font=("arial",16,"bold"),bg="powder blue",text="monthly:",fg="black",anchor="e",relief=GROOVE).grid(row=5,column=2) monthly=StringVar() monthx=Entry(f1,font=("arial",16,"bold"),show="blank",bg="black",textvariable=monthly,insertwidth=1,fg="white",bd=10).grid(row=6,column=2) lbltotal=Label(f1, font=("arial", 16, "bold"),bg="powder blue",text="Total:",fg="black").grid(row=7,column=2) totalx=Entry(f1, font=("arial", 16, "bold"),bg="black",textvariable=total,fg="white",insertwidth=1,bd=10).grid(row=8,column=2) lblemployee = Label(f1,font=("arial", 16, "bold"),bg="powder blue",text="employee name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=2) employx= Entry(f1,font=("arial", 16,"bold"),textvariable=employee,insertwidth=1,bg="black",fg="white",bd=10).grid(row=10,column=2) ###############################database for the project###################### '''def __database(): db = TinyDB("/records.json") #print(monthly) #print(b) #fuck = c.get() a = order_bef.get() b = stock_full.get() c = shrting.get() d = pant.get() e = sari.get() f = order_info.get() g = delivery_report.get() h = daily_info.get() i = sales.get() j = buy.get() k = total_bank.get() l = bank_deposite.get() m = bank_withdraw.get() n = due_amount.get() o = order_info.get() p = daily_cash.get() q = cus_name.get() r = cus_no.get() s = employee.get() files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": "" , "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""} db.insert({"total": a }), db.insert({"regrds":"reference"}), db.insert({"day_income":"billion"}), db.insert({"day_outgoing":"billout"}), db.insert({"bankdeposit":"bankdepo"}), db.insert({"full_stock":"stock"}), db.insert({"shirt_mm":"shirt"}), db.insert({"bankwithdraw":"bankwith"}), db.insert({"pantmm":"pant"}), db.insert({"sarimm":"sari"}), db.insert({"orderday":"orderinfo"}), db.insert({"salling":"sales"}), db.insert({"buying":"buy"}), db.insert({"customern":"customer"}), db.insert({"monthly_info":"monthly"}), db.insert({"totaldy":"total"}), db.insert({"employeid":"employee"}) for db in range(1): print(db) files = list(files) file = open("/file.txt", "wb") da = "" for data in files: if len(data) != 0: print("this is are the files written in python\\n check the file.txt for debug ") da += data print(data) da = int(da) file.write(da) try: file = open("/records.txt", "r") except: print("creating the file from script {}".format(__file__)) file = open("/records.txt","w") finally: pass check = os.path.isfile("/records.txt") if check: for item in db: data = open("/records.txt","wb") #with open("/records.txt","wb") as file: #pickle.dump(item, data) #file.close() #file1 = pickle.load(file) if len(item) == len(file1): break if item != file: #item = str(item) file.write("%s" %(item)) time.sleep(1) print("done writing to the file") #for item in db: with open("/records.txt", "rb") as file: reading = file1 if len(reading) != None: print("its printed") print(reading) file.close() 
#db.insert({"name":"Rupen Gurung"}) name = Query() #db(name.type == "changed") d = datetime.now() month = str(d.month) day = str(d.day) year = str(d.year) hour = str(d.hour) minute = str(d.minute) second = str(d.second) between = str(":")''' '''def __time(infos): time = datetime.now() day = str(time.day) month = str(time.month) hour = str(time.hour) second = str(time.second) year = str(time.year) minute = str(time.minute) #assuming the infos as the order taken that will be notified before the #60 hours #changing all the formats to the seconds that will be easy for the #calculation #first calculating seconds in one day that will ease all the further operations daysec = (24*60) * 60 * 60 ### ##this is will be easy now yearSec = daysec * 365 month = daysec * 30 daySec = daysec hourSec = 60 * 60 * 60 minuteSec = 60 * 60 files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":"" ,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""}''' #files = list(files) '''for data in files: if len(data) != 0: print(data)''' #lenght = len(db) ##this will show the recorded bill numbers #l # command=bill_in).pack(anchor=NE) root.mainloop() #__database() #add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"), #text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6) #btn10=Button(f2,padx=16,padx=16, fg="blue", font("arial",5,"bold"), # text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5) #def function(): # pass(): # pass main(): # root.mainloop() #for the revies of the follow in the sorry of the same of the tkinter in the main function of the sollow #main()
[ 6738, 256, 74, 3849, 1330, 1635, 198, 11748, 4738, 198, 11748, 640, 198, 6738, 350, 4146, 1330, 7412, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 7009, 9945, 1330, 1635, 198, 11748, 28686, 198, 11748, 2298, 293, 198, 2, 6738, ...
2.419107
7,683
import torch
import torch.nn as nn
from mmcv.cnn import normal_init

from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed

INF = 1e8
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 8085, 33967, 13, 66, 20471, 1330, 3487, 62, 15003, 198, 198, 6738, 8085, 15255, 13, 7295, 1330, 5253, 17, 65, 3524, 11, 2700, 62, 46428, 2624, 11, 5021, 62, 39014, ...
3.106796
103
from . import db
from sqlalchemy.dialects.mysql import LONGTEXT
[ 6738, 764, 1330, 20613, 198, 6738, 44161, 282, 26599, 13, 38969, 478, 82, 13, 28744, 13976, 1330, 44533, 32541, 628, 628 ]
3.190476
21
import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go

df = pd.read_csv("Data/nst-est2017-alldata.csv")
df2 = df[df["DIVISION"] == '1']
df2.set_index("NAME", inplace=True)

list_of_pop_col = [col for col in df2.columns if col.startswith('POP')]
df2 = df2[list_of_pop_col]

data = [go.Scatter(x=df2.columns,
                   y=df2.loc[name],
                   mode='lines',
                   name=name) for name in df2.index]

pyo.plot(data)
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 7110, 306, 13, 2364, 1370, 355, 279, 8226, 198, 11748, 7110, 306, 13, 34960, 62, 672, 8457, 355, 467, 628, 198, 7568, 28, 279, 67, 13, 961, 62, 40664, ...
2.030172
232
""" test_markup ~~~~~~~~~~~ Test various Sphinx-specific markup extensions. :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re import pytest from docutils import frontend, nodes, utils from docutils.parsers.rst import Parser as RstParser from sphinx import addnodes from sphinx.builders.html.transforms import KeyboardTransform from sphinx.builders.latex import LaTeXBuilder from sphinx.roles import XRefRole from sphinx.testing.util import Struct, assert_node from sphinx.transforms import SphinxSmartQuotes from sphinx.util import docutils, texescape from sphinx.util.docutils import sphinx_domains from sphinx.writers.html import HTMLTranslator, HTMLWriter from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter # since we're not resolving the markup afterwards, these nodes may remain class ForgivingTranslator: def visit_pending_xref(self, node): pass def test_samp_role(parse): # no braces text = ':samp:`a{b}c`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a", [nodes.emphasis, "b"], "c")]) # nested braces text = ':samp:`a{{b}}c`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a", [nodes.emphasis, "{b"], "}c")]) # half-opened braces text = ':samp:`a{bc`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{bc"]) # escaped braces text = ':samp:`a\\\\{b}c`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{b}c"]) # no braces (whitespaces are keeped as is) text = ':samp:`code sample`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, nodes.literal, "code sample"]) def test_download_role(parse): # implicit text = ':download:`sphinx.rst`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference, nodes.literal, "sphinx.rst"]) assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download', refexplicit=False, reftarget='sphinx.rst', refwarn=False) assert_node(doctree[0][0][0], classes=['xref', 'download']) # explicit text = ':download:`reftitle <sphinx.rst>`' doctree = parse(text) assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference, nodes.literal, "reftitle"]) assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download', refexplicit=True, reftarget='sphinx.rst', refwarn=False) assert_node(doctree[0][0][0], classes=['xref', 'download']) def test_XRefRole(inliner): role = XRefRole() # implicit doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, []) assert len(doctrees) == 1 assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text']) assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text', refexplicit=False, refwarn=False) assert errors == [] # explicit doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, []) assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title']) assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target', refexplicit=True, refwarn=False) # bang doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, []) assert_node(doctrees[0], [nodes.literal, 'title <target>']) # refdomain doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, []) assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text']) assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text', refexplicit=False, refwarn=False) # 
fix_parens role = XRefRole(fix_parens=True) doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, []) assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()']) assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text', refexplicit=False, refwarn=False) # lowercase role = XRefRole(lowercase=True) doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, []) assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT']) assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text', refexplicit=False, refwarn=False)
[ 37811, 198, 220, 220, 220, 1332, 62, 4102, 929, 198, 220, 220, 220, 220, 15116, 4907, 93, 628, 220, 220, 220, 6208, 2972, 45368, 28413, 12, 11423, 41485, 18366, 13, 628, 220, 220, 220, 1058, 22163, 4766, 25, 15069, 4343, 12, 1238, 2...
2.246112
2,186
from pandac.PandaModules import PStatCollector
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import Queue, invertDictLossless, makeFlywheelGen
from direct.showbase.PythonUtil import itype, serialNum, safeRepr, fastRepr
from direct.showbase.Job import Job
import types, weakref, random, __builtin__


def _getPruneTaskName(self):
    return 'pruneLeakingContainerRefs-%s' % self._serialNum

def getContainerIds(self):
    return self._id2ref.keys()

def getContainerByIdGen(self, id, **kwArgs):
    # return a generator to look up a container
    return self._id2ref[id].getContainerGen(**kwArgs)

def getContainerById(self, id):
    for result in self._id2ref[id].getContainerGen():
        pass
    return result

def getContainerNameByIdGen(self, id, **kwArgs):
    return self._id2ref[id].getEvalStrGen(**kwArgs)

def getContainerNameById(self, id):
    if id in self._id2ref:
        return repr(self._id2ref[id])
    return '<unknown container>'

def removeContainerById(self, id):
    if id in self._id2ref:
        self._id2ref[id].destroy()
        del self._id2ref[id]
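# NOTE: the functions above take `self` but sit at module level; they appear to
# be methods extracted from a container-leak-detector class in the original source.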
[ 6738, 19798, 330, 13, 47, 5282, 5841, 5028, 1330, 350, 17126, 31337, 273, 198, 6738, 1277, 13, 12942, 1662, 1958, 13, 13470, 3673, 1958, 22289, 1330, 1277, 3673, 1958, 198, 6738, 1277, 13, 12860, 8692, 13, 37906, 18274, 346, 1330, 4670,...
2.517745
479
# sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

"""Default implementation of SQL comparison operations.

"""

from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
    Null, _const_expr, _clause_element_as_expr, \
    ClauseList, ColumnElement, TextClause, UnaryExpression, \
    collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
    Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect


def _inv_impl(expr, op, **kw):
    """See :meth:`.ColumnOperators.__inv__`."""
    if hasattr(expr, 'negation_clause'):
        return expr.negation_clause
    else:
        return expr._negate()


def _neg_impl(expr, op, **kw):
    """See :meth:`.ColumnOperators.__neg__`."""
    return UnaryExpression(expr, operator=operators.neg, type_=expr.type)


def _match_impl(expr, op, other, **kw):
    """See :meth:`.ColumnOperators.match`."""
    return _boolean_compare(
        expr, operators.match_op,
        _check_literal(
            expr, operators.match_op, other),
        result_type=type_api.MATCHTYPE,
        negate=operators.notmatch_op
        if op is operators.match_op else operators.match_op,
        **kw
    )


def _distinct_impl(expr, op, **kw):
    """See :meth:`.ColumnOperators.distinct`."""
    return UnaryExpression(expr, operator=operators.distinct_op,
                           type_=expr.type)


def _between_impl(expr, op, cleft, cright, **kw):
    """See :meth:`.ColumnOperators.between`."""
    return BinaryExpression(
        expr,
        ClauseList(
            _check_literal(expr, operators.and_, cleft),
            _check_literal(expr, operators.and_, cright),
            operator=operators.and_,
            group=False, group_contents=False),
        op,
        negate=operators.notbetween_op
        if op is operators.between_op
        else operators.between_op,
        modifiers=kw)


# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
    "and_": (_conjunction_operate,),
    "or_": (_conjunction_operate,),
    "inv": (_inv_impl,),
    "add": (_binary_operate,),
    "mul": (_binary_operate,),
    "sub": (_binary_operate,),
    "div": (_binary_operate,),
    "mod": (_binary_operate,),
    "truediv": (_binary_operate,),
    "custom_op": (_custom_op_operate,),
    "json_path_getitem_op": (_binary_operate, ),
    "json_getitem_op": (_binary_operate, ),
    "concat_op": (_binary_operate,),
    "any_op": (_scalar, CollectionAggregate._create_any),
    "all_op": (_scalar, CollectionAggregate._create_all),
    "lt": (_boolean_compare, operators.ge),
    "le": (_boolean_compare, operators.gt),
    "ne": (_boolean_compare, operators.eq),
    "gt": (_boolean_compare, operators.le),
    "ge": (_boolean_compare, operators.lt),
    "eq": (_boolean_compare, operators.ne),
    "is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
    "isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
    "like_op": (_boolean_compare, operators.notlike_op),
    "ilike_op": (_boolean_compare, operators.notilike_op),
    "notlike_op": (_boolean_compare, operators.like_op),
    "notilike_op": (_boolean_compare, operators.ilike_op),
    "contains_op": (_boolean_compare, operators.notcontains_op),
    "startswith_op": (_boolean_compare, operators.notstartswith_op),
    "endswith_op": (_boolean_compare, operators.notendswith_op),
    "desc_op": (_scalar, UnaryExpression._create_desc),
    "asc_op": (_scalar, UnaryExpression._create_asc),
    "nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
    "nullslast_op": (_scalar, UnaryExpression._create_nullslast),
    "in_op": (_in_impl, operators.notin_op),
    "notin_op": (_in_impl, operators.in_op),
    "is_": (_boolean_compare, operators.is_),
    "isnot": (_boolean_compare, operators.isnot),
    "collate": (_collate_impl,),
    "match_op": (_match_impl,),
    "notmatch_op": (_match_impl,),
    "distinct_op": (_distinct_impl,),
    "between_op": (_between_impl, ),
    "notbetween_op": (_between_impl, ),
    "neg": (_neg_impl,),
    "getitem": (_getitem_impl,),
    "lshift": (_unsupported_impl,),
    "rshift": (_unsupported_impl,),
    "contains": (_unsupported_impl,),
}
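# NOTE: several helpers referenced in operator_lookup (_conjunction_operate,
# _binary_operate, _custom_op_operate, _scalar, _boolean_compare, _in_impl,
# _collate_impl, _getitem_impl, _unsupported_impl, _check_literal) are defined
# elsewhere in this module and are not part of this excerpt.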
[ 2, 44161, 14, 12286, 62, 785, 1845, 1352, 13, 9078, 198, 2, 15069, 357, 34, 8, 5075, 12, 7908, 262, 16363, 2348, 26599, 7035, 290, 20420, 198, 2, 1279, 3826, 37195, 20673, 2393, 29, 198, 2, 198, 2, 770, 8265, 318, 636, 286, 16363,...
2.438232
1,878
from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from django.contrib.auth.models import User

from recipes.models import Recipe, Ingredient, Step
[ 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 12102, 2024, 1330, 34371, 5842, 13292, 47139, 1352, 198, 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 198, 6...
3.781818
55
import numpy as np
import pytest
import torch

from mmpose.models import TemporalRegressionHead


def test_temporal_regression_head():
    """Test temporal head."""
    head = TemporalRegressionHead(
        in_channels=1024,
        num_joints=17,
        loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))

    head.init_weights()

    with pytest.raises(AssertionError):
        # ndim of the input tensor should be 3
        input_shape = (1, 1024, 1, 1)
        inputs = _demo_inputs(input_shape)
        _ = head(inputs)

    with pytest.raises(AssertionError):
        # size of the last dim should be 1
        input_shape = (1, 1024, 3)
        inputs = _demo_inputs(input_shape)
        _ = head(inputs)

    input_shape = (1, 1024, 1)
    inputs = _demo_inputs(input_shape)
    out = head(inputs)
    assert out.shape == torch.Size([1, 17, 3])

    loss = head.get_loss(out, out, torch.ones_like(out))
    assert torch.allclose(loss['reg_loss'], torch.tensor(0.))

    _ = head.inference_model(inputs)
    _ = head.inference_model(inputs, [(0, 1), (2, 3)])

    acc = head.get_accuracy(out, out, torch.ones_like(out))
    assert acc['mpjpe'] == 0.
    np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)


def _demo_inputs(input_shape=(1, 1024, 1)):
    """Create a superset of inputs needed to run head.

    Args:
        input_shape (tuple): input batch dimensions.
            Default: (1, 1024, 1).
    Returns:
        Random input tensor with the size of input_shape.
    """
    inps = np.random.random(input_shape)
    inps = torch.FloatTensor(inps)
    return inps
[ 11748, 299, 32152, 355, 45941, 201, 198, 11748, 12972, 9288, 201, 198, 11748, 28034, 201, 198, 201, 198, 6738, 285, 3149, 577, 13, 27530, 1330, 5825, 35738, 8081, 2234, 13847, 201, 198, 201, 198, 201, 198, 4299, 1332, 62, 11498, 35738, ...
2.196809
752
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 23:20
from __future__ import unicode_literals

from django.db import migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 940, 13, 17, 319, 1584, 12, 940, 12, 3132, 2242, 25, 1238, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 1...
2.690909
55
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction)
[ 198, 198, 2, 3406, 16705, 8777, 2134, 481, 307, 9113, 12931, 290, 1444, 355, 884, 25, 198, 2, 26181, 796, 16705, 8777, 7, 10394, 11, 6001, 11, 2057, 8, 198, 2, 5772, 62, 16, 796, 26181, 13, 21084, 7, 37295, 8 ]
3.341463
41
from itertools import (
    chain,
)
import logging

from azul import (
    config,
    require,
)
from azul.logging import (
    configure_script_logging,
)
from azul.terra import (
    TDRClient,
    TDRSourceName,
)

log = logging.getLogger(__name__)


if __name__ == '__main__':
    main()
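# NOTE: main() is called above but not defined in this excerpt of the script.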
[ 6738, 340, 861, 10141, 1330, 357, 198, 220, 220, 220, 6333, 11, 198, 8, 198, 11748, 18931, 198, 198, 6738, 35560, 377, 1330, 357, 198, 220, 220, 220, 4566, 11, 198, 220, 220, 220, 2421, 11, 198, 8, 198, 6738, 35560, 377, 13, 6404,...
2.418033
122
""" Tools to "play notes for the editor clef", which may be thought of as "executing editor commands". NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an alternative, we could consider `nouts_for_notes`. """ from s_address import node_for_s_address, s_dfs from dsn.s_expr.legato import NoteSlur, NoteCapo from dsn.s_expr.utils import ( bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces, ) from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode from dsn.s_expr.structure import TreeNode from dsn.editor.clef import ( CursorChild, CursorDFS, CursorParent, CursorSet, EDelete, EncloseWithParent, InsertNodeChild, InsertNodeSibbling, MoveSelectionChild, MoveSelectionSibbling, LeaveChildrenBehind, SwapSibbling, TextInsert, TextReplace, )
[ 37811, 198, 33637, 284, 366, 1759, 4710, 329, 262, 5464, 1190, 69, 1600, 543, 743, 307, 1807, 286, 355, 366, 18558, 15129, 5464, 9729, 1911, 198, 198, 16580, 25, 287, 262, 2174, 11, 356, 1690, 2018, 4710, 1978, 366, 805, 935, 1600, ...
2.625344
363
"""Base for all Classes. Base mainly includes the description fields """ import logging from typing import Optional from .log import Log # type: ignore
[ 37811, 14881, 329, 477, 38884, 13, 198, 198, 14881, 8384, 3407, 262, 6764, 7032, 198, 37811, 198, 11748, 18931, 198, 6738, 19720, 1330, 32233, 198, 198, 6738, 764, 6404, 1330, 5972, 220, 1303, 2099, 25, 8856, 628 ]
4.216216
37
import subprocess
import re

programs = input('Separate the programs with a space: ').split()

secure_pattern = r'[\w\d]'

for program in programs:

    if not re.match(secure_pattern, program):
        print("Sorry we can't check that program")
        continue

    process = subprocess.run(
        ['which', program], capture_output=True, text=True)

    if process.returncode == 0:
        print(f'The program "{program}" is installed')
        print(f'The location of the binary is: {process.stdout}')
    else:
        print(f'Sorry the {program} is not installed')
        print(process.stderr)
    print('\n')
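# NOTE: subprocess.run(..., capture_output=True, text=True) requires Python 3.7+;
# on older versions use stdout=subprocess.PIPE and stderr=subprocess.PIPE instead.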
[ 11748, 850, 14681, 198, 11748, 302, 198, 198, 23065, 82, 796, 5128, 10786, 19117, 533, 262, 4056, 351, 257, 2272, 25, 705, 737, 35312, 3419, 198, 198, 22390, 62, 33279, 796, 44438, 59, 86, 59, 67, 49946, 198, 198, 1640, 1430, 287, 4...
2.683983
231
from __future__ import absolute_import

from django import forms

from authentication.account.forms import BaseSignupForm

from . import app_settings, signals
from .adapter import get_adapter
from .models import SocialAccount
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 6738, 42625, 14208, 1330, 5107, 198, 198, 6738, 18239, 13, 23317, 13, 23914, 1330, 7308, 11712, 929, 8479, 198, 198, 6738, 764, 1330, 598, 62, 33692, 11, 10425, 198, 6738, 764, ...
4
57
#!/usr/bin/env python
""" Provides the primary interface into the library """

from __future__ import annotations

import asyncio
import logging
from typing import Callable, Optional, Union

from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus

logger = logging.getLogger('pytheos')


def close(self):
    """ Close the connection to our HEOS device

    :return: None
    """
    logger.info(f'Closing connection to {self.server}:{self.port}')
    if self._event_task:
        self._event_task.cancel()
    if self._event_processor:
        self._event_processor.cancel()
    self._connected = False

def subscribe(self, event_name: str, callback: Callable):
    """ Subscribe a callback function to a specific event

    :param event_name: Event name
    :param callback: Callback function
    :return: None
    """
    # FIXME: Change event_name to an enum
    if self._event_subscriptions.get(event_name) is None:
        self._event_subscriptions[event_name] = []

    self._event_subscriptions[event_name].append(callback)

def is_receiving_events(self):
    """ Retrieves whether or not we're receiving events.

    :return: bool
    """
    return self._receive_events

def _init_internal_event_handlers(self):
    """ Initialize the internal event handlers

    :return: None
    """
    # FIXME: Meh, do something better with this.
    internal_handler_map = {
        # 'event/sources_changed': self._handle_sources_changed,
        # 'event/players_changed': self._handle_players_changed,
        # 'event/groups_changed': self._handle_groups_changed,
        # 'event/player_state_changed': self._handle_player_state_changed,
        # 'event/player_now_playing_changed': self._handle_now_playing_changed,
        # 'event/player_now_playing_progress': self._handle_now_playing_progress,
        # 'event/player_playback_error': self._handle_playback_error,
        # 'event/player_queue_changed': self._handle_queue_changed,
        # 'event/player_volume_changed': self._handle_volume_changed,
        # 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
        # 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
        # 'event/group_volume_changed': self._handle_group_volume_changed,
        # 'event/user_changed': self._handle_user_changed,
    }

    for event, callback in internal_handler_map.items():
        self.subscribe(event, callback)


async def connect(host: Union[SSDPResponse, str], port: int = Pytheos.DEFAULT_PORT) -> Pytheos:
    """ Connect to the provided host and return a context manager for use with the connection.

    :param host: Host to connect to
    :param port: Port to connect to
    :raises: ValueError
    :return: The Pytheos instance
    """
    if isinstance(host, SSDPResponse):
        host = utils.extract_host(host.location)

    conn = Pytheos(host, port)
    return await conn.connect()
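# NOTE: close(), subscribe(), is_receiving_events() and _init_internal_event_handlers()
# take `self` and appear to be Pytheos methods extracted from their class; the Pytheos
# class itself (referenced by connect()) is not defined in this excerpt.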
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 47081, 262, 4165, 7071, 656, 262, 5888, 37227, 198, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 11748, 30351, 952, 198, 11748, 18931, 198, 6738, 19720, 1330, 4889, 540, 1...
2.614343
1,255
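A short usage sketch for the connect() coroutine in the row above; the IP address and the event name are illustrative assumptions, while connect(), subscribe() and close() come from the snippet itself:

import asyncio

async def demo():
    heos = await connect('192.168.1.50')   # assumed LAN address
    heos.subscribe('event/player_state_changed',
                   lambda evt: print('state changed:', evt))
    heos.close()

asyncio.run(demo())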
# Cell phone list

# The marketing department of your company is interested in obtaining only the
# cell phone numbers, separating them from the landlines. To simplify this
# operation, only those numbers that, after the area code, start with the extra
# digit 9 will be considered cell phone numbers.

# You received the task of obtaining a list of the cell phone numbers, without
# the area code. However, the marketing department's phone registry is not
# standardized, and there are numbers in 3 distinct formats:

# 1. Full numbers (13 or 14 characters), including the country code (+55) and
#    the area code (e.g. 11). Examples: '+5511912345678' or '+551133334444'
#    (note that both start with the character '+');
# 2. Numbers containing only the area code (10 or 11 characters). Examples:
#    '11987654321' or '1155556666';
# 3. Numbers without an area code (8 or 9 characters). Examples: '918273645'
#    or '77778888'.

# Note that in every case the first example is a cell phone number and the
# second is not.

# Write a function that receives a list of phone numbers and returns a list
# containing only the cell phone numbers. Each phone number in the input list
# (received as the argument of your function) can be in any of the 3 formats
# above. The phone numbers in the output list (returned by your function) must
# contain only the digits of the phone itself, removing the country code and
# area code when necessary.

# Example: the call lista_celulares(['+5511912345678', '1155556666', '77778888',
# '+551133334444', '918273645', '11987654321']) must return the list
# ['912345678', '918273645', '987654321']

# The name of your function must be lista_celulares.
[ 2, 7343, 64, 18725, 377, 3565, 198, 2, 440, 6313, 3263, 78, 390, 7124, 12379, 424, 64, 795, 79, 14625, 1556, 493, 68, 601, 4533, 795, 909, 353, 2471, 268, 292, 28686, 299, 647, 418, 390, 5735, 69, 505, 18725, 934, 11, 2880, 25440,...
2.553459
636
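The row above states the exercise but ships no solution. A minimal sketch that follows the three formats described in the statement (the slice widths come directly from the character counts it gives):

def lista_celulares(telefones):
    celulares = []
    for numero in telefones:
        if numero.startswith('+'):       # format 1: drop the '+55' country code
            numero = numero[3:]
        if len(numero) in (10, 11):      # formats 1 and 2: drop the 2-digit area code
            numero = numero[2:]
        # What is left has 8 or 9 digits; cell phones have 9 and start with '9'.
        if len(numero) == 9 and numero.startswith('9'):
            celulares.append(numero)
    return celulares


assert lista_celulares(['+5511912345678', '1155556666', '77778888',
                        '+551133334444', '918273645', '11987654321']) == \
    ['912345678', '918273645', '987654321']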
import csv

_iso_639_1_codes_file = open("files/ISO-639-1_Codes.csv", mode='r')
_iso_639_1_codes_dictreader = csv.DictReader(_iso_639_1_codes_file)

_iso_639_1_codes_dict: dict = {}
for _row in _iso_639_1_codes_dictreader:
    _iso_639_1_codes_dict[_row['ISO-639-1 Code']] = _row['Language']

print(str(_iso_639_1_codes_dict))
[ 11748, 269, 21370, 198, 198, 62, 26786, 62, 21, 2670, 62, 16, 62, 40148, 62, 7753, 796, 1280, 7203, 16624, 14, 40734, 12, 21, 2670, 12, 16, 62, 34, 4147, 13, 40664, 1600, 4235, 11639, 81, 11537, 198, 62, 26786, 62, 21, 2670, 62, ...
2.116883
154
import time
import torch
import warnings
import numpy as np

from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
    ListReplayBuffer
from tianshou.utils import MovAvg
[ 11748, 640, 198, 11748, 28034, 198, 11748, 14601, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 256, 1547, 15710, 13, 24330, 1330, 7308, 38469, 4834, 85, 198, 6738, 256, 1547, 15710, 13, 7890, 1330, 347, 963, 11, 23635, 28632, 11, 59, ...
3.383333
60
from drink_partners.contrib.samples import partner_bar_legal
[ 6738, 4144, 62, 3911, 2741, 13, 3642, 822, 13, 82, 12629, 1330, 5212, 62, 5657, 62, 18011, 628 ]
3.444444
18
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_create_model_of_logistic_regression.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2016-01-23 23:32:49
# Last:
__author__ = 'yuens'

################################### PART1 IMPORT ######################################
import MySQLdb
import logging
import time
import pylab
from numpy import *
from math import exp
import csv
import decorator_of_function

################################### PART2 CLASS && FUNCTION ###########################

################################### PART3 CLASS TEST ##################################
"""
# Initial parameters
database_name = "TitanicDB"
passenger_table_name = "passenger_table"

LRModel = CreateLogisticRegressionModel()
"""
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 5145, 14, 14629, 14, 8800, 14, 29412, 198, 29113, 21017, 16652, 15, 22196, 40165, 1303, 29113, 198, 2, 7066, 12453, 25, 1398, 62, 17953, 62, 19849, 62, 1659, 62, 64...
3.763158
228
import multiprocessing
from typing import List, Optional

import numpy as np

from ..util import dill_for_apply
[ 11748, 18540, 305, 919, 278, 198, 6738, 19720, 1330, 7343, 11, 32233, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 11485, 22602, 1330, 288, 359, 62, 1640, 62, 39014, 628, 198 ]
3.454545
33
# 377 Combination Sum IV

# Given an integer array with all positive numbers and no duplicates,
# find the number of possible combinations that add up to a positive integer target.
#
# Example:
#
# nums = [1, 2, 3]
# target = 4
#
# The possible combination ways are:
# (1, 1, 1, 1)
# (1, 1, 2)
# (1, 2, 1)
# (1, 3)
# (2, 1, 1)
# (2, 2)
# (3, 1)
#
# Note that different sequences are counted as different combinations.
#
# Therefore the output is 7.
#
# Follow up:
# What if negative numbers are allowed in the given array?
# How does it change the problem?
# What limitation we need to add to the question to allow negative numbers?

print(Solution().combinationSum4([1, 2, 3], 4))
[ 2, 42163, 14336, 1883, 5060, 8363, 198, 198, 2, 11259, 281, 18253, 7177, 351, 477, 3967, 3146, 290, 645, 14184, 16856, 11, 198, 2, 1064, 262, 1271, 286, 1744, 17790, 326, 751, 510, 284, 257, 3967, 18253, 2496, 13, 198, 2, 198, 2, ...
3.076577
222
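The row calls Solution().combinationSum4 without defining it; a minimal sketch of the standard bottom-up DP, where dp[i] counts ordered sequences of nums summing to i, which returns 7 for the example above:

class Solution:
    def combinationSum4(self, nums, target):
        dp = [0] * (target + 1)
        dp[0] = 1  # one way to make 0: the empty sequence
        for i in range(1, target + 1):
            for n in nums:
                if n <= i:
                    dp[i] += dp[i - n]
        return dp[target]


print(Solution().combinationSum4([1, 2, 3], 4))  # 7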
from conans import ConanFile, CMake, tools
import os

STATIC_LIBS = ["nvtt", "squish", "rg_etc1", "nvimage", "bc6h", "posh",
               "bc7", "nvmath", "nvthread", "nvcore"]
SHARED_LIBS = ["nvtt", "nvimage", "nvthread", "nvmath", "nvcore"]
[ 6738, 369, 504, 1330, 31634, 8979, 11, 327, 12050, 11, 4899, 198, 11748, 28686, 198, 198, 35744, 2149, 62, 31271, 4462, 796, 14631, 48005, 926, 1600, 366, 16485, 680, 1600, 366, 41345, 62, 14784, 16, 1600, 366, 48005, 9060, 1600, 366, ...
2.247706
109
#!/usr/bin/env python3
""" train_args.py

train_args.py command-line args.
"""

import argparse


def get_args():
    """
    Build the parser and return the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(
        description="This script lets you train and save your model.",
        usage="python3 train.py flowers/train --gpu --learning_rate 0.001 --epochs 11 --hidden_units 500",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument('data_directory', action="store")

    parser.add_argument('--arch',
                        action="store",
                        default="alexnet",
                        dest='arch',
                        type=str,
                        help='Model architecture to use (e.g. alexnet).',
                        )

    parser.add_argument('--save_dir',
                        action="store",
                        default=".",
                        dest='save_dir',
                        type=str,
                        help='Directory to save the model file.',
                        )

    parser.add_argument('--save_name',
                        action="store",
                        default="checkpoint",
                        dest='save_name',
                        type=str,
                        help='Checkpoint filename.',
                        )

    parser.add_argument('--categories_json',
                        action="store",
                        default="cat_to_name.json",
                        dest='categories_json',
                        type=str,
                        help='Path to file containing the categories.',
                        )

    parser.add_argument('--gpu',
                        action="store_true",
                        dest="use_gpu",
                        default=False,
                        help='Use the GPU to train instead of the CPU')

    hp = parser.add_argument_group('hyperparameters')

    hp.add_argument('--learning_rate',
                    action="store",
                    default=0.001,
                    type=float,
                    help='Learning rate')

    hp.add_argument('--hidden_units', '-hu',
                    action="store",
                    dest="hidden_units",
                    default=[4096],
                    type=int,
                    nargs='+',
                    help='Hidden layer units')

    hp.add_argument('--epochs',
                    action="store",
                    dest="epochs",
                    default=1,
                    type=int,
                    help='Epochs to train the model for')

    # Return the parsed arguments rather than the bare parser; the original
    # called parse_args() and discarded the result.
    return parser.parse_args()


def main():
    """
    Main Function
    """
    print(f'Command line argument utility for train.py.\nTry "python train.py -h".')


if __name__ == '__main__':
    main()

"""
main() is called if script is executed on its own.
"""
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 4512, 62, 22046, 13, 9078, 198, 27432, 62, 22046, 13, 9078, 3141, 12, 1370, 26498, 13, 198, 37811, 198, 198, 11748, 1822, 29572, 198, 198, 4299, 651, 62, 22046, 33529, 198, ...
1.780653
1,623
from django.http import HttpResponseRedirect
from django.conf import settings
from django.views.generic import TemplateView

from apps.payment.models import PaymentLog
from apps.payment.stripe import get_token, get_payment_charge
from apps.subscription.views import start_subscription
[ 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 7738, 1060, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 33571, 13, 41357, 1330, 37350, 7680, 198, 198, 6738, 6725, 13, 37301, 13, 27530, 1330, 28784, ...
3.878378
74
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 198 ]
3.777778
9
# -*- coding: utf-8 -*-
"""Console script for secure_data_store."""
import click

from . import secure_data_store as sds

CONFIG = '~/.sdsrc'

main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 47581, 4226, 329, 5713, 62, 7890, 62, 8095, 526, 15931, 198, 11748, 3904, 198, 6738, 764, 1330, 5713, 62, 7890, 62, 8095, 355, 264, 9310, 198, 198, 10943, 16254, ...
2.625
56
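The snippet calls main() without ever defining it; the click entry point was evidently cut off. A hedged sketch of what it could look like, using only real click APIs; the subcommand and its body are assumptions, not the package's actual interface:

@click.group()
def main():
    """Console script for secure_data_store."""


@main.command()
@click.argument('name')
def create(name):
    # Hypothetical subcommand body; the real module's API is not shown above.
    click.echo(f'Would create a secure data store called {name!r} '
               f'using config {CONFIG}')


main()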
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#  Michael A.G. Aivazis
#  California Institute of Technology
#  (C) 1998-2003  All Rights Reserved
#
#  <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#

from Drawable import Drawable


def nodeAttributes():
    """return a list of valid attributes for Node"""
    return Node._validAttributes.keys()


# version
__id__ = "$Id$"

#  End of file
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 220, 27156, 27156, 27156, 27156, 27156, 198, 2, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 2...
2.81068
206
#  Licensed to the Apache Software Foundation (ASF) under one
#  or more contributor license agreements.  See the NOTICE file
#  distributed with this work for additional information
#  regarding copyright ownership.  The ASF licenses this file
#  to you under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance
#  with the License.  You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing,
#  software distributed under the License is distributed on an
#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#  KIND, either express or implied.  See the License for the
#  specific language governing permissions and limitations
#  under the License.

import os
import pprint
import random
import wx

from cairis.core.armid import *
from cairis.core.Borg import Borg

import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
    FigureCanvasWxAgg as FigCanvas, \
    NavigationToolbar2WxAgg as NavigationToolbar
[ 2, 220, 49962, 284, 262, 24843, 10442, 5693, 357, 1921, 37, 8, 739, 530, 198, 2, 220, 393, 517, 18920, 5964, 11704, 13, 220, 4091, 262, 28536, 2393, 198, 2, 220, 9387, 351, 428, 670, 329, 3224, 1321, 198, 2, 220, 5115, 6634, 9238,...
3.602564
312
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities

__all__ = [
    'GetRegistryResult',
    'AwaitableGetRegistryResult',
    'get_registry',
]


def get_registry(name: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryResult:
    """
    Use this data source to access information about an existing Container Registry.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_azure as azure

    example = azure.containerservice.get_registry(name="testacr",
        resource_group_name="test")
    pulumi.export("loginServer", example.login_server)
    ```

    :param str name: The name of the Container Registry.
    :param str resource_group_name: The Name of the Resource Group where this Container Registry exists.
    """
    __args__ = dict()
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure:containerservice/getRegistry:getRegistry', __args__, opts=opts, typ=GetRegistryResult).value

    return AwaitableGetRegistryResult(
        admin_enabled=__ret__.admin_enabled,
        admin_password=__ret__.admin_password,
        admin_username=__ret__.admin_username,
        id=__ret__.id,
        location=__ret__.location,
        login_server=__ret__.login_server,
        name=__ret__.name,
        resource_group_name=__ret__.resource_group_name,
        sku=__ret__.sku,
        storage_account_id=__ret__.storage_account_id,
        tags=__ret__.tags)
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 17202, 39410, 25, 428, 2393, 373, 7560, 416, 262, 21624, 12994, 24118, 687, 10290, 357, 27110, 5235, 8, 16984, 13, 17202, 198, 2, 17202, 2141, 407, 4370, 416, 1021, 4556, 345, 821, 1728, 345, 760...
2.625
760
#!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
#   Header = Metadata,ArchiveInfo+
#     Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
#     ArchiveInfo = Offset,SecondsPerPoint,Points
#   Data = Archive+
#     Archive = Point+
#       Point = timestamp,value

"""
NOTE: This is a modified version of whisper.py
For details on the modification, read
https://bugs.launchpad.net/graphite/+bug/245835
"""

import os, struct, time

try:
  import fcntl
  CAN_LOCK = True
except ImportError:
  CAN_LOCK = False

LOCK = False
CACHE_HEADERS = False
__headerCache = {}

longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"
archiveInfoSize = struct.calcsize(archiveInfoFormat)

debug = startBlock = endBlock = lambda *a,**k: None


def create(path,archiveList,xFilesFactor=0.5):
  """create(path,archiveList,xFilesFactor=0.5)

  path is a string
  archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
  xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
  """
  #Validate archive configurations...
  assert archiveList, "You must specify at least one archive configuration!"
  archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
  for i,archive in enumerate(archiveList):
    if i == len(archiveList) - 1:
      break
    next = archiveList[i+1]
    assert archive[0] < next[0],\
    "You cannot configure two archives with the same precision %s,%s" % (archive,next)
    assert (next[0] % archive[0]) == 0,\
    "Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0])
    retention = archive[0] * archive[1]
    nextRetention = next[0] * next[1]
    assert nextRetention > retention,\
    "Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next)

  #Looks good, now we create the file and write the header
  # (the bare exists() in the original was a bug; os is already imported)
  assert not os.path.exists(path), "File %s already exists!" % path

  fh = open(path,'wb')
  if LOCK:
    fcntl.flock( fh.fileno(), fcntl.LOCK_EX )

  lastUpdate = struct.pack( timestampFormat, int(time.time()) )
  oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
  maxRetention = struct.pack( longFormat, oldest )
  xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
  archiveCount = struct.pack(longFormat, len(archiveList))
  packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
  fh.write(packedMetadata)
  headerSize = metadataSize + (archiveInfoSize * len(archiveList))
  archiveOffsetPointer = headerSize

  for secondsPerPoint,points in archiveList:
    archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
    fh.write(archiveInfo)
    archiveOffsetPointer += (points * pointSize)

  zeroes = '\x00' * (archiveOffsetPointer - headerSize)
  fh.write(zeroes)
  fh.close()


def update(path,value,timestamp=None):
  """update(path,value,timestamp=None)

  path is a string
  value is a float
  timestamp is either an int or float
  """
  #startBlock('complete update')
  value = float(value)
  fh = open(path,'r+b')
  if LOCK:
    fcntl.flock( fh.fileno(), fcntl.LOCK_EX )

  header = __readHeader(fh)
  now = int( time.time() )
  if timestamp is None:
    timestamp = now
  timestamp = int(timestamp)

  diff = now - timestamp
  assert diff < header['maxRetention'] and diff >= 0, "Timestamp not covered by any archives in this database"
  for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
    if archive['retention'] < diff:
      continue
    lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
    break

  #First we update the highest-precision archive
  myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
  myPackedPoint = struct.pack(pointFormat,myInterval,value)
  fh.seek(archive['offset'])
  packedPoint = fh.read(pointSize)
  (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)

  if baseInterval == 0: #This file's first update
    fh.seek(archive['offset'])
    fh.write(myPackedPoint)
    baseInterval,baseValue = myInterval,value
  else: #Not our first update
    timeDistance = myInterval - baseInterval
    pointDistance = timeDistance / archive['secondsPerPoint']
    byteDistance = pointDistance * pointSize
    myOffset = archive['offset'] + (byteDistance % archive['size'])
    fh.seek(myOffset)
    fh.write(myPackedPoint)

  #Now we propagate the update to lower-precision archives
  #startBlock('update propagation')
  higher = archive
  for lower in lowerArchives:
    if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower):
      break
    higher = lower
  #endBlock('update propagation')

  __changeLastUpdate(fh)
  fh.close()
  #endBlock('complete update')


def update_many(path,points):
  """update_many(path,points)

  path is a string
  points is a list of (timestamp,value) points
  """
  #startBlock('complete update_many path=%s points=%d' % (path,len(points)))
  if not points:
    return
  points = [ (int(t),float(v)) for (t,v) in points]
  points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
  fh = open(path,'r+b')
  if LOCK:
    fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
  header = __readHeader(fh)
  now = int( time.time() )
  archives = iter( header['archives'] )
  currentArchive = next(archives)
  #debug('  update_many currentArchive=%s' % str(currentArchive))
  currentPoints = []

  for point in points:
    age = now - point[0]
    #debug('  update_many iterating points, point=%s age=%d' % (str(point),age))

    while currentArchive['retention'] < age: #we can't fit any more points in this archive
      #debug('  update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))

      if currentPoints: #commit all the points we've found that it can fit
        currentPoints.reverse() #put points in chronological order
        __archive_update_many(fh,header,currentArchive,currentPoints)
        currentPoints = []

      try:
        currentArchive = next(archives)
        #debug('  update_many using next archive %s' % str(currentArchive))
      except StopIteration:
        #debug('  update_many no more archives!')
        currentArchive = None
        break

    if not currentArchive:
      break #drop remaining points that don't fit in the database

    #debug('  update_many adding point=%s' % str(point))
    currentPoints.append(point)

  #debug('  update_many done iterating points')

  if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
    currentPoints.reverse()
    __archive_update_many(fh,header,currentArchive,currentPoints)

  __changeLastUpdate(fh)
  fh.close()
  #endBlock('complete update_many path=%s points=%d' % (path,len(points)))


def info(path):
  """info(path)

  path is a string
  """
  fh = open(path,'rb')
  info = __readHeader(fh)
  fh.close()
  return info


def fetch(path,fromTime,untilTime=None):
  """fetch(path,fromTime,untilTime=None)

  path is a string
  fromTime is an epoch time
  untilTime is also an epoch time, but defaults to now
  """
  fh = open(path,'rb')
  header = __readHeader(fh)
  now = int( time.time() )
  if untilTime is None or untilTime > now:
    untilTime = now
  if fromTime < (now - header['maxRetention']):
    fromTime = now - header['maxRetention']
  assert fromTime < untilTime, "Invalid time interval"

  diff = now - fromTime
  for archive in header['archives']:
    if archive['retention'] >= diff:
      break

  fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) )
  untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) )
  fh.seek(archive['offset'])
  packedPoint = fh.read(pointSize)
  (baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)

  if baseInterval == 0:
    step = archive['secondsPerPoint']
    points = (untilInterval - fromInterval) / step
    timeInfo = (fromInterval,untilInterval,step)
    valueList = [None] * points
    return (timeInfo,valueList)

  #Determine fromOffset
  timeDistance = fromInterval - baseInterval
  pointDistance = timeDistance / archive['secondsPerPoint']
  byteDistance = pointDistance * pointSize
  fromOffset = archive['offset'] + (byteDistance % archive['size'])

  #Determine untilOffset
  timeDistance = untilInterval - baseInterval
  pointDistance = timeDistance / archive['secondsPerPoint']
  byteDistance = pointDistance * pointSize
  untilOffset = archive['offset'] + (byteDistance % archive['size'])

  #Read all the points in the interval
  fh.seek(fromOffset)
  if fromOffset < untilOffset: #If we don't wrap around the archive
    seriesString = fh.read(untilOffset - fromOffset)
  else: #We do wrap around the archive, so we need two reads
    archiveEnd = archive['offset'] + archive['size']
    seriesString = fh.read(archiveEnd - fromOffset)
    fh.seek(archive['offset'])
    seriesString += fh.read(untilOffset - archive['offset'])

  #Now we unpack the series data we just read (anything faster than unpack?)
  byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
  points = len(seriesString) / pointSize
  seriesFormat = byteOrder + (pointTypes * points)
  unpackedSeries = struct.unpack(seriesFormat, seriesString)

  #And finally we construct a list of values (optimize this!)
  valueList = [None] * points #pre-allocate entire list for speed
  currentInterval = fromInterval
  step = archive['secondsPerPoint']

  for i in xrange(0,len(unpackedSeries),2):
    pointTime = unpackedSeries[i]
    if pointTime == currentInterval:
      pointValue = unpackedSeries[i+1]
      valueList[i/2] = pointValue #in-place reassignment is faster than append()
    currentInterval += step

  fh.close()
  timeInfo = (fromInterval,untilInterval,step)
  return (timeInfo,valueList)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 15069, 3648, 38161, 89, 2159, 42559, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, ...
3.1081
3,506
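A short usage sketch for the whisper API above, assuming the elided private helpers (__readHeader, __propagate, and friends) are present as in the full module; the file name and retention scheme are illustrative only:

# Two archives: 60 s resolution for one day, 300 s resolution for one week.
create('example.wsp', [(60, 1440), (300, 2016)], xFilesFactor=0.5)
update('example.wsp', 42.0)                      # stamped with "now"
timeInfo, values = fetch('example.wsp', time.time() - 3600)
print(timeInfo, values[:5])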
from PIL import Image
import csv
from ast import literal_eval as make_tuple
from math import sqrt
import argparse
import os.path


if __name__ == '__main__':
    args = handle_arguments()
    path = args.image
    emoji_list = []

    with open('proc.csv') as raw_list:
        emoji_list = []
        reader = csv.reader(raw_list)
        raw_list = list(reader)
        for entry in raw_list:
            emoji_list.append([entry[0], make_tuple(entry[1])])

    image = load_img(path)
    size = image.size

    emoji_grid = gen_matrix(image)
    write_out(emoji_grid)
    print('Output in out.txt')
[ 6738, 350, 4146, 1330, 7412, 198, 11748, 269, 21370, 198, 6738, 6468, 1330, 18875, 62, 18206, 355, 787, 62, 83, 29291, 198, 6738, 10688, 1330, 19862, 17034, 198, 11748, 1822, 29572, 198, 11748, 28686, 13, 6978, 628, 628, 628, 198, 361, ...
2.415323
248
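handle_arguments and load_img are called above but were elided from the row; a minimal sketch of what they plausibly look like, given the argparse and PIL imports (the argument name is an assumption drawn from args.image):

def handle_arguments():
    # Hypothetical reconstruction: the script reads args.image above.
    parser = argparse.ArgumentParser(description='Render an image as emoji')
    parser.add_argument('image', help='path to the input image')
    return parser.parse_args()


def load_img(path):
    # Image.open is lazy; convert to RGB so pixel access is uniform.
    return Image.open(path).convert('RGB')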
# Xlib.ext.xinput -- XInput extension module
#
#    Copyright (C) 2012 Outpost Embedded, LLC
#      Forest Bond <forest.bond@rapidrollout.com>
#
#    This library is free software; you can redistribute it and/or
#    modify it under the terms of the GNU Lesser General Public License
#    as published by the Free Software Foundation; either version 2.1
#    of the License, or (at your option) any later version.
#
#    This library is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#    See the GNU Lesser General Public License for more details.
#
#    You should have received a copy of the GNU Lesser General Public
#    License along with this library; if not, write to the
#    Free Software Foundation, Inc.,
#    59 Temple Place,
#    Suite 330,
#    Boston, MA 02111-1307 USA

'''
A very incomplete implementation of the XInput extension.
'''
import sys
import array
import struct

# Python 2/3 compatibility.
from six import integer_types

from Xlib.protocol import rq
from Xlib import X

extname = 'XInputExtension'

PropertyDeleted = 0
PropertyCreated = 1
PropertyModified = 2

NotifyNormal = 0
NotifyGrab = 1
NotifyUngrab = 2
NotifyWhileGrabbed = 3
NotifyPassiveGrab = 4
NotifyPassiveUngrab = 5

NotifyAncestor = 0
NotifyVirtual = 1
NotifyInferior = 2
NotifyNonlinear = 3
NotifyNonlinearVirtual = 4
NotifyPointer = 5
NotifyPointerRoot = 6
NotifyDetailNone = 7

GrabtypeButton = 0
GrabtypeKeycode = 1
GrabtypeEnter = 2
GrabtypeFocusIn = 3
GrabtypeTouchBegin = 4

AnyModifier = (1 << 31)
AnyButton = 0
AnyKeycode = 0

AsyncDevice = 0
SyncDevice = 1
ReplayDevice = 2
AsyncPairedDevice = 3
AsyncPair = 4
SyncPair = 5

SlaveSwitch = 1
DeviceChange = 2

MasterAdded = (1 << 0)
MasterRemoved = (1 << 1)
SlaveAdded = (1 << 2)
SlaveRemoved = (1 << 3)
SlaveAttached = (1 << 4)
SlaveDetached = (1 << 5)
DeviceEnabled = (1 << 6)
DeviceDisabled = (1 << 7)

AddMaster = 1
RemoveMaster = 2
AttachSlave = 3
DetachSlave = 4

AttachToMaster = 1
Floating = 2

ModeRelative = 0
ModeAbsolute = 1

MasterPointer = 1
MasterKeyboard = 2
SlavePointer = 3
SlaveKeyboard = 4
FloatingSlave = 5

KeyClass = 0
ButtonClass = 1
ValuatorClass = 2
ScrollClass = 3
TouchClass = 8

KeyRepeat = (1 << 16)

AllDevices = 0
AllMasterDevices = 1

DeviceChanged = 1
KeyPress = 2
KeyRelease = 3
ButtonPress = 4
ButtonRelease = 5
Motion = 6
Enter = 7
Leave = 8
FocusIn = 9
FocusOut = 10
HierarchyChanged = 11
PropertyEvent = 12
RawKeyPress = 13
RawKeyRelease = 14
RawButtonPress = 15
RawButtonRelease = 16
RawMotion = 17

DeviceChangedMask = (1 << DeviceChanged)
KeyPressMask = (1 << KeyPress)
KeyReleaseMask = (1 << KeyRelease)
ButtonPressMask = (1 << ButtonPress)
ButtonReleaseMask = (1 << ButtonRelease)
MotionMask = (1 << Motion)
EnterMask = (1 << Enter)
LeaveMask = (1 << Leave)
FocusInMask = (1 << FocusIn)
FocusOutMask = (1 << FocusOut)
HierarchyChangedMask = (1 << HierarchyChanged)
PropertyEventMask = (1 << PropertyEvent)
RawKeyPressMask = (1 << RawKeyPress)
RawKeyReleaseMask = (1 << RawKeyRelease)
RawButtonPressMask = (1 << RawButtonPress)
RawButtonReleaseMask = (1 << RawButtonRelease)
RawMotionMask = (1 << RawMotion)

GrabModeSync = 0
GrabModeAsync = 1
GrabModeTouch = 2

DEVICEID = rq.Card16
DEVICE = rq.Card16
DEVICEUSE = rq.Card8


def query_version(self):
    return XIQueryVersion(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        major_version=2,
        minor_version=0,
    )


EventMask = rq.Struct(
    DEVICE('deviceid'),
    rq.LengthOf('mask', 2),
    Mask('mask'),
)


def select_events(self, event_masks):
    '''
    select_events(event_masks)

    event_masks:
        Sequence of (deviceid, mask) pairs, where deviceid is a numerical
        device ID, or AllDevices or AllMasterDevices, and mask is either an
        unsigned integer or sequence of 32 bits unsigned values
    '''
    return XISelectEvents(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        window=self,
        masks=event_masks,
    )


AnyInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Pad(2),
)

ButtonInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.LengthOf(('state', 'labels'), 2),
    ButtonState('state'),
    rq.List('labels', rq.Card32),
)

KeyInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.LengthOf('keycodes', 2),
    rq.List('keycodes', rq.Card32),
)

ValuatorInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Card16('number'),
    rq.Card32('label'),
    FP3232('min'),
    FP3232('max'),
    FP3232('value'),
    rq.Card32('resolution'),
    rq.Card8('mode'),
    rq.Pad(3),
)

ScrollInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Card16('number'),
    rq.Card16('scroll_type'),
    rq.Pad(2),
    rq.Card32('flags'),
    FP3232('increment'),
)

TouchInfo = rq.Struct(
    rq.Card16('type'),
    rq.Card16('length'),
    rq.Card16('sourceid'),
    rq.Card8('mode'),
    rq.Card8('num_touches'),
)

INFO_CLASSES = {
    KeyClass: KeyInfo,
    ButtonClass: ButtonInfo,
    ValuatorClass: ValuatorInfo,
    ScrollClass: ScrollInfo,
    TouchClass: TouchInfo,
}

ClassInfo = ClassInfoClass()

DeviceInfo = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card16('use'),
    rq.Card16('attachment'),
    rq.LengthOf('classes', 2),
    rq.LengthOf('name', 2),
    rq.Bool('enabled'),
    rq.Pad(1),
    rq.String8('name', 4),
    rq.List('classes', ClassInfo),
)

HierarchyInfo = rq.Struct(
    DEVICEID('deviceid'),
    DEVICEID('attachment'),
    DEVICEUSE('type'),
    rq.Bool('enabled'),
    rq.Pad(2),
    rq.Card32('flags'),
)

HierarchyEventData = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card32('time'),
    rq.Card32('flags'),
    rq.LengthOf('info', 2),
    rq.Pad(10),
    rq.List('info', HierarchyInfo),
)

ModifierInfo = rq.Struct(
    rq.Card32('base_mods'),
    rq.Card32('latched_mods'),
    rq.Card32('locked_mods'),
    rq.Card32('effective_mods'),
)

GroupInfo = rq.Struct(
    rq.Card8('base_group'),
    rq.Card8('latched_group'),
    rq.Card8('locked_group'),
    rq.Card8('effective_group'),
)

DeviceEventData = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card32('time'),
    rq.Card32('detail'),
    rq.Window('root'),
    rq.Window('event'),
    rq.Window('child'),
    FP1616('root_x'),
    FP1616('root_y'),
    FP1616('event_x'),
    FP1616('event_y'),
    rq.LengthOf('buttons', 2),
    rq.Card16('valulators_len'),
    DEVICEID('sourceid'),
    rq.Pad(2),
    rq.Card32('flags'),
    rq.Object('mods', ModifierInfo),
    rq.Object('groups', GroupInfo),
    ButtonState('buttons'),
)

DeviceChangedEventData = rq.Struct(
    DEVICEID('deviceid'),
    rq.Card32('time'),
    rq.LengthOf('classes', 2),
    DEVICEID('sourceid'),
    rq.Card8('reason'),
    rq.Pad(11),
    rq.List('classes', ClassInfo),
)
2.331232
3,173
from .loader import load
[ 6738, 764, 29356, 1330, 3440 ]
4.8
5
# Generated by Django 3.0.4 on 2020-04-16 23:10

from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 19, 319, 12131, 12, 3023, 12, 1433, 2242, 25, 940, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
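The generated migration above ends right after the import; the Migration class itself was evidently truncated. Every Django-generated migration shares the same skeleton, so a minimal placeholder would look like this (the dependencies and operations are assumptions; the real ones are not shown in the row):

class Migration(migrations.Migration):

    dependencies = [
        # e.g. ('app_name', '0001_initial')  -- hypothetical
    ]

    operations = [
        # the generated operations were not included in the row
    ]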
# @Time    : 2022/1/26 23:07
# @Author  : zhaoyu
# @Site    :
# @File    : __init__.py.py
# @Software: PyCharm
# @Note    : xx
[ 2, 2488, 7575, 220, 220, 220, 1058, 33160, 14, 16, 14, 2075, 2242, 25, 2998, 220, 198, 2, 2488, 13838, 220, 1058, 1976, 3099, 726, 84, 198, 2, 2488, 29123, 220, 220, 220, 1058, 220, 198, 2, 2488, 8979, 220, 220, 220, 1058, 11593, ...
1.897059
68
from typing import Union
from unittest import mock

import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case

from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
    Attribute,
    AttributeProduct,
    AttributeValue,
    AttributeVariant,
    Category,
    Collection,
    Product,
    ProductType,
    ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content

QUERY_ATTRIBUTES = """
query {
  attributes(first: 20) {
    edges {
      node {
        id name slug
        values { id name slug }
      }
    }
  }
}
"""

QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
{
  products(first: 1) {
    edges {
      node {
        attributes {
          attribute { slug }
          values { slug }
          value { slug }
        }
        variants {
          attributes {
            attribute { slug }
            values { slug }
            value { slug }
          }
        }
      }
    }
  }
}
"""


def test_resolve_attribute_values(user_api_client, product, staff_user):
    """Ensure the attribute values are properly resolved."""
    query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
    api_client = user_api_client

    variant = product.variants.first()

    assert product.attributes.count() == 1
    assert variant.attributes.count() == 1

    product_attribute_values = list(
        product.attributes.first().values.values_list("slug", flat=True)
    )
    variant_attribute_values = list(
        variant.attributes.first().values.values_list("slug", flat=True)
    )

    assert len(product_attribute_values) == 1
    assert len(variant_attribute_values) == 1

    product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
        "edges"
    ][0]["node"]

    product_attributes = product["attributes"]
    variant_attributes = product["variants"][0]["attributes"]
    assert len(product_attributes) == len(product_attribute_values)
    assert len(variant_attributes) == len(variant_attribute_values)

    assert product_attributes[0]["attribute"]["slug"] == "color"
    assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
    assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]

    assert variant_attributes[0]["attribute"]["slug"] == "size"
    assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
    assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]


def test_resolve_attribute_values_non_assigned_to_node(
    user_api_client, product, staff_user
):
    """Ensure the attribute values are properly resolved when an attribute is part
    of the product type but not of the node (product/variant), thus no values
    should be resolved.
    """
    query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
    api_client = user_api_client

    variant = product.variants.first()
    product_type = product.product_type

    # Create dummy attributes
    unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
    unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")

    # Create a value for each dummy attribute to ensure they are not returned
    # by the product or variant as they are not associated to them
    AttributeValue.objects.bulk_create(
        [
            AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
            AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
        ]
    )

    # Assign the dummy attributes to the product type and push them at the top
    # through a sort_order=0 as the other attributes have sort_order=null
    AttributeProduct.objects.create(
        attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
    )
    AttributeVariant.objects.create(
        attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
    )

    assert product.attributes.count() == 1
    assert variant.attributes.count() == 1

    product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
        "edges"
    ][0]["node"]

    product_attributes = product["attributes"]
    variant_attributes = product["variants"][0]["attributes"]
    assert len(product_attributes) == 2, "Non-assigned attr from the PT may be missing"
    assert len(variant_attributes) == 2, "Non-assigned attr from the PT may be missing"

    assert product_attributes[0]["attribute"]["slug"] == "product"
    assert product_attributes[0]["values"] == []
    assert variant_attributes[0]["value"] is None

    assert variant_attributes[0]["attribute"]["slug"] == "variant"
    assert variant_attributes[0]["values"] == []
    assert variant_attributes[0]["value"] is None


def test_attributes_filter_by_product_type_with_empty_value():
    """Ensure passing an empty or null value is ignored and the queryset is simply
    returned without any modification.
    """
    qs = Attribute.objects.all()

    assert filter_attributes_by_product_types(qs, "...", "") is qs
    assert filter_attributes_by_product_types(qs, "...", None) is qs


def test_attributes_filter_by_product_type_with_unsupported_field():
    """Ensure using an unknown field to filter attributes by raises a NotImplemented
    exception.
    """
    qs = Attribute.objects.all()

    with pytest.raises(NotImplementedError) as exc:
        filter_attributes_by_product_types(qs, "in_space", "a-value")

    assert exc.value.args == ("Filtering by in_space is unsupported",)


def test_attributes_filter_by_non_existing_category_id():
    """Ensure using a non-existing category ID returns an empty query set."""
    category_id = graphene.Node.to_global_id("Category", -1)
    mocked_qs = mock.MagicMock()
    qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
    assert qs == mocked_qs.none.return_value


CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
  attributeCreate(input: {name: $name, values: $values}) {
    errors { field message }
    productErrors { field message code }
    attribute {
      name slug
      values { name slug }
      productTypes(first: 10) {
        edges { node { id } }
      }
    }
  }
}
"""

UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
    $id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
    $removeValues: [ID]!) {
  attributeUpdate(
      id: $id,
      input: {name: $name, addValues: $addValues, removeValues: $removeValues}) {
    errors { field message }
    productErrors { field message code }
    attribute {
      name slug
      values { name slug }
      productTypes(first: 10) {
        edges { node { id } }
      }
    }
  }
}
"""

CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue($attributeId: ID!, $name: String!) {
  attributeValueCreate(attribute: $attributeId, input: {name: $name}) {
    productErrors { field message code }
    attribute { values { name } }
    attributeValue { name type slug }
  }
}
"""

UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice($id: ID!, $name: String!) {
  attributeValueUpdate(id: $id, input: {name: $name}) {
    errors { field message }
    attributeValue { name slug }
    attribute { values { name } }
  }
}
"""


def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
    """Ensure the attributes assigned to a product type are resolved even if the
    product doesn't provide any value for it or is not directly associated to it.
    """
    # Retrieve the product's variant
    variant = product.variants.get()

    # Remove all attributes and values from the product and its variant
    product.attributesrelated.clear()
    variant.attributesrelated.clear()

    # Retrieve the product and variant's attributes
    products = get_graphql_content(
        api_client.post_graphql(
            """
            {
              products(first: 10) {
                edges {
                  node {
                    attributes {
                      attribute { slug }
                      values { name }
                    }
                    variants {
                      attributes {
                        attribute { slug }
                        values { name }
                      }
                    }
                  }
                }
              }
            }
            """
        )
    )["data"]["products"]["edges"]

    # Ensure we are only working on one product and variant, the ones we are testing
    assert len(products) == 1
    assert len(products[0]["node"]["variants"]) == 1

    # Retrieve the nodes data
    product = products[0]["node"]
    variant = product["variants"][0]

    # Ensure the product attributes values are all None
    assert len(product["attributes"]) == 1
    assert product["attributes"][0]["attribute"]["slug"] == "color"
    assert product["attributes"][0]["values"] == []

    # Ensure the variant attributes values are all None
    assert variant["attributes"][0]["attribute"]["slug"] == "size"
    assert variant["attributes"][0]["values"] == []


ASSIGN_ATTR_QUERY = """
mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
  attributeAssign(productTypeId: $productTypeId, operations: $operations) {
    errors { field message }
    productType {
      id
      productAttributes { id }
      variantAttributes { id }
    }
  }
}
"""


def test_assign_variant_attribute_to_product_type_with_disabled_variants(
    staff_api_client,
    permission_manage_products,
    product_type_without_variant,
    color_attribute_without_values,
):
    """The assignAttribute mutation should raise an error when trying to add an
    attribute as a variant attribute when the product type doesn't support
    variants"""

    product_type = product_type_without_variant
    attribute = color_attribute_without_values
    staff_api_client.user.user_permissions.add(permission_manage_products)

    product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    query = ASSIGN_ATTR_QUERY
    operations = [
        {"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
    ]
    variables = {"productTypeId": product_type_global_id, "operations": operations}

    content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
        "data"
    ]["attributeAssign"]
    assert content["errors"] == [
        {
            "field": "operations",
            "message": "Variants are disabled in this product type.",
        }
    ]


def test_assign_variant_attribute_having_unsupported_input_type(
    staff_api_client, permission_manage_products, product_type, size_attribute
):
    """The assignAttribute mutation should raise an error when trying to use an
    attribute as a variant attribute when the attribute's input type doesn't
    support variants"""

    attribute = size_attribute
    attribute.input_type = AttributeInputType.MULTISELECT
    attribute.save(update_fields=["input_type"])
    product_type.variant_attributes.clear()

    staff_api_client.user.user_permissions.add(permission_manage_products)

    product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    query = ASSIGN_ATTR_QUERY
    operations = [
        {"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
    ]
    variables = {"productTypeId": product_type_global_id, "operations": operations}

    content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
        "data"
    ]["attributeAssign"]
    assert content["errors"] == [
        {
            "field": "operations",
            "message": (
                "Attributes having for input types ['multiselect'] cannot be assigned "
                "as variant attributes"
            ),
        }
    ]


UNASSIGN_ATTR_QUERY = """
mutation unAssignAttribute(
    $productTypeId: ID!, $attributeIds: [ID]!
) {
  attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
    errors { field message }
    productType {
      id
      variantAttributes { id }
      productAttributes { id }
    }
  }
}
"""


def test_unassign_attributes_not_in_product_type(
    staff_api_client, permission_manage_products, color_attribute_without_values
):
    """The unAssignAttribute mutation should not raise any error when trying
    to remove an attribute that is not/no longer in the product type."""

    staff_api_client.user.user_permissions.add(permission_manage_products)

    product_type = ProductType.objects.create(name="Type")
    product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)

    query = UNASSIGN_ATTR_QUERY
    variables = {
        "productTypeId": product_type_global_id,
        "attributeIds": [
            graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
        ],
    }

    content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
        "data"
    ]["attributeUnassign"]
    assert not content["errors"]

    assert content["productType"]["id"] == product_type_global_id
    assert len(content["productType"]["productAttributes"]) == 0
    assert len(content["productType"]["variantAttributes"]) == 0


ATTRIBUTES_RESORT_QUERY = """
mutation ProductTypeReorderAttributes(
  $productTypeId: ID!
  $moves: [ReorderInput]!
  $type: AttributeTypeEnum!
) {
  productTypeReorderAttributes(
    productTypeId: $productTypeId
    moves: $moves
    type: $type
  ) {
    productType {
      id
      variantAttributes { id slug }
      productAttributes { id }
    }
    errors { field message }
  }
}
"""


def test_sort_attributes_within_product_type_invalid_product_type(
    staff_api_client, permission_manage_products
):
    """Try to reorder an invalid product type (invalid ID)."""

    product_type_id = graphene.Node.to_global_id("ProductType", -1)
    attribute_id = graphene.Node.to_global_id("Attribute", -1)

    variables = {
        "type": "VARIANT",
        "productTypeId": product_type_id,
        "moves": [{"id": attribute_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
        )
    )["data"]["productTypeReorderAttributes"]

    assert content["errors"] == [
        {
            "field": "productTypeId",
            "message": f"Couldn't resolve to a product type: {product_type_id}",
        }
    ]


def test_sort_attributes_within_product_type_invalid_id(
    staff_api_client, permission_manage_products, color_attribute
):
    """Try to reorder an attribute not associated to the given product type."""

    product_type = ProductType.objects.create(name="Dummy Type")
    product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)

    variables = {
        "type": "VARIANT",
        "productTypeId": product_type_id,
        "moves": [{"id": attribute_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
        )
    )["data"]["productTypeReorderAttributes"]

    assert content["errors"] == [
        {
            "field": "moves",
            "message": f"Couldn't resolve to an attribute: {attribute_id}",
        }
    ]


ATTRIBUTE_VALUES_RESORT_QUERY = """
mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
  attributeReorderValues(attributeId: $attributeId, moves: $moves) {
    attribute {
      id
      values { id }
    }
    errors { field message }
  }
}
"""


def test_sort_values_within_attribute_invalid_product_type(
    staff_api_client, permission_manage_products
):
    """Try to reorder an invalid attribute (invalid ID)."""

    attribute_id = graphene.Node.to_global_id("Attribute", -1)
    value_id = graphene.Node.to_global_id("AttributeValue", -1)

    variables = {
        "attributeId": attribute_id,
        "moves": [{"id": value_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTE_VALUES_RESORT_QUERY,
            variables,
            permissions=[permission_manage_products],
        )
    )["data"]["attributeReorderValues"]

    assert content["errors"] == [
        {
            "field": "attributeId",
            "message": f"Couldn't resolve to an attribute: {attribute_id}",
        }
    ]


def test_sort_values_within_attribute_invalid_id(
    staff_api_client, permission_manage_products, color_attribute
):
    """Try to reorder a value not associated to the given attribute."""

    attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
    value_id = graphene.Node.to_global_id("AttributeValue", -1)

    variables = {
        "type": "VARIANT",
        "attributeId": attribute_id,
        "moves": [{"id": value_id, "sortOrder": 1}],
    }

    content = get_graphql_content(
        staff_api_client.post_graphql(
            ATTRIBUTE_VALUES_RESORT_QUERY,
            variables,
            permissions=[permission_manage_products],
        )
    )["data"]["attributeReorderValues"]

    assert content["errors"] == [
        {
            "field": "moves",
            "message": f"Couldn't resolve to an attribute value: {value_id}",
        }
    ]


ATTRIBUTES_FILTER_QUERY = """
query($filters: AttributeFilterInput!) {
  attributes(first: 10, filter: $filters) {
    edges {
      node { name slug }
    }
  }
}
"""

ATTRIBUTES_SORT_QUERY = """
query($sortBy: AttributeSortingInput) {
  attributes(first: 10, sortBy: $sortBy) {
    edges {
      node { slug }
    }
  }
}
"""


def test_sort_attributes_by_default_sorting(api_client):
    """Don't provide any sorting, this should sort by name by default."""
    Attribute.objects.bulk_create(
        [Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
    )

    attributes = get_graphql_content(
        api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
    )["data"]["attributes"]["edges"]

    assert len(attributes) == 2
    assert attributes[0]["node"]["slug"] == "b"
    assert attributes[1]["node"]["slug"] == "a"
[ 6738, 19720, 1330, 4479, 198, 6738, 555, 715, 395, 1330, 15290, 198, 198, 11748, 42463, 198, 11748, 12972, 9288, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 3254, 24765, 12331, 198, 6738, 42625, 14208, 13, 9945, 13, 27530, ...
2.242192
9,509
import os
import json
import cv2
import logging
import boto3
import botocore

s3 = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then same as file_name
    :return: True if file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = file_name

    # Upload the file
    s3_client = s3
    try:
        response = s3_client.upload_file(file_name, bucket, object_name)
    except botocore.exceptions.ClientError as e:
        logging.error(e)
        return False
    return True
[ 11748, 28686, 198, 11748, 33918, 198, 11748, 269, 85, 17, 198, 11748, 18931, 198, 11748, 275, 2069, 18, 198, 11748, 10214, 420, 382, 198, 198, 82, 18, 796, 275, 2069, 18, 13, 16366, 10786, 82, 18, 11537, 198, 6404, 1362, 796, 18931, ...
2.677524
307
from pyinstrument import Profiler

p = Profiler(use_signal=False)

p.start()

func(900)

p.stop()

print(p.output_text())

with open('overflow_out.html', 'w') as f:
    f.write(p.output_html())
[ 6738, 12972, 259, 43872, 1330, 4415, 5329, 198, 198, 79, 796, 4415, 5329, 7, 1904, 62, 12683, 282, 28, 25101, 8, 198, 198, 79, 13, 9688, 3419, 198, 198, 20786, 7, 12865, 8, 198, 198, 79, 13, 11338, 3419, 198, 198, 4798, 7, 79, 1...
2.365854
82
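func is called but never defined in the row; a hypothetical workload so the profile has something to record (the argument 900 suggests a deep call chain, hence the recursion; this is an assumption, not the original function):

def func(depth):
    # Recurse to the requested depth so the profiler sees a deep stack.
    if depth > 0:
        func(depth - 1)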
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#

from __future__ import print_function
from __future__ import division

import argparse
import sys
import struct
import re
import hashlib

try:
    from elftools.elf.elffile import ELFFile
    from elftools.elf.constants import SH_FLAGS
    from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
    from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
    from elftools.elf.sections import SymbolTableSection
    from elftools.elf.relocation import RelocationSection
except ImportError:
    print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with

$ apt install python3-pyelftools

if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
    raise

small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None


if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 347, 10305, 12, 17, 12, 2601, 682, 198, 2, 198, 2, 15069, 357, 66, 8, 13130, 11, 5164, 12022, 15302, 198, 2, 198, 198, 6738, 115...
2.888298
376
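main() is called but its body was elided from the row. The elftools imports hint at what it does; a minimal, hedged sketch of opening an ELF and walking its sections with the pyelftools API (the file name is an assumption):

def main():
    # Hypothetical minimal body: dump section names and sizes.
    with open('tee.elf', 'rb') as f:
        elf = ELFFile(f)
        for section in elf.iter_sections():
            print(section.name, section['sh_size'])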
# The MIT License (MIT)
#
# Copyright (c) 2020 Jeff Epler for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Make a key (button) repeat when held down
"""

import time
[ 2, 383, 17168, 13789, 357, 36393, 8, 198, 2, 198, 2, 15069, 357, 66, 8, 12131, 5502, 412, 20053, 329, 1215, 1878, 4872, 20171, 11419, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 25...
3.804416
317
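Only the docstring and the time import survive in the row; the repeat logic itself was cut off. A generic sketch of the technique the docstring names, an initial delay followed by repeats at a fixed rate, with read_button() and send_key() as hypothetical stand-ins for the real hardware poll and key emitter:

REPEAT_DELAY = 0.5   # seconds the key must be held before repeating
REPEAT_RATE = 0.1    # seconds between repeats once repeating starts

pressed_at = None
next_repeat = None

while True:
    if read_button():                       # hypothetical hardware poll
        now = time.monotonic()
        if pressed_at is None:
            pressed_at = now
            next_repeat = now + REPEAT_DELAY
            send_key()                      # hypothetical: fire once on press
        elif now >= next_repeat:
            next_repeat = now + REPEAT_RATE
            send_key()                      # fire again while held
    else:
        pressed_at = None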
# --- Class declaration with constructor --- #

# --- Instantiating a class --- #
# --- I prefer initialization with keyword arguments --- #
pc_instanz = PC(cpu='Ryzen 7', gpu='RTX2070Super', ram='GSkill')

# --- Accessing normal _public_ attributes --- #
print(pc_instanz.cpu)
print(pc_instanz.gpu)

# --- Accessing a _private_ attribute --- #
# Commented out because it raises an AttributeError.
# print(pc_instanz.__ram)

# --- Accessing the instance dictionary to inspect that instance's contents. --- #
print(pc_instanz.__dict__)

# --- Accessing the actually _private_ attribute. --- #
print(pc_instanz._PC__ram)
[ 2, 11420, 14770, 562, 437, 988, 75, 10186, 10255, 17431, 19554, 74, 13165, 11420, 1303, 628, 198, 2, 11420, 2262, 272, 17027, 959, 2150, 304, 7274, 14770, 21612, 11420, 2, 198, 2, 11420, 26364, 307, 20867, 89, 2217, 4656, 20768, 271, ...
2.573705
251
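The first comment announces a class declaration, but the PC class itself is missing from the row; a minimal sketch reconstructed from how the instance is used above (the attribute set is implied by the keyword arguments and by the name-mangled _PC__ram access):

class PC:
    def __init__(self, cpu, gpu, ram):
        self.cpu = cpu      # public attribute
        self.gpu = gpu      # public attribute
        self.__ram = ram    # name-mangled to _PC__ram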