content
stringlengths
1
1.04M
input_ids
listlengths
1
774k
ratio_char_token
float64
0.38
22.9
token_count
int64
1
774k
import logging import pickle
[ 11748, 18931, 198, 11748, 2298, 293, 628, 198 ]
3.875
8
from firechannel import get_client
[ 6738, 2046, 17620, 1330, 651, 62, 16366, 628, 628 ]
4.222222
9
from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class FilesConfig(AppConfig): """Configuration for ``Files`` app.""" name = 'apps.files' verbose_name = _('Files')
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 198, 6738, 42625, 14208, 13, 26791, 13, 41519, 1330, 334, 1136, 5239, 62, 75, 12582, 355, 4808, 628, 198, 4871, 13283, 16934, 7, 4677, 16934, 2599, 198, 220, 220, 220, 37227, 38149, 329,...
3.055556
72
#standard libraries import time from datetime import datetime import csv import os import json from concurrent import futures import threading import multiprocessing import math #external libraries import numpy as np import pandas as pd from discord import Webhook, RequestsWebhookAdapter from binance.client import Client from binance.enums import * from matplotlib import pyplot as plt #dash imports import dash import dash_html_components as html import dash_core_components as dcc from dash.dependencies import Input, Output import plotly.express as px import plotly.graph_objects as go #file imports from database import LiveDataBase from actor import NNActor from utils import read_json, read_config, timer if __name__ == "__main__": from pretrain import Network #load in the actor Actor = NNActor(neural_network=Network, load_path="./experiments/testeth2/Run1", epoch=0) bot = Bot(symbol="ETHUSDT", run_path="./experiments/testeth2/Run1", actor=Actor) bot.run()
[ 2, 20307, 12782, 198, 11748, 640, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 269, 21370, 198, 11748, 28686, 198, 11748, 33918, 198, 6738, 24580, 1330, 25650, 198, 11748, 4704, 278, 198, 11748, 18540, 305, 919, 278, 198, 11748, ...
3.316667
300
import matplotlib.pyplot as plt import time from datetime import datetime import os import os.path from tkinter import filedialog import tkinter as tk import ast import openpyxl import pandas as pd from pandas.io.json import json_normalize import numpy as np # import random ## #create combined "All" data by taking sum/mean/max among various roi #### if len(self.roi_label_unique) > 1: #ROI names MUST NOT have "All" ot "Dict"! ## columns_all = [a + "_All" for a in header_split] ## self.df_forcedata = pd.concat([self.df_forcedata, ## pd.DataFrame(columns=columns_all)], sort=False) ## self.df_forcedata[columns_all] = self.df_forcedata[columns_all].fillna(0) ## ## for a in self.roi_label_unique: ## if a == 'All': ## print("Change ROI name 'All'") ## break ## for i in range(len(columns_all)): ## if header_split[i] in header_split_add: ## self.df_forcedata[columns_all[i]] += self.df_forcedata[header_split[i] + ## "_" + a].fillna(0) ## elif header_split[i] in header_split_max: ## clist1 = [header_split[i] + "_" + b for b in self.roi_label_unique] ## self.df_forcedata[columns_all[i]] = self.df_forcedata[clist1].max(axis=1) ## elif header_split[i] in header_split_avg: ## clist2 = [header_split[i] + "_" + b for b in self.roi_label_unique] ## self.df_forcedata[columns_all[i]] = self.df_forcedata[clist2].mean(axis=1) ## ## self.roi_label_unique.update(["All"]) ## self.df_final.to_excel('E:/Work/Data/Summary/20200213/Sex/summary_temp_' + ## str(random.randint(1, 90000)) + '.xlsx') #export as excel ## self.df_final = self.df_all.copy() ## self.df_all.to_excel("E:/Work/Codes/Test codes/test5.xlsx") #export as excel # if legend_parameter == "ROI Label": #no filtering as this is already plotted in prepareplot (when leg = None) # self.plotSummary(summaryDict, df_good, df_good) # else: # ## legend_parameter = 'Folder_Name' #choose, same as column names # legend_list = df_good[legend_parameter].unique() # legend_list.sort() # print(legend_list) # markerlist = ["o", "v", "P", "^", "D", 
"X", "<", ">", "*", "s", # "+", "d", "1", "x", "2", "h"] # figlist = None # i = 0 # ## df_leg = pd.DataFrame(dict(zip([legend_parameter], [legend_list]))) # for lg in legend_list: # print("zxz", lg) # i = 0 if i > 15 else i # df_filtered = df_good[df_good[legend_parameter] == lg] # self.plotSummary(summaryDict, # df_filtered, df_good, legend_parameter, markerlist[i], # figlist, lg) # figlist = self.figdict.copy() # ## df_all_joined = self.df_all.copy() # ## df_all_joined.insert(0, legend_parameter, lg) # ## if i == 0: # ## df_final = df_all_joined.copy() # ## else: # ## df_final = df_final.append(df_all_joined, ignore_index=True, sort=False) # ## print("iter", i) # i += 1 # ## self.df_final = df_final.copy() ##a.combineSummary("Folder_Name") ##if a.list_filepath != "": ## a.showSummaryPlot() ##summary = SummaryAnal() ##summary.importSummary() ##summary.plotSummary(summary.speed_def_unique, ## summary.roi_label_unique, ## summary.df_forcedata, ## summary.df_forcedata) ##summary.showSummaryPlot()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 201, 198, 11748, 640, 201, 198, 6738, 4818, 8079, 1330, 4818, 8079, 201, 198, 11748, 28686, 201, 198, 11748, 28686, 13, 6978, 201, 198, 6738, 256, 74, 3849, 1330, 5717, 498, 519...
1.777639
2,406
from onegov.activity.collections.activity import ActivityFilter from onegov.activity.collections.activity import ActivityCollection from onegov.activity.collections.attendee import AttendeeCollection from onegov.activity.collections.booking import BookingCollection from onegov.activity.collections.invoice import InvoiceCollection from onegov.activity.collections.occasion import OccasionCollection from onegov.activity.collections.period import PeriodCollection from onegov.activity.collections.publication_request import \ PublicationRequestCollection from onegov.activity.collections.volunteer import VolunteerCollection __all__ = [ 'ActivityCollection', 'ActivityFilter', 'AttendeeCollection', 'BookingCollection', 'InvoiceCollection', 'OccasionCollection', 'PeriodCollection', 'PublicationRequestCollection', 'VolunteerCollection', ]
[ 6738, 530, 9567, 13, 21797, 13, 4033, 26448, 13, 21797, 1330, 24641, 22417, 198, 6738, 530, 9567, 13, 21797, 13, 4033, 26448, 13, 21797, 1330, 24641, 36307, 198, 6738, 530, 9567, 13, 21797, 13, 4033, 26448, 13, 1078, 437, 1453, 1330, ...
3.7173
237
""" This file contains SPRKKRAtoms - an enhanced version of Atoms to be used with SPRKKR """ from ase import Atoms from ..common.unique_values import UniqueValuesMapping import spglib from ase.spacegroup import Spacegroup import numpy as np from ..sprkkr.sites import Site from ..common.misc import numpy_index class SPRKKRAtoms(Atoms): """ ASE Atoms object extended by the data necessary for SPR-KKR calculations """ @staticmethod def promote_ase_atoms(obj, symmetry=None): """ Convert ASE Atoms object to the one usable by SPRKKR. For the case of the usability it is a bit ugly hack: The __class__ attribute is replaced so the extra methods and properties of the objects will be available. Parameters ---------- obj: ase.Atoms The atoms object to be promoted to be used for SPRKKR calculations symmetry: boolean or None The sites property of the resulting object will consider the symmetry of the structure. I.e., the by-symmetry-equal atomic sites will share the same sites object. Default None is the same as True, however it does not change the symmetry of the already promoted obj passed into the routine. """ if obj and not isinstance(obj, SPRKKRAtoms): if obj.__class__ is Atoms: obj.__class__ = SPRKKRAtoms else: if not isinstance(obj, Atoms): raise(f'Can not promote class {obj} of class {obj.__class__} to {SPRKKRAtoms}') obj.__class__ = SprKKrAtomsEx obj._init(True if symmetry is None else symmetry) else: if symmetry is not None: obj.symmetry = symmetry return obj def __init__(self, *args, symmetry=True, potential=None, **kwargs): """ Creates SPRKKRAtoms atoms Parameters ---------- *args: list The positionals arguments of ase.Atoms.__init__ symmetry: boolean The symmetry will be computed when the sites property will be initialized. I.e., the by-symmetry-equal atomic sites will share the same sites object. 
**kwargs: dict The named arguments of ase.Atoms.__init__ """ self._init(symmetry, potential) super().__init__(*args, **kwargs) def _init(self, symmetry=True, potential=None): """ The initialization of the additional (not-in-ASE) properties. To be used by constructor and by promote_ase_atoms""" self._unique_sites = None self._potential = potential self._symmetry = symmetry @property def symmetry(self): """ Whether the sites property is/will be generated using symmetry, i.e. whether the Sites objects in the sites property will be shared among symmetric atomic sites. """ return self._symmetry @symmetry.setter def symmetry(self, value): """ Recomputes the sites with enabled/disabled symmetry if the value of the property has changed. """ if self._symmetry == value: return self._symmetry = value if self._unique_sites is not None: if value: self._compute_sites_symmetry() else: self._cancel_sites_symmetry() def compute_spacegroup_for_atomic_numbers(self, atomic_numbers=None, symprec=1e-5): """ Return spacegroup that suits to the atoms' cell structure and to the given atomic_numbers (not necessary the real ones, they can be just ''labels''). """ atomic_numbers = atomic_numbers if atomic_numbers is not None else self.get_atomic_numbers() sg = spglib.get_spacegroup((self.get_cell(), self.get_scaled_positions(), atomic_numbers), symprec=symprec) if sg is None: return None sg_no = int(sg[sg.find('(') + 1:sg.find(')')]) spacegroup = Spacegroup(sg_no) return spacegroup def compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-5): """ SPRKKR has some properties shared by all by-symmetry-equal sites. This method initializes _sites property, that holds these properties: makes identical all the atoms on the "symmetry identical positions" with the same atomic number. The method is called automatically when the sites property is firstly accessed. The effect of the method is the nearly same as setting the symmetry property. 
However, setting the symmetry property on an 'already symmetrized' object has no effect, while this methods always recompute the sites property. Parameters ---------- spacegroup: Spacegroup If not None, the given spacegroup is used for determining the symmetry, instead of the one determined by cell geometry. atomic_numbers: [ int ] Atomic numbers used to determine the spacegroup (if it is not given) to compute the symmetry. The atomic numbers can be ''virtual'', just to denote the equivalence of the sites. The array should have the same length as the number of atoms in the unit cell. If None, self.symbols are used. consider_old: bool If True, and _unique_sites is not None, the non-symmetry-equivalent sites won't be equivalent in the newly computed symmetry. symprec: float A threshold for spatial error for the symmetry computing. See spglib.get_spacegroup """ self._symmetry = True SPRKKRAtoms._compute_sites_symmetry(**locals()) def _compute_sites_symmetry(self, spacegroup=None, atomic_numbers=None, consider_old=False, symprec=1e-5): """ See compute_sites_symmetry - this metod does just the same, but it does not set the symmetry property.""" occupation = self.info.get('occupancy', {}) if not spacegroup and self._symmetry: if atomic_numbers: mapping = UniqueValuesMapping(atomic_numbers) else: mapping = UniqueValuesMapping(self.get_atomic_numbers()) if consider_old and self._unique_sites: mapping = mapping.merge(self._unique_sites) if occupation: mapping = mapping.merge(gen_occ()) spacegroup = self.compute_spacegroup_for_atomic_numbers(mapping.mapping, symprec=symprec) self.info['spacegroup'] = spacegroup if not spacegroup: return self.cancel_sites_symmetry() tags = spacegroup.tag_sites(self.get_scaled_positions()) mapping = mapping.merge( tags ) tags = mapping.mapping sites = np.empty(len(tags), dtype=object) uniq, umap = np.unique(tags, return_inverse = True) used = set() for i in range(len(uniq)): index = umap == i if self._unique_sites is not None: #first non-none 
of the given index possible = (i for i in self._unique_sites[index]) site = next(filter(None, possible), None) if site in used: site = site.copy() else: used.add(site) else: site = None if not site: symbol = self.symbols[ numpy_index(umap,i)] for ai in np.where(index)[0]: if ai in occupation and occupation[ai]: symbol = occupation[ai] site = Site(self, symbol) sites[index] = site self.sites = sites def cancel_sites_symmetry(self): """ Cancel the use of symmetry in the structure, i.e., makes the Site object uniqe (not shared) for each atomic site. Calling this method is nearly equivalent to the setting the symmetry property to False, however, this method always recompute the sites object, while setting symmetry=False recomputes the sites property only if it was previously set to False. """ self._symmetry = False self._cancel_sites_symmetry() def _cancel_sites_symmetry(self): """ See cancel_sites_symmetry - this metod does just the same, but it does not set the symmetry property.""" sites = np.empty(len(self), dtype=object) used = set() occupation = self.info.get('occupancy', {}) for i in range(len(self)): if self._unique_sites is not None: site=self._unique_sites[i] if site in used: site = site.copy() else: used.add(site) else: symbol = occupation[i] if i in occupation and occupation[i] else \ self.symbols[i] site = Site(self, symbol) sites[i] = site self.sites = sites @property def sites(self): """ The sites property holds all the information for the SPR-KKR package: atomic types (including number of semicore and valence electrons), occupancy, symmetries, meshes... Some of the properties are stored in the ASE atoms properties (e.g. occupancy, atomic symbol), however, ASE is not able to hold them all and/or to describe fully the SPR-KKR options; thus, these properties are hold in this array. 
The changes made on this array are reflected (as is possible) to the ASE properties, but the opposite does not hold - to reflect the changes in these properties please create a new Atoms object with given properties. """ if self._unique_sites is None: self._compute_sites_symmetry() return self._unique_sites @sites.setter def sites(self, v): """ Set the sites property and update all other dependent properties (symbols, occupancy) according to the sites """ an = np.zeros(len(v), dtype= int) occ = {} for i,j in enumerate(v): occ[i] = j.occupation.as_dict an[i] = j.occupation.primary_atomic_number self.set_atomic_numbers(an) self.info['occupancy'] = occ self._unique_sites = v @property @potential.setter #at the last - to avoid circular imports from ..potentials import potentials
[ 37811, 770, 2393, 4909, 49068, 16601, 49, 2953, 3150, 532, 281, 13105, 2196, 286, 1629, 3150, 284, 307, 973, 198, 4480, 49068, 16601, 49, 37227, 628, 198, 6738, 257, 325, 1330, 1629, 3150, 198, 6738, 11485, 11321, 13, 34642, 62, 27160, ...
2.392051
4,428
''' It would be nice if this module didn't need to import anything, since it defines (part of) the syntax of the language, and that and that seems like something that should be completely abstract. But macros make it possible to alter the syntax at run-time, meaning that keyword dispatch has to be cognizant of the mutable state of the machine! ### Is it "cheating" to include keyword_dispatch? It would be trivial to unroll it into a big ugly list of branch-if statements, so it doesn't really add any expressive power. Still, to mollify the skeptic, keyword_dispatch can be imagined as a piece of specialized hardware. Further, it can be stipulated that its use is relatively expensive, thereby gaining some advantage for analyze-interpretation. ''' from env import is_macro from stats import dispatch_stats DEFINE_KEYS = 'define', 'def' ASS_KEYS = 'set!', 'ass!' LAMBDA_KEYS = 'lambda', 'λ', 'fun' IF_KEYS = 'if', BEGIN_KEYS = 'begin', 'progn' QUOTE_KEYS = 'quote', QUASIQUOTE_KEYS = 'quasiquote', 'qsq' UNQUOTE_KEYS = 'unquote', 'unq' SPLICE_KEYS = 'splice', 'spl' DEFMACRO_KEYS = 'defmacro', 'defmac' ### @dispatch_stats ### ###
[ 7061, 6, 198, 220, 220, 220, 632, 561, 307, 3621, 611, 428, 8265, 1422, 470, 761, 284, 1330, 1997, 11, 198, 220, 220, 220, 1201, 340, 15738, 357, 3911, 286, 8, 262, 15582, 286, 262, 3303, 11, 290, 326, 198, 220, 220, 220, 290, 3...
2.943765
409
from datetime import datetime, timedelta from unittest.mock import MagicMock import pytest from scrapy.exceptions import DropItem from city_scrapers_core.constants import CANCELLED from city_scrapers_core.decorators import ignore_jscalendar from city_scrapers_core.items import Meeting from city_scrapers_core.pipelines import ( DiffPipeline, JSCalendarPipeline, MeetingPipeline, ) from city_scrapers_core.spiders import CityScrapersSpider
[ 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 6139, 44, 735, 198, 198, 11748, 12972, 9288, 198, 6738, 15881, 88, 13, 1069, 11755, 1330, 14258, 7449, 198, 198, 6738, 1748, 62, 1416, ...
2.955128
156
# -*- coding: utf-8 -*- import scrapy import json from locations.items import GeojsonPointItem from locations.hours import OpeningHours
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 15881, 88, 198, 11748, 33918, 198, 198, 6738, 7064, 13, 23814, 1330, 2269, 13210, 1559, 12727, 7449, 198, 6738, 7064, 13, 24425, 1330, 25522, 39792, 628 ]
3.365854
41
import unittest from stensorflow.basic.protocol.module_transform import module_transform,\ module_transform_withPRF import numpy as np from stensorflow.basic.basic_class.base import SharedTensorBase, SharedPairBase from stensorflow.global_var import StfConfig import tensorflow as tf from stensorflow.engine.start_server import start_local_server import os start_local_server(os.path.join(os.environ.get("stf_home", ".."), "conf", "config.json")) if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 336, 22854, 11125, 13, 35487, 13, 11235, 4668, 13, 21412, 62, 35636, 1330, 8265, 62, 35636, 11, 59, 198, 220, 220, 220, 8265, 62, 35636, 62, 4480, 4805, 37, 198, 11748, 299, 32152, 355, 45941, 198, 6...
2.994186
172
# # -*- coding: utf-8 -*- # # Copyright (c) 2021 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # import time import sys import numpy as np import unicodedata import six import re import tensorflow as tf from absl import app from argparse import ArgumentParser import pandas as pd from utils import tokenizer from utils.tokenizer import Subtokenizer from utils import metrics flags = tf.compat.v1.flags FLAGS = flags.FLAGS flags.DEFINE_integer("batch_size", 64, "run batch size") flags.DEFINE_string("input_graph", None, "The path of input model file.") flags.DEFINE_string("inputs_file", None, "File saved to an output file.") flags.DEFINE_string("reference_file", None, "File containing reference translation.") flags.DEFINE_string("vocab_file", None, "Path to subtoken vocabulary file.") flags.DEFINE_string("config", None, "Config json file") flags.DEFINE_string("output_model", None, "The output model of the quantized model.") flags.DEFINE_string("mode", "tune", "One of three options: 'benchmark'/'accuracy'/'tune'.") flags.DEFINE_integer("iters", -1, "The iteration used for benchmark.") uregex = UnicodeRegex() def collate_fn(batch): """Puts each data field into a pd frame with outer dimension batch size""" elem = batch[0] if isinstance(elem, tuple): batch = zip(*batch) return [collate_fn(samples) for samples in batch] elif isinstance(elem, np.ndarray): return [list(elem) for elem in batch] elif isinstance(elem, str): return batch else: return 
pd.DataFrame(batch).fillna(0).values.astype(np.int32) if __name__ == "__main__": tf.compat.v1.app.run()
[ 2, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 357, 66, 8, 33448, 8180, 10501, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, ...
2.561159
932
from django.apps import AppConfig from django.utils.translation import gettext_lazy as _
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 198, 6738, 42625, 14208, 13, 26791, 13, 41519, 1330, 651, 5239, 62, 75, 12582, 355, 4808, 628 ]
3.6
25
""" platform that offers a connection to a warmup4ie device. this platform is inspired by the following code: https://github.com/alyc100/SmartThingsPublic/tree/master/devicetypes/alyc100/\ warmup-4ie.src to setup this component, you need to register to warmup first. see https://my.warmup.com/login Then add to your configuration.yaml climate: - platform: warmup4ie name: YOUR_DESCRIPTION username: YOUR_E_MAIL_ADDRESS password: YOUR_PASSWORD location: YOUR_LOCATION_NAME room: YOUR_ROOM_NAME # the following issues are not yet implemented, since i have currently no need # for them # OPEN - holiday mode still missing # - commands for setting/retrieving programmed times missing """ import logging import requests _LOGGER = logging.getLogger(__name__) class Warmup4IEDevice(): """Representation of a warmup4ie device. According to the home assistant documentation this class should be packed and made available on PyPi. Perhaps later.... """ TOKEN_URL = 'https://api.warmup.com/apps/app/v1' URL = 'https://apil.warmup.com/graphql' APP_TOKEN = \ 'M=;He<Xtg"$}4N%5k{$:PD+WA"]D<;#PriteY|VTuA>_iyhs+vA"4lic{6-LqNM:' HEADER = {'user-agent': 'WARMUP_APP', 'accept-encoding': 'br, gzip, deflate', 'accept': '*/*', 'Connection': 'keep-alive', 'content-type': 'application/json', 'app-token': APP_TOKEN, 'app-version': '1.8.1', 'accept-language': 'de-de'} RUN_MODE = {0:'off', 1:'prog', 3:'fixed', 4:'frost', 5:'away'} #pylint: disable-msg=too-many-arguments def __init__(self, user, password, location, room, target_temp): """Initialize the climate device.""" _LOGGER.info("Setting up Warmup4IE component") self._user = user self._password = password self._location_name = location self._room_name = room self._target_temperature = target_temp self._warmup_access_token = None self._loc_id = None self._room = None self._current_temperature = 0 self._away = False self._on = True self.setup_finished = False token_ok = self._generate_access_token() location_ok = self._get_locations() room_ok = 
self.update_room() if token_ok and location_ok and room_ok: self.setup_finished = True def get_run_mode(self): """return current mode, e.g. 'off', 'fixed', 'prog'.""" if self._room is None: return 'off' return self.RUN_MODE[self._room['runModeInt']] def update_room(self): """Update room/device data for the given room name. """ # make sure the location is already configured if self._loc_id is None or \ self._warmup_access_token is None or \ self._room_name is None: return False body = { "query": "query QUERY{ user{ currentLocation: location { id name rooms{ id roomName runModeInt targetTemp currentTemp thermostat4ies {minTemp maxTemp}} }} } " } header_with_token = self.HEADER.copy() header_with_token['warmup-authorization'] = str(self._warmup_access_token) response = requests.post(url=self.URL, headers=header_with_token, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status'] != 'success': _LOGGER.error("updating new room failed, %s", response) return False # extract and store roomId for later use rooms = response.json()['data']['user']['currentLocation']['rooms'] room_updated = False for room in rooms: if room['roomName'] == self._room_name: self._room = room _LOGGER.info("Successfully updated data for room '%s' " "with ID %s", self._room['roomName'], self._room['id']) room_updated = True break if not room_updated: return False # update temperatures values self._target_temperature = int(self._room['targetTemp'])/10 self._target_temperature_low = int(self._room['thermostat4ies'][0]['minTemp'])/10 self._target_temperature_high = int(self._room['thermostat4ies'][0]['maxTemp'])/10 self._current_temperature = int(self._room['currentTemp'])/10 return True ''' def update_room(self): """Update room/device data for the given room name. 
""" # make sure the location is already configured if self._loc_id is None or \ self._warmup_access_token is None or \ self._room_name is None: return False body = { "account": { "email": self._user, "token": self._warmup_access_token}, "request": { "method": "getRooms", "locId": self._loc_id} } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error("updating room failed, %s", response) return False # extract and store roomId for later use rooms = response.json()['response']['rooms'] room_updated = False for room in rooms: if room['roomName'] == self._room_name: self._room = room _LOGGER.info("Successfully updated data for room '%s' " "with ID %s", self._room['roomName'], self._room['roomId']) room_updated = True break if not room_updated: return False # update temperatures values self._target_temperature = int(self._room['targetTemp'])/10 self._target_temperature_low = int(self._room['minTemp'])/10 self._target_temperature_high = int(self._room['maxTemp'])/10 self._current_temperature = int(self._room['currentTemp'])/10 return True ''' def get_target_temmperature(self): """return target temperature""" return self._target_temperature def get_current_temmperature(self): """return currrent temperature""" return self._current_temperature def get_target_temperature_low(self): """return minimum temperature""" return self._target_temperature_low def get_target_temperature_high(self): """return maximum temperature""" return self._target_temperature_high def _generate_access_token(self): """retrieve access token from server""" body = {'request': {'email': self._user, 'password': self._password, 'method': 'userLogin', 'appId': 'WARMUP-APP-V001'} } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if 
response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error("generating AccessToken failed, %s", response) return False # extract and store access token for later use self._warmup_access_token = response.json()['response']['token'] return True def _get_locations(self): """retrieve location ID that corrresponds to self._location_name""" # make sure we have an accessToken if self._warmup_access_token is None: return False body = { "account": { "email": self._user, "token": self._warmup_access_token }, "request": { "method": "getLocations" } } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error("initialising failed, %s", response) return False # extract and store locationId for later use locations = response.json()['response']['locations'] for loc in locations: if loc['name'] == self._location_name: self._loc_id = loc['id'] _LOGGER.info( "Successfully fetched location ID %s for location '%s'", self._loc_id, self._location_name) break if self._loc_id is None: return False return True def set_new_temperature(self, new_temperature): """set new target temperature""" # make sure the room/device is already configured if self._room is None or self._warmup_access_token is None: return body = { "account": { "email": self._user, "token": self._warmup_access_token }, "request": { "method": "setProgramme", "roomId": self._room['id'], "roomMode": "fixed", "fixed": { "fixedTemp": "{:03d}".format(int(new_temperature * 10)) } } } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error( "Setting new target temperature failed, %s", response) return response_temp = 
response.json()["message"]["targetTemp"] if new_temperature != int(response_temp)/10: _LOGGER.info("Server declined to set new target temperature " "to %.1f°C; response from server: '%s'", new_temperature, response.text) return self._target_temperature = new_temperature _LOGGER.info("Successfully set new target temperature to %.1f°C; " "response from server: '%s'", self._target_temperature, response.text) def set_temperature_to_auto(self): """set device to automatic mode""" # make sure the room/device is already configured if self._room is None or self._warmup_access_token is None: return body = { "account": { "email": self._user, "token": self._warmup_access_token }, "request": { "method": "setProgramme", "roomId": self._room['id'], "roomMode": "prog" } } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error( "Setting new target temperature to auto failed, %s", response) return _LOGGER.info("Successfully set new target temperature to auto, " "response from server: '%s'", response.text) def set_temperature_to_manual(self): """set device to manual mode""" # make sure the room/device is already configured if self._room is None or self._warmup_access_token is None: return body = { "account": { "email": self._user, "token": self._warmup_access_token }, "request": { "method": "setProgramme", "roomId": self._room['id'], "roomMode": "fixed" } } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error( "Setting new target temperature to " "manual failed, %s", response) return _LOGGER.info("Successfully set new target temperature to manual, " "response from server: '%s'", response.text) def 
set_location_to_frost(self): """set device to frost protection mode""" # make sure the room/device is already configured if self._loc_id is None or self._warmup_access_token is None: return body = { "account": { "email": self._user, "token": self._warmup_access_token }, "request": { "method": "setModes", "values": { "holEnd": "-", "fixedTemp": "", "holStart": "-", "geoMode": "0", "holTemp": "-", "locId": self._loc_id, "locMode": "frost" } } } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error( "Setting location to frost protection failed, %s", response) return _LOGGER.info("Successfully set location to frost protection, response " "from server: '%s'", response.text) def set_location_to_off(self): """ turn off device""" # make sure the room/device is already configured if self._loc_id is None or self._warmup_access_token is None: return body = { "account": { "email": self._user, "token": self._warmup_access_token }, "request": { "method": "setModes", "values": { "holEnd": "-", "fixedTemp": "", "holStart": "-", "geoMode": "0", "holTemp": "-", "locId": self._loc_id, "locMode": "off" } } } response = requests.post(url=self.TOKEN_URL, headers=self.HEADER, json=body) # check if request was acceppted and if request was successful if response.status_code != 200 or \ response.json()['status']['result'] != 'success': _LOGGER.error("Setting location to off mode failed, %s", response) return _LOGGER.info("Successfully set location to off mode, " "response from server: '%s'", response.text)
[ 37811, 198, 24254, 326, 4394, 257, 4637, 284, 257, 5814, 929, 19, 494, 3335, 13, 198, 198, 5661, 3859, 318, 7867, 416, 262, 1708, 2438, 25, 198, 5450, 1378, 12567, 13, 785, 14, 3400, 66, 3064, 14, 25610, 22248, 15202, 14, 21048, 14,...
2.106572
7,319
number_list = [x for x in range(1, 21)] print(number_list) filtered_list = filter(lambda x: x%2==0, number_list) filtered_list = list(filtered_list) print(filtered_list) square_list = map(lambda x: x**2, filtered_list) square_list = list(square_list) print(square_list) # filter(func, iterable) # map(func, iterable)
[ 17618, 62, 4868, 796, 685, 87, 329, 2124, 287, 2837, 7, 16, 11, 2310, 15437, 198, 198, 4798, 7, 17618, 62, 4868, 8, 198, 10379, 4400, 62, 4868, 796, 8106, 7, 50033, 2124, 25, 2124, 4, 17, 855, 15, 11, 1271, 62, 4868, 8, 198, 1...
2.596774
124
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. #
[ 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 220, 198, 2, 49962, 739, 262, 17168, 13789, 13, 220, 198, 2, 628, 628, 628, 628 ]
3.75
28
""" Components/Text Field ===================== .. seealso:: `Material Design spec, Text fields <https://material.io/components/text-fields>`_ .. rubric:: Text fields let users enter and edit text. .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-fields.png :align: center `KivyMD` provides the following field classes for use: - MDTextField_ - MDTextFieldRound_ - MDTextFieldRect_ .. Note:: :class:`~MDTextField` inherited from :class:`~kivy.uix.textinput.TextInput`. Therefore, most parameters and all events of the :class:`~kivy.uix.textinput.TextInput` class are also available in the :class:`~MDTextField` class. .. MDTextField: MDTextField ----------- :class:`~MDTextField` can be with helper text and without. Without helper text mode ------------------------ .. code-block:: kv MDTextField: hint_text: "No helper text" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-no-helper-mode.gif :align: center Helper text mode on ``on_focus`` event -------------------------------------- .. code-block:: kv MDTextField: hint_text: "Helper text on focus" helper_text: "This will disappear when you click off" helper_text_mode: "on_focus" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-helper-mode-on-focus.gif :align: center Persistent helper text mode --------------------------- .. code-block:: kv MDTextField: hint_text: "Persistent helper text" helper_text: "Text is always here" helper_text_mode: "persistent" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-helper-mode-persistent.gif :align: center Helper text mode `'on_error'` ---------------------------- To display an error in a text field when using the ``helper_text_mode: "on_error"`` parameter, set the `"error"` text field parameter to `True`: .. 
code-block:: python from kivy.lang import Builder from kivymd.app import MDApp KV = ''' BoxLayout: padding: "10dp" MDTextField: id: text_field_error hint_text: "Helper text on error (press 'Enter')" helper_text: "There will always be a mistake" helper_text_mode: "on_error" pos_hint: {"center_y": .5} ''' class Test(MDApp): def __init__(self, **kwargs): super().__init__(**kwargs) self.screen = Builder.load_string(KV) def build(self): self.screen.ids.text_field_error.bind( on_text_validate=self.set_error_message, on_focus=self.set_error_message, ) return self.screen def set_error_message(self, instance_textfield): self.screen.ids.text_field_error.error = True Test().run() .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-helper-mode-on-error.gif :align: center Helper text mode `'on_error'` (with required) -------------------------------------------- .. code-block:: kv MDTextField: hint_text: "required = True" required: True helper_text_mode: "on_error" helper_text: "Enter text" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-required.gif :align: center Text length control ------------------- .. code-block:: kv MDTextField: hint_text: "Max text length = 5" max_text_length: 5 .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-text-length.gif :align: center Multi line text --------------- .. code-block:: kv MDTextField: multiline: True hint_text: "Multi-line text" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-text-multi-line.gif :align: center Color mode ---------- .. code-block:: kv MDTextField: hint_text: "color_mode = 'accent'" color_mode: 'accent' Available options are `'primary'`, `'accent'` or `'custom`'. .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-color-mode.gif :align: center .. 
code-block:: kv MDTextField: hint_text: "color_mode = 'custom'" color_mode: 'custom' helper_text_mode: "on_focus" helper_text: "Color is defined by 'line_color_focus' property" line_color_focus: 1, 0, 1, 1 .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-color-mode-custom.gif :align: center .. code-block:: kv MDTextField: hint_text: "Line color normal" line_color_normal: app.theme_cls.accent_color .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-line-color-normal.png :align: center Rectangle mode -------------- .. code-block:: kv MDTextField: hint_text: "Rectangle mode" mode: "rectangle" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-rectangle-mode.gif :align: center .. MDTextFieldRect: MDTextFieldRect --------------- .. Note:: :class:`~MDTextFieldRect` inherited from :class:`~kivy.uix.textinput.TextInput`. You can use all parameters and attributes of the :class:`~kivy.uix.textinput.TextInput` class in the :class:`~MDTextFieldRect` class. .. code-block:: kv MDTextFieldRect: size_hint: 1, None height: "30dp" .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-rect.gif :align: center .. Warning:: While there is no way to change the color of the border. .. MDTextFieldRound: MDTextFieldRound ---------------- Without icon ------------ .. code-block:: kv MDTextFieldRound: hint_text: 'Empty field' .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-round.gif :align: center With left icon -------------- .. Warning:: The icons in the :class:`~MDTextFieldRound` are static. You cannot bind events to them. .. code-block:: kv MDTextFieldRound: icon_left: "email" hint_text: "Field with left icon" .. 
image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-round-left-icon.png :align: center With left and right icons ------------------------- .. code-block:: kv MDTextFieldRound: icon_left: 'key-variant' icon_right: 'eye-off' hint_text: 'Field with left and right icons' .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-round-left-right-icon.png :align: center Control background color ------------------------ .. code-block:: kv MDTextFieldRound: icon_left: 'key-variant' normal_color: app.theme_cls.accent_color .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-round-normal-color.gif :align: center .. code-block:: kv MDTextFieldRound: icon_left: 'key-variant' normal_color: app.theme_cls.accent_color color_active: 1, 0, 0, 1 .. image:: https://github.com/HeaTTheatR/KivyMD-data/raw/master/gallery/kivymddoc/text-field-round-active-color.gif :align: center .. seealso:: See more information in the :class:`~MDTextFieldRect` class. 
""" __all__ = ( "MDTextField", "MDTextFieldRect", "MDTextFieldRound", ) import sys from kivy.uix.label import Label from kivy.uix.textinput import TextInput from kivy.animation import Animation from kivy.graphics.context_instructions import Color from kivy.lang import Builder from kivy.properties import ( NumericProperty, StringProperty, BooleanProperty, OptionProperty, ListProperty, ObjectProperty, ) from kivy.metrics import dp from kivy.metrics import sp from kivymd.font_definitions import theme_font_styles from kivymd.theming import ThemableBehavior from kivymd.uix.label import MDIcon Builder.load_string( """ #:import images_path kivymd.images_path <MDTextField> canvas.before: Clear Color: rgba: self.line_color_normal if root.mode == "line" else [0, 0, 0, 0] Line: points: self.x, self.y + dp(16), self.x + self.width, self.y + dp(16) width: 1 dash_length: dp(3) dash_offset: 2 if self.disabled else 0 Color: rgba: self._current_line_color if root.mode == "line" else [0, 0, 0, 0] Rectangle: size: self._line_width, dp(2) pos: self.center_x - (self._line_width / 2), self.y + dp(16) Color: rgba: self._current_error_color Rectangle: texture: self._msg_lbl.texture size: self._msg_lbl.texture_size pos: self.x, self.y Color: rgba: self._current_right_lbl_color Rectangle: texture: self._right_msg_lbl.texture size: self._right_msg_lbl.texture_size pos: self.width-self._right_msg_lbl.texture_size[0]+dp(45), self.y Color: rgba: (self._current_line_color if self.focus and not \ self._cursor_blink else (0, 0, 0, 0)) Rectangle: pos: [int(x) for x in self.cursor_pos] size: 1, -self.line_height Color: rgba: self._current_hint_text_color Rectangle: texture: self._hint_lbl.texture size: self._hint_lbl.texture_size pos: self.x, self.y + self.height - self._hint_y Color: rgba: self.disabled_foreground_color if self.disabled else\ (self.hint_text_color if not self.text and not\ self.focus else self.foreground_color) Color: rgba: self._current_line_color Line: width: dp(1) if root.mode 
== "rectangle" else dp(0.00001) points: ( self.x + root._line_blank_space_right_hint_text, self.top - self._hint_lbl.texture_size[1] // 2, self.right + dp(12), self.top - self._hint_lbl.texture_size[1] // 2, self.right + dp(12), self.y, self.x - dp(12), self.y, self.x - dp(12), self.top - self._hint_lbl.texture_size[1] // 2, self.x + root._line_blank_space_left_hint_text, self.top - self._hint_lbl.texture_size[1] // 2 ) font_name: 'Roboto' foreground_color: app.theme_cls.text_color font_size: sp(16) bold: False padding: 0, dp(16), 0, dp(10) multiline: False size_hint_y: None height: self.minimum_height + dp(8) <TextfieldLabel> size_hint_x: None width: self.texture_size[0] shorten: True shorten_from: "right" <MDTextFieldRect> on_focus: root.anim_rect([root.x, root.y, root.right, root.y, root.right,\ root.top, root.x, root.top, root.x, root.y], 1) if root.focus\ else root.anim_rect([root.x - dp(60), root.y - dp(60),\ root.right + dp(60), root.y - dp(60), root.right + dp(60), root.top + dp(60),\ root.x - dp(60), root.top + dp(60),\ root.x - dp(60), root.y - dp(60)], 0) canvas.after: Color: rgba: root._primary_color Line: width: dp(1.5) points: ( self.x - dp(60), self.y - dp(60), self.right + dp(60), self.y - dp(60), self.right + dp(60), self.top + dp(60), self.x - dp(60), self.top + dp(60), self.x - dp(60), self.y - dp(60) ) <MDTextFieldRound>: multiline: False size_hint: 1, None height: self.line_height + dp(10) background_active: f'{images_path}transparent.png' background_normal: f'{images_path}transparent.png' padding: self._lbl_icon_left.texture_size[1] + dp(10) if self.icon_left else dp(15), \ (self.height / 2) - (self.line_height / 2), \ self._lbl_icon_right.texture_size[1] + dp(20) if self.icon_right else dp(15), \ 0 canvas.before: Color: rgba: self.normal_color if not self.focus else self._color_active Ellipse: angle_start: 180 angle_end: 360 pos: self.pos[0] - self.size[1] / 2, self.pos[1] size: self.size[1], self.size[1] Ellipse: angle_start: 360 angle_end: 
540 pos: self.size[0] + self.pos[0] - self.size[1]/2.0, self.pos[1] size: self.size[1], self.size[1] Rectangle: pos: self.pos size: self.size Color: rgba: self.line_color Line: points: self.pos[0] , self.pos[1], self.pos[0] + self.size[0], self.pos[1] Line: points: self.pos[0], self.pos[1] + self.size[1], self.pos[0] + self.size[0], self.pos[1] + self.size[1] Line: ellipse: self.pos[0] - self.size[1] / 2, self.pos[1], self.size[1], self.size[1], 180, 360 Line: ellipse: self.size[0] + self.pos[0] - self.size[1] / 2.0, self.pos[1], self.size[1], self.size[1], 360, 540 # Texture of left Icon. Color: rgba: self.icon_left_color Rectangle: texture: self._lbl_icon_left.texture size: self._lbl_icon_left.texture_size if self.icon_left \ else (0, 0) pos: self.x, \ self.center[1] - self._lbl_icon_right.texture_size[1] / 2 # Texture of right Icon. Color: rgba: self.icon_right_color Rectangle: texture: self._lbl_icon_right.texture size: self._lbl_icon_right.texture_size if self.icon_right \ else (0, 0) pos: (self.width + self.x) - (self._lbl_icon_right.texture_size[1]), \ self.center[1] - self._lbl_icon_right.texture_size[1] / 2 Color: rgba: root.theme_cls.disabled_hint_text_color if not self.focus \ else root.foreground_color """ )
[ 37811, 198, 7293, 3906, 14, 8206, 7663, 198, 4770, 1421, 28, 198, 198, 492, 766, 14508, 3712, 628, 220, 220, 220, 4600, 17518, 8495, 1020, 11, 8255, 7032, 1279, 5450, 1378, 33665, 13, 952, 14, 5589, 3906, 14, 5239, 12, 25747, 29, 63...
2.147641
6,719
from django.db import models from django.contrib.auth.models import AbstractUser, Group, User # Create your models here.
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 27741, 12982, 11, 4912, 11, 11787, 198, 2, 13610, 534, 4981, 994, 13, 198 ]
3.666667
33
""" Orca Extractor ============= #. :class:`.OrcaExtractor` Class to extract properties from Orca output. """ import re from .extractor import Extractor class OrcaExtractor(Extractor): """ Extracts properties from Orca 4.2 output files. Limited to final single point energy for now. Attributes ---------- output_file : :class:`str` Output file to extract properties from. output_lines : :class:`list` : :class:`str` :class:`list` of all lines in as :class:`str` in the output file. total_energy : :class:`float` The total energy in the :attr:`output_file` as :class:`float`. The energy is in units of a.u.. """ def _extract_values(self): """ Extract all properties from Orca output file. Returns ------- None : :class:`NoneType` """ for i, line in enumerate(self.output_lines): if self._check_line(line, 'total_energy'): self._extract_total_energy(line) def _extract_total_energy(self, line): """ Updates :attr:`total_energy`. Parameters ---------- line : :class:`str` Line of output file to extract property from. Returns ------- None : :class:`NoneType` """ nums = re.compile(r"[+-]?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?") string = nums.search(line.rstrip()).group(0) self.total_energy = float(string)
[ 37811, 198, 5574, 6888, 29677, 273, 198, 25609, 28, 198, 198, 2, 13, 1058, 4871, 25, 44646, 5574, 6888, 11627, 40450, 63, 198, 198, 9487, 284, 7925, 6608, 422, 1471, 6888, 5072, 13, 198, 198, 37811, 198, 198, 11748, 302, 198, 6738, ...
2.296923
650
# Copyright (c) 2020 Huawei Technologies Co., Ltd # Copyright (c) 2019, Facebook CORPORATION. # All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torchvision from export.cp_parser import *
[ 2, 15069, 357, 66, 8, 12131, 43208, 21852, 1766, 1539, 12052, 198, 2, 15069, 357, 66, 8, 13130, 11, 3203, 23929, 44680, 6234, 13, 220, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 347, 10305, 513, 12, 2601, 682,...
3.851064
188
from fabric.api import sudo, settings from .task import Task from .containers import conf from .utils import upload_config_template from . import system from . import pip __all__ = [ 'install_deps', 'install', 'push_config', 'disable_config', 'emperor', ] install_deps = InstallDeps() install = Install() push_config = PushConfig() disable_config = DisableConfig() emperor = Emperor()
[ 6738, 9664, 13, 15042, 1330, 21061, 11, 6460, 198, 198, 6738, 764, 35943, 1330, 15941, 198, 6738, 764, 3642, 50221, 1330, 1013, 198, 6738, 764, 26791, 1330, 9516, 62, 11250, 62, 28243, 198, 6738, 764, 1330, 1080, 198, 6738, 764, 1330, ...
3.036232
138
#!/usr/bin/env python2 # Copyright (c) 2014 The Bitcoin Core developers # Copyright (c) 2018 The Zencash developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.authproxy import JSONRPCException from test_framework.util import assert_equal, initialize_chain_clean, \ start_nodes, start_node, connect_nodes, stop_node, stop_nodes, \ sync_blocks, sync_mempools, connect_nodes_bi, wait_bitcoinds, p2p_port, check_json_precision import traceback import os,sys import shutil from random import randint from decimal import Decimal import logging import time if __name__ == '__main__': blockdelay().main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 17, 198, 2, 15069, 357, 66, 8, 1946, 383, 6185, 7231, 6505, 198, 2, 15069, 357, 66, 8, 2864, 383, 1168, 12685, 1077, 6505, 198, 2, 4307, 6169, 739, 262, 17168, 3788, 5964, 11, 766, 262,...
3.324895
237
if __name__ == '__main__': a = int(input("input a number: \n")) b = a >> 4 c = ~(~0 << 4) d = b & c print("%o\t%o" % (a, d))
[ 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 257, 796, 493, 7, 15414, 7203, 15414, 257, 1271, 25, 3467, 77, 48774, 198, 220, 220, 220, 275, 796, 257, 9609, 604, 198, 220, 220, 220, 269, 796, 5299, ...
1.858974
78
from celery import Celery, signals, Task from datetime import datetime, timezone, timedelta, date from math import floor import requests import pymssql import json import twitterWebsiteSearch.TwitterWebsiteSearch as twitSearch app = Celery('tasks') app.config_from_object('celeryconfig') ''' ''' @signals.worker_process_init.connect @signals.worker_process_shutdown.connect ''' ''' @app.task(base=TwitSearchTask, bind=True) @app.task ''' input query start date end date For each day in date range create a task (query,day) page through each page in query save each page to database '''
[ 6738, 18725, 1924, 1330, 15248, 1924, 11, 10425, 11, 15941, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 640, 11340, 11, 28805, 12514, 11, 3128, 198, 6738, 10688, 1330, 4314, 198, 198, 11748, 7007, 198, 11748, 279, 4948, 824, 13976, 198...
2.764957
234
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging import ssl as pyssl from functools import partial from pika import ( BasicProperties, BlockingConnection, ConnectionParameters, PlainCredentials, SelectConnection, SSLOptions, URLParameters, ) from pika.exceptions import AMQPError from pika.spec import PERSISTENT_DELIVERY_MODE from brewtils.errors import DiscardMessageException, RepublishRequestException from brewtils.request_handling import RequestConsumer from brewtils.schema_parser import SchemaParser class PikaClient(object): """Base class for connecting to RabbitMQ using Pika Args: host: RabbitMQ host port: RabbitMQ port user: RabbitMQ user password: RabbitMQ password connection_attempts: Maximum number of retry attempts heartbeat: Time between RabbitMQ heartbeats heartbeat_interval: DEPRECATED, use heartbeat virtual_host: RabbitMQ virtual host exchange: Default exchange that will be used ssl: SSL Options blocked_connection_timeout: If not None, the value is a non-negative timeout, in seconds, for the connection to remain blocked (triggered by Connection.Blocked from broker); if the timeout expires before connection becomes unblocked, the connection will be torn down, triggering the adapter-specific mechanism for informing client app about the closed connection (e.g., on_close_callback or ConnectionClosed exception) with `reason_code` of `InternalCloseReasons.BLOCKED_CONNECTION_TIMEOUT`. 
""" @property def connection_url(self): """str: Connection URL for this client's connection information""" virtual_host = self._conn_params.virtual_host if virtual_host == "/": virtual_host = "" return "amqp%s://%s:%s@%s:%s/%s" % ( "s" if self._ssl_enabled else "", self._conn_params.credentials.username, self._conn_params.credentials.password, self._conn_params.host, self._conn_params.port, virtual_host, ) def connection_parameters(self, **kwargs): """Get ``ConnectionParameters`` associated with this client Will construct a ``ConnectionParameters`` object using parameters passed at initialization as defaults. Any parameters passed in kwargs will override initialization parameters. Args: **kwargs: Overrides for specific parameters Returns: :obj:`pika.ConnectionParameters`: ConnectionParameters object """ credentials = PlainCredentials( username=kwargs.get("user", self._user), password=kwargs.get("password", self._password), ) conn_params = { "host": kwargs.get("host", self._host), "port": kwargs.get("port", self._port), "ssl_options": kwargs.get("ssl_options", self._ssl_options), "virtual_host": kwargs.get("virtual_host", self._virtual_host), "connection_attempts": kwargs.get( "connection_attempts", self._connection_attempts ), "heartbeat": kwargs.get( "heartbeat", kwargs.get("heartbeat_interval", self._heartbeat) ), "blocked_connection_timeout": kwargs.get( "blocked_connection_timeout", self._blocked_connection_timeout ), "credentials": credentials, } return ConnectionParameters(**conn_params) class TransientPikaClient(PikaClient): """Client implementation that creates new connection and channel for each action""" def setup_queue(self, queue_name, queue_args, routing_keys): """Create a new queue with queue_args and bind it to routing_keys""" with BlockingConnection(self._conn_params) as conn: conn.channel().queue_declare(queue_name, **queue_args) for routing_key in routing_keys: conn.channel().queue_bind( queue_name, self._exchange, routing_key=routing_key ) return 
{"name": queue_name, "args": queue_args} def publish(self, message, **kwargs): """Publish a message Args: message: Message to publish kwargs: Additional message properties Keyword Arguments: * *routing_key* -- Routing key to use when publishing * *headers* -- Headers to be included as part of the message properties * *expiration* -- Expiration to be included as part of the message properties * *confirm* -- Flag indicating whether to operate in publisher-acknowledgements mode * *mandatory* -- Raise if the message can not be routed to any queues * *priority* -- Message priority """ with BlockingConnection(self._conn_params) as conn: channel = conn.channel() if kwargs.get("confirm"): channel.confirm_delivery() properties = BasicProperties( app_id="beer-garden", content_type="text/plain", headers=kwargs.get("headers"), expiration=kwargs.get("expiration"), delivery_mode=kwargs.get("delivery_mode"), priority=kwargs.get("priority"), ) channel.basic_publish( exchange=self._exchange, routing_key=kwargs["routing_key"], body=message, properties=properties, mandatory=kwargs.get("mandatory"), ) class PikaConsumer(RequestConsumer): """Pika message consumer This consumer is designed to be fault-tolerant - if RabbitMQ closes the connection the consumer will attempt to reopen it. There are limited reasons why the connection may be closed from the broker side and usually indicates permission related issues or socket timeouts. Unexpected channel closures can indicate a problem with a command that was issued. Args: amqp_url: (str) The AMQP url to connect to queue_name: (str) The name of the queue to connect to on_message_callback (func): function called to invoke message processing. Must return a Future. 
panic_event (threading.Event): Event to be set on a catastrophic failure logger (logging.Logger): A configured Logger thread_name (str): Name to use for this thread max_concurrent: (int) Maximum requests to process concurrently max_reconnect_attempts (int): Number of times to attempt reconnection to message queue before giving up (default -1 aka never) max_reconnect_timeout (int): Maximum time to wait before reconnect attempt starting_reconnect_timeout (int): Time to wait before first reconnect attempt """ def run(self): """Run the consumer This method creates a connection to RabbitMQ and starts the IOLoop. The IOLoop will block and allow the SelectConnection to operate. This means that to stop the PikaConsumer we just need to stop the IOLoop. If the connection closed unexpectedly (the shutdown event is not set) then this will wait a certain amount of time and before attempting to restart it. Finally, if the maximum number of reconnect attempts have been reached the panic event will be set, which will end the PikaConsumer as well as the Plugin. Returns: None """ while not self._panic_event.is_set(): self._connection = self.open_connection() self._connection.ioloop.start() if not self._panic_event.is_set(): if 0 <= self._max_reconnect_attempts <= self._reconnect_attempt: self.logger.warning("Max connection failures, shutting down") self._panic_event.set() return self.logger.warning( "%s consumer has died, waiting %i seconds before reconnecting", self._queue_name, self._reconnect_timeout, ) self._panic_event.wait(self._reconnect_timeout) self._reconnect_attempt += 1 self._reconnect_timeout = min( self._reconnect_timeout * 2, self._max_reconnect_timeout ) def stop(self): """Cleanly shutdown It's a good idea to call stop_consuming before this to prevent new messages from being processed during shutdown. This sets the shutdown_event to let callbacks know that this is an orderly (requested) shutdown. 
It then schedules a channel close on the IOLoop - the channel's on_close callback will close the connection, and the connection's on_close callback will terminate the IOLoop which will end the PikaConsumer. Returns: None """ self.logger.debug("Stopping request consumer") if self._connection: self._connection.ioloop.add_callback_threadsafe( partial(self._connection.close) ) def is_connected(self): """Determine if the underlying connection is open Returns: True if the connection exists and is open, False otherwise """ return self._connection and self._connection.is_open def on_message(self, channel, basic_deliver, properties, body): """Invoked when a message is delivered from the queueing service Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. the properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. 
Args: channel (pika.channel.Channel): The channel object basic_deliver (pika.Spec.Basic.Deliver): basic_deliver method properties (pika.Spec.BasicProperties): Message properties body (bytes): The message body """ self.logger.debug( "Received message #%s from %s on channel %s: %s", basic_deliver.delivery_tag, properties.app_id, channel.channel_number, body, ) # Pika gives us bytes, but we want a string to be ok too try: body = body.decode() except AttributeError: pass try: future = self._on_message_callback(body, properties.headers) future.add_done_callback( partial(self.on_message_callback_complete, basic_deliver) ) except Exception as ex: requeue = not isinstance(ex, DiscardMessageException) self.logger.exception( "Exception while trying to schedule message %s, about to nack%s: %s" % (basic_deliver.delivery_tag, " and requeue" if requeue else "", ex) ) self._channel.basic_nack(basic_deliver.delivery_tag, requeue=requeue) def on_message_callback_complete(self, basic_deliver, future): """Invoked when the future returned by _on_message_callback completes. This method will be invoked from the threadpool context. It's only purpose is to schedule the final processing steps to take place on the connection's ioloop. Args: basic_deliver: future: Completed future Returns: None """ self._connection.ioloop.add_callback_threadsafe( partial(self.finish_message, basic_deliver, future) ) def finish_message(self, basic_deliver, future): """Finish processing a message This should be invoked as the final part of message processing. It's responsible for acking / nacking messages back to the broker. 
The main complexity here depends on whether the request processing future has an exception: - If there is no exception it acks the message - If there is an exception - If the exception is an instance of DiscardMessageException it acks the message and does not requeue it - If the exception is an instance of RepublishRequestException it will construct an entirely new BlockingConnection, use that to publish a new message, and then ack the original message - If the exception is not an instance of either the panic_event is set and the consumer will self-destruct Also, if there's ever an error acking a message the panic_event is set and the consumer will self-destruct. Args: basic_deliver: future: Completed future Returns: None """ delivery_tag = basic_deliver.delivery_tag if not future.exception(): try: self.logger.debug("Acking message %s", delivery_tag) self._channel.basic_ack(delivery_tag) except Exception as ex: self.logger.exception( "Error acking message %s, about to shut down: %s", delivery_tag, ex ) self._panic_event.set() else: real_ex = future.exception() if isinstance(real_ex, RepublishRequestException): try: with BlockingConnection(self._connection_parameters) as c: headers = real_ex.headers headers.update({"request_id": real_ex.request.id}) props = BasicProperties( app_id="beer-garden", content_type="text/plain", headers=headers, priority=1, delivery_mode=PERSISTENT_DELIVERY_MODE, ) c.channel().basic_publish( exchange=basic_deliver.exchange, properties=props, routing_key=basic_deliver.routing_key, body=SchemaParser.serialize_request(real_ex.request), ) self._channel.basic_ack(delivery_tag) except Exception as ex: self.logger.exception( "Error republishing message %s, about to shut down: %s", delivery_tag, ex, ) self._panic_event.set() elif isinstance(real_ex, DiscardMessageException): self.logger.info( "Nacking message %s, not attempting to requeue", delivery_tag ) self._channel.basic_nack(delivery_tag, requeue=False) else: # If request processing throws 
anything else we terminate self.logger.exception( "Unexpected exception during request %s processing, about " "to shut down: %s", delivery_tag, real_ex, exc_info=False, ) self._panic_event.set() def open_connection(self): """Opens a connection to RabbitMQ This method immediately returns the connection object. However, whether the connection was successful is not know until a callback is invoked (either on_open_callback or on_open_error_callback). Returns: The SelectConnection object """ return SelectConnection( parameters=self._connection_parameters, on_open_callback=self.on_connection_open, on_close_callback=self.on_connection_closed, on_open_error_callback=self.on_connection_closed, ) def on_connection_open(self, connection): """Connection open success callback This method is called by pika once the connection to RabbitMQ has been established. The only thing this actually does is call the open_channel method. Args: connection: The connection object Returns: None """ self.logger.debug("Connection opened: %s", connection) if self._reconnect_attempt: self.logger.info("%s consumer successfully reconnected", self._queue_name) self._reconnect_attempt = 0 self.open_channel() def on_connection_closed(self, connection, *args): """Connection closed callback This method is invoked by pika when the connection to RabbitMQ is closed. If the connection is closed we terminate its IOLoop to stop the PikaConsumer. In the case of an unexpected connection closure we'll wait 5 seconds before terminating with the expectation that the plugin will attempt to restart the consumer once it's dead. Args: connection: The connection args: Tuple of arguments describing why the connection closed For pika < 1: reply_code (Numeric code indicating close reason), reply_text (String describing close reason). For pika >= 1 exc (Exception describing close). 
Returns: None """ self.logger.debug("Connection %s closed: %s", connection, args) self._connection.ioloop.stop() def open_channel(self): """Open a channel""" self.logger.debug("Opening a new channel") self._connection.channel(on_open_callback=self.on_channel_open) def on_channel_open(self, channel): """Channel open success callback This will add a close callback (on_channel_closed) the channel and will call start_consuming to begin receiving messages. Args: channel: The opened channel object Returns: None """ self.logger.debug("Channel opened: %s", channel) self._channel = channel self._channel.add_on_close_callback(self.on_channel_closed) self.start_consuming() def on_channel_closed(self, channel, *args): """Channel closed callback This method is invoked by pika when the channel is closed. Channels are usually closed as a result of something that violates the protocol, such as attempting to re-declare an exchange or queue with different parameters. This indicates that something has gone wrong, so just close the connection (if it's still open) to reset. Args: channel: The channel args: Tuple of arguments describing why the channel closed For pika < 1: reply_code (Numeric code indicating close reason), reply_text (String describing close reason). For pika >= 1 exc (Exception describing close). Returns: None """ self.logger.debug("Channel %i closed: %s", channel, args) if self._connection.is_open: self._connection.close() def start_consuming(self): """Begin consuming messages The RabbitMQ prefetch is set to the maximum number of concurrent consumers. This ensures that messages remain in RabbitMQ until a consuming thread is available to process them. An on_cancel_callback is registered so that the consumer is notified if it is canceled by the broker. 
Returns: None """ self.logger.debug("Issuing consumer related RPC commands") self._channel.basic_qos(prefetch_count=self._max_concurrent) self._channel.add_on_cancel_callback(self.on_consumer_cancelled) self._consumer_tag = self._channel.basic_consume( queue=self._queue_name, on_message_callback=self.on_message ) def stop_consuming(self): """Stop consuming messages Sends a Basic.Cancel command to the broker, which causes the broker to stop sending the consumer messages. Returns: None """ if self._channel and self._channel.is_open: self.logger.debug("Stopping message consuming on channel %i", self._channel) self._connection.ioloop.add_callback_threadsafe( partial( self._channel.basic_cancel, consumer_tag=self._consumer_tag, callback=lambda *args: None, ) ) def on_consumer_cancelled(self, method_frame): """Consumer cancelled callback This is only invoked if the consumer is cancelled by the broker. Since that effectively ends the request consuming we close the channel to start the process of terminating the PikaConsumer. Args: method_frame (pika.frame.Method): The Basic.Cancel frame Returns: None """ self.logger.debug("Consumer was cancelled: %r", method_frame) if self._channel: self._connection.close()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 11748, 18931, 198, 11748, 264, 6649, 355, 12972, 45163, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 198, 6738,...
2.363071
9,169
from norm.executable import NormError, NormExecutable from norm.models import ListLambda, Lambda, Variable, Status import logging logger = logging.getLogger(__name__)
[ 6738, 2593, 13, 18558, 18187, 1330, 11220, 12331, 11, 11220, 23002, 18187, 198, 6738, 2593, 13, 27530, 1330, 7343, 43, 4131, 6814, 11, 21114, 6814, 11, 35748, 11, 12678, 198, 198, 11748, 18931, 198, 6404, 1362, 796, 18931, 13, 1136, 111...
3.489796
49
import re from allauth.account.forms import SignupForm from django import forms as form2 from django.contrib.auth import forms as admin_forms from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _ User = get_user_model()
[ 11748, 302, 198, 198, 6738, 477, 18439, 13, 23317, 13, 23914, 1330, 5865, 929, 8479, 198, 6738, 42625, 14208, 1330, 5107, 355, 1296, 17, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 5107, 355, 13169, 62, 23914, 198, 6738, ...
3.313253
83
#!/usr/bin/env python # # ---------------------------------------------------------------------- # # Brad T. Aagaard # U.S. Geological Survey # # <LicenseText> # # ---------------------------------------------------------------------- # ## @file cencalvm/__init__.py ## ## @brief Python top-level CenCalVM module initialization __all__ = ['CenCalVMDB'] # End of file
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 16529, 23031, 198, 2, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 8114, ...
2.818792
149
""" A bunch of variables that are intended to be shared across the Flask codebase """ from flask import Flask import db import logging MAIN_APP = Flask(__name__) LOGGER = MAIN_APP.logger LOGGER.setLevel(logging.INFO) MAIN_DB = db.Database(MAIN_APP).getDb() POSSIBLE_NEEDS = [ 'N95', 'N95s', 'Gloves', 'Safety Goggles', 'Face Shields', 'Surgical Masks', 'Surgical Mask', 'Disposable Booties', 'Thermometers', 'Thermometer', 'Disinfectant Wipes', 'Disinfectant Wipe', 'Disposable Booties', 'Currency' ]
[ 37811, 198, 32, 7684, 286, 9633, 326, 389, 5292, 198, 1462, 307, 4888, 1973, 262, 46947, 2438, 8692, 198, 37811, 198, 198, 6738, 42903, 1330, 46947, 198, 11748, 20613, 198, 11748, 18931, 198, 198, 5673, 1268, 62, 24805, 796, 46947, 7, ...
2.424242
231
import json
from datetime import datetime, timedelta

from django.db.models import Q

from contacts.models import Contact
from contacts.serializer import ContactSerializer
from common.models import User, Attachments, Comment
from common.custom_auth import JSONWebTokenAuthentication
# NOTE(review): CommentSerializer was imported twice in the original
# statement; the duplicate name has been dropped (the import set is
# unchanged).
from common.serializer import (
    UserSerializer,
    CommentSerializer,
    AttachmentsSerializer,
)
from events import swagger_params
from events.models import Event
from events.serializer import EventSerializer, EventCreateSerializer
from events.tasks import send_email
from teams.serializer import TeamsSerializer
from teams.models import Teams
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from drf_yasg.utils import swagger_auto_schema

# (value, label) choice pairs, e.g. for a Django model/form field's
# ``choices`` argument.
WEEKDAYS = (
    ("Monday", "Monday"),
    ("Tuesday", "Tuesday"),
    ("Wednesday", "Wednesday"),
    ("Thursday", "Thursday"),
    ("Friday", "Friday"),
    ("Saturday", "Saturday"),
    ("Sunday", "Sunday"),
)
[ 6738, 42625, 14208, 13, 9945, 13, 27530, 1330, 1195, 198, 6738, 13961, 13, 27530, 1330, 14039, 198, 6738, 13961, 13, 46911, 7509, 1330, 14039, 32634, 7509, 198, 198, 6738, 2219, 13, 27530, 1330, 11787, 11, 3460, 620, 902, 11, 18957, 198...
3.582043
323
import numpy as np

from ecog.signal_processing import linenoise_notch


def test_linenoise_notch_return():
    """The notch filter must hand back an array shaped like its input."""
    sampling_rate = 200
    raw_signal = np.random.randn(32, 1000)
    notched = linenoise_notch(raw_signal, sampling_rate)
    assert notched.shape == raw_signal.shape
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 9940, 519, 13, 12683, 282, 62, 36948, 1330, 9493, 23397, 786, 62, 1662, 354, 198, 198, 4299, 1332, 62, 2815, 23397, 786, 62, 1662, 354, 62, 7783, 33529, 198, 220, 220, 220, 37227, 198, ...
2.378378
111
#Splitter import cv2 import numpy.random as random import numpy as np import os import time import skimage.io as io from AGCWD import* #borders #mitochondria #mitochondria borders #PSD #vesicles file_dir_arr = ["axon", "mitochondria", "PSD", "vesicles", "boundaries","mitochondrial boundaries"] name_list = [] mask_list = [] out_dir = "cutting data" size_data_arr = [256,512,768] size_step_arr = [128,256,256] for i in range(len(size_data_arr)): size_data = size_data_arr[i] size_step = size_step_arr[i] if not os.path.isdir(out_dir): print("создаю out_dir:" + out_dir) os.makedirs(out_dir) dir_input_img = "original data/original/" dir_input_mask ="original data/" for img_name in os.listdir(dir_input_img): count = 0 if is_Img(os.path.join(dir_input_img, img_name)): img = io.imread(os.path.join(dir_input_img, img_name)) if len(img.shape) == 3: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img = agcwd(img) h,w = img.shape[0:2] if not os.path.isdir(out_dir+"/original"): print("создаю out_dir:" + "original") os.makedirs(out_dir+"/original") for start_y in range(0,h, size_step): if (h - start_y < size_data): continue for start_x in range(0,w, size_step): if (w - start_x < size_data): continue cutting_img = img[start_y:start_y+size_data, start_x:start_x+size_data] cv2.imwrite(out_dir + "/original/" + img_name + "_" + str(size_data) +"_" + str(size_step) +"_" +str(count)+".png", cutting_img) count+=1 else: continue for i,dir_name in enumerate(file_dir_arr): for img_name in os.listdir(dir_input_mask + dir_name): if is_Img(os.path.join(dir_input_mask + dir_name, img_name)): img = cv2.imread(os.path.join(dir_input_mask +dir_name, img_name), 0) img[img < 128] = 0 img[img > 127] = 255 if name_list.count(img_name) == 0: name_list.append(img_name) mask_list.append(np.zeros((len(file_dir_arr),)+ img.shape, np.uint8)) index = name_list.index(img_name) mask_list[index][i] = img else: continue print(name_list) for index, mask_stack in enumerate(mask_list): count = 0 for i,dir_name in 
enumerate(file_dir_arr): local_count = count mask_write = mask_stack[i] h,w = mask_write.shape[0:2] if not os.path.isdir(out_dir+"/"+dir_name): print("создаю out_dir:" + "mask") os.makedirs(out_dir+"/"+dir_name ) for start_y in range(0,h, size_step): if (h - start_y < size_data): continue for start_x in range(0,w, size_step): if (w - start_x < size_data): continue cutting_mask = mask_write[start_y:start_y+size_data, start_x:start_x+size_data] cv2.imwrite(out_dir+"/"+dir_name +"/" + name_list[index] + "_" + str(size_data) +"_" + str(size_step) +"_" +str(local_count)+".png", cutting_mask) local_count+=1
[ 2, 26568, 1967, 198, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 13, 25120, 355, 4738, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 640, 198, 198, 11748, 1341, 9060, 13, 952, 355, 33245, 198, 6738, 13077, 34...
2.07153
1,412
import pytest from backend.common.consts.auth_type import AuthType from backend.common.models.api_auth_access import ApiAuthAccess
[ 11748, 12972, 9288, 198, 198, 6738, 30203, 13, 11321, 13, 1102, 6448, 13, 18439, 62, 4906, 1330, 26828, 6030, 198, 6738, 30203, 13, 11321, 13, 27530, 13, 15042, 62, 18439, 62, 15526, 1330, 5949, 72, 30515, 15457, 628, 628, 628, 628, 6...
3.325581
43
import os import hashlib
[ 11748, 28686, 198, 11748, 12234, 8019 ]
4
6
#!/usr/bin/python

import boto3
import click
from pybase64 import b64encode

from asym_crypto_yaml import (
    load,
    Encrypted,
    decrypt_value,
    load_private_key_from_file,
    load_private_key_from_string
)


def perform_deploy_lambda_envs(config_file_path, private_key_content, private_key_path, kms_key_arn, lambda_name):
    """Load the private key, then deploy the app's secrets to its lambda.

    :config_file_path = path to config file
    :private_key_content = content of private key
    :private_key_path = path to the private key
    :kms_key_arn = arn for an aws kms_key
    :lambda_name = name of an aws lambda function

    Raises ValueError when neither a key path nor key content is supplied.
    """
    if private_key_path is not None:
        key = load_private_key_from_file(private_key_path)
    elif private_key_content is not None:
        # GoCD mangles the encrypted key when it is passed in this way;
        # the replacements below unmangle it before parsing.
        restored = private_key_content.replace(' ', '\n')
        restored = restored.replace('-----BEGIN\nRSA\nPRIVATE\nKEY-----',
                                    '-----BEGIN RSA PRIVATE KEY-----')
        restored = restored.replace('-----END\nRSA\nPRIVATE\nKEY-----',
                                    '-----END RSA PRIVATE KEY-----')
        key = load_private_key_from_string(restored.encode('utf-8'))
    else:
        key = None

    if key is None:
        raise ValueError('You must specify the private key either by PRIVATE_KEY ENV, or with private-key-path')

    push_config_and_secrets_to_lambda_env(config_file_path, key, kms_key_arn, lambda_name)


def push_config_and_secrets_to_lambda_env(config_file_path, private_key, kms_key_arn, lambda_name):
    """Push config values (secrets re-encrypted via KMS) to a lambda's env.

    The application will have to decrypt each KMS-encrypted value itself.

    :config_file_path = path to config file
    :private_key = private key of application
    :kms_key_arn = arn for an aws kms_key
    :lambda_name = name of an aws lambda function
    """
    with open(config_file_path) as config_file:
        raw_config = load(config_file)
    if raw_config is None:
        raw_config = {}

    env_vars = {}
    for name, value in raw_config.items():
        if type(value) == Encrypted:
            # Decrypt with the app key, then re-encrypt under the KMS key
            # so only the lambda's runtime role can read it.
            env_vars[name] = kms_encrypt(kms_key_arn, decrypt_value(value, private_key))
        else:
            env_vars[name] = value

    boto3.client('lambda').update_function_configuration(
        FunctionName=lambda_name,
        Environment={'Variables': env_vars},
    )


def kms_encrypt(kms_key_arn, value):
    """Encrypt *value* with AWS KMS and return it base64-encoded as a str.

    :kms_key_arn = arn for an aws kms_key
    :value = the value of an environment variable
    """
    kms = boto3.client('kms')
    ciphertext = kms.encrypt(KeyId=kms_key_arn, Plaintext=value)['CiphertextBlob']
    return b64encode(ciphertext).decode()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 275, 2069, 18, 198, 11748, 3904, 198, 6738, 12972, 8692, 2414, 1330, 275, 2414, 268, 8189, 198, 198, 6738, 355, 4948, 62, 29609, 78, 62, 88, 43695, 1330, 357, 198, 220, 220, 220...
2.503241
1,234
import numpy as np
import re
import random
import json
import collections
import parameters as params
import pickle
import nltk

# args = params.argparser("lstm petModel-0 --keep_rate 0.9 --seq_length 25 --emb_train")
# FIXED_PARAMETERS = params.load_parameters(args)
# Global hyper-parameter dict; keys used below: "seq_length",
# "word_embedding_dim", "embeddings_to_load".
FIXED_PARAMETERS = params.load_parameters()

# NLI gold labels -> integer class ids.  "hidden" (unlabeled test data) is
# mapped to 0 so such examples still carry a valid placeholder label.
LABEL_MAP = {
    "entailment": 0,
    "neutral": 1,
    "contradiction": 2,
    "hidden": 0
}

# Special vocabulary entries; they occupy indices 0 and 1 respectively.
PADDING = "<PAD>"
UNKNOWN = "<UNK>"

def load_nli_data(path, snli=False):
    """
    Load MultiNLI or SNLI data from a JSONL file.

    Examples whose gold label is not in LABEL_MAP (e.g. "-") are dropped.
    If the "snli" parameter is set to True, a genre label of "snli" will be
    assigned to the data.  The result is shuffled with a fixed seed so the
    order is reproducible.
    """
    data = []
    with open(path) as f:
        for line in f:
            loaded_example = json.loads(line)
            if loaded_example["gold_label"] not in LABEL_MAP:
                continue
            loaded_example["label"] = LABEL_MAP[loaded_example["gold_label"]]
            if snli:
                loaded_example["genre"] = "snli"
            data.append(loaded_example)
    random.seed(1)
    random.shuffle(data)
    return data

def load_nli_data_genre(path, genre, snli=True):
    """
    Load a specific genre's examples from MultiNLI, or load SNLI data and
    assign a "snli" genre to the examples.

    If the "snli" parameter is set to True, a genre label of "snli" will be
    assigned to the data, overwriting the genre label for MultiNLI data.
    Only examples whose (possibly overwritten) genre equals ``genre`` are kept.
    """
    data = []
    j = 0  # NOTE(review): unused counter kept for byte-compatibility
    with open(path) as f:
        for line in f:
            loaded_example = json.loads(line)
            if loaded_example["gold_label"] not in LABEL_MAP:
                continue
            loaded_example["label"] = LABEL_MAP[loaded_example["gold_label"]]
            if snli:
                loaded_example["genre"] = "snli"
            if loaded_example["genre"] == genre:
                data.append(loaded_example)
    random.seed(1)
    random.shuffle(data)
    return data

def build_dictionary(training_datasets):
    """
    Extract vocabulary and build dictionary mapping word -> index.

    Indices 0 and 1 are reserved for PADDING and UNKNOWN.
    NOTE(review): tokenize() is not defined in this chunk -- presumably a
    helper elsewhere in this module that splits the binary parse; confirm.
    """
    word_counter = collections.Counter()
    for i, dataset in enumerate(training_datasets):
        for example in dataset:
            word_counter.update(tokenize(example['sentence1_binary_parse']))
            word_counter.update(tokenize(example['sentence2_binary_parse']))

    vocabulary = set([word for word in word_counter])
    vocabulary = list(vocabulary)
    vocabulary = [PADDING, UNKNOWN] + vocabulary

    word_indices = dict(zip(vocabulary, range(len(vocabulary))))

    return word_indices

def build_dictionary_ngrams(training_datasets):
    """
    Extract vocabulary and build unigram, bigram and trigram dictionaries.

    Returns three word->index dicts (uni, bi, tri), each with PADDING and
    UNKNOWN at indices 0 and 1.  Bigram/trigram keys are tuples as produced
    by nltk.bigrams / nltk.trigrams.
    """
    word_counter_unigrams = collections.Counter()
    word_counter_bigrams = collections.Counter()
    word_counter_trigrams = collections.Counter()
    for i, dataset in enumerate(training_datasets):
        for example in dataset:
            sent1_tokenized = tokenize(example['sentence1_binary_parse'])
            sent2_tokenized = tokenize(example['sentence2_binary_parse'])
            bigrams1 = nltk.bigrams(sent1_tokenized)
            bigrams2 = nltk.bigrams(sent2_tokenized)
            trigrams1 = nltk.trigrams(sent1_tokenized)
            trigrams2 = nltk.trigrams(sent2_tokenized)
            word_counter_bigrams.update(bigrams1)
            word_counter_bigrams.update(bigrams2)
            word_counter_trigrams.update(trigrams1)
            word_counter_trigrams.update(trigrams2)
            word_counter_unigrams.update(sent1_tokenized)
            word_counter_unigrams.update(sent2_tokenized)

    vocabulary_uni = set([word for word in word_counter_unigrams])
    vocabulary_uni = list(vocabulary_uni)
    vocabulary_uni = [PADDING, UNKNOWN] + vocabulary_uni
    word_indices_uni = dict(zip(vocabulary_uni, range(len(vocabulary_uni))))

    vocabulary_bi = set([word for word in word_counter_bigrams])
    vocabulary_bi = list(vocabulary_bi)
    vocabulary_bi = [PADDING, UNKNOWN] + vocabulary_bi
    word_indices_bi = dict(zip(vocabulary_bi, range(len(vocabulary_bi))))

    vocabulary_tri = set([word for word in word_counter_trigrams])
    vocabulary_tri = list(vocabulary_tri)
    vocabulary_tri = [PADDING, UNKNOWN] + vocabulary_tri
    word_indices_tri = dict(zip(vocabulary_tri, range(len(vocabulary_tri))))

    return word_indices_uni, word_indices_bi, word_indices_tri

def sentences_to_padded_index_sequences(word_indices, datasets):
    """
    Annotate datasets in place with feature vectors: each sentence gets a
    fixed-length int32 index sequence with right-sided padding.

    Tokens past seq_length are truncated; OOV tokens map to UNKNOWN.
    """
    for i, dataset in enumerate(datasets):
        for example in dataset:
            for sentence in ['sentence1_binary_parse', 'sentence2_binary_parse']:
                # print("sentence is", sentence)
                example[sentence + '_index_sequence'] = np.zeros((FIXED_PARAMETERS["seq_length"]), dtype=np.int32)

                token_sequence = tokenize(example[sentence])
                # NOTE(review): `padding` is computed but never used; the
                # inner loop below also shadows the outer `i`.
                padding = FIXED_PARAMETERS["seq_length"] - len(token_sequence)

                for i in range(FIXED_PARAMETERS["seq_length"]):
                    if i >= len(token_sequence):
                        index = word_indices[PADDING]
                    else:
                        if token_sequence[i] in word_indices:
                            index = word_indices[token_sequence[i]]
                        else:
                            index = word_indices[UNKNOWN]
                    example[sentence + '_index_sequence'][i] = index

def sentences_to_padded_index_sequences_ngrams(word_indices, word_indices_bi, word_indices_tri, datasets):
    """
    Annotate datasets in place with unigram, bigram and trigram index
    sequences, each of fixed length with right-sided padding.

    All three sequences share FIXED_PARAMETERS["seq_length"], even though
    a sentence has one fewer bigram and two fewer trigrams than tokens.
    """
    for i, dataset in enumerate(datasets):
        for example in dataset:
            for sentence in ['sentence1_binary_parse', 'sentence2_binary_parse']:
                # print("sentence is", sentence)
                example[sentence + '_index_sequence'] = np.zeros((FIXED_PARAMETERS["seq_length"]), dtype=np.int32)
                example[sentence + '_index_sequence_bi'] = np.zeros((FIXED_PARAMETERS["seq_length"]), dtype=np.int32)
                example[sentence + '_index_sequence_tri'] = np.zeros((FIXED_PARAMETERS["seq_length"]), dtype=np.int32)

                token_sequence = tokenize(example[sentence])
                padding = FIXED_PARAMETERS["seq_length"] - len(token_sequence)

                # Unigram indices.
                for i in range(FIXED_PARAMETERS["seq_length"]):
                    if i >= len(token_sequence):
                        index = word_indices[PADDING]
                    else:
                        if token_sequence[i] in word_indices:
                            index = word_indices[token_sequence[i]]
                        else:
                            index = word_indices[UNKNOWN]
                    example[sentence + '_index_sequence'][i] = index

                # Bigram indices (keys are 2-tuples of tokens).
                token_sequence_bi = list(nltk.bigrams(token_sequence))
                padding_bi = FIXED_PARAMETERS["seq_length"] - len(token_sequence_bi)
                for i in range(FIXED_PARAMETERS["seq_length"]):
                    if i >= len(token_sequence_bi):
                        index = word_indices_bi[PADDING]
                    else:
                        if token_sequence_bi[i] in word_indices_bi:
                            index = word_indices_bi[token_sequence_bi[i]]
                        else:
                            index = word_indices_bi[UNKNOWN]
                    example[sentence + '_index_sequence_bi'][i] = index

                # Trigram indices (keys are 3-tuples of tokens).
                token_sequence_tri = list(nltk.trigrams(token_sequence))
                padding_tri = FIXED_PARAMETERS["seq_length"] - len(token_sequence_tri)
                for i in range(FIXED_PARAMETERS["seq_length"]):
                    if i >= len(token_sequence_tri):
                        index = word_indices_tri[PADDING]
                    else:
                        if token_sequence_tri[i] in word_indices_tri:
                            index = word_indices_tri[token_sequence_tri[i]]
                        else:
                            index = word_indices_tri[UNKNOWN]
                    example[sentence + '_index_sequence_tri'][i] = index

def loadEmbedding_zeros(path, word_indices):
    """
    Load GloVe embeddings from a text file, initializing OOV words to a
    vector of zeros.

    Only the first FIXED_PARAMETERS["embeddings_to_load"] lines are read
    when that parameter is not None.
    NOTE(review): `!= None` should idiomatically be `is not None`.
    """
    emb = np.zeros((len(word_indices), FIXED_PARAMETERS["word_embedding_dim"]), dtype='float32')

    with open(path, 'r') as f:
        for i, line in enumerate(f):
            if FIXED_PARAMETERS["embeddings_to_load"] != None:
                if i >= FIXED_PARAMETERS["embeddings_to_load"]:
                    break
            s = line.split()
            if s[0] in word_indices:
                emb[word_indices[s[0]], :] = np.asarray(s[1:])

    return emb

def loadEmbedding_rand(path, word_indices):
    """
    Load GloVe embeddings, using a random normal initialization for OOV
    words.  Rows 0 and 1 (<PAD> and <UNK>) are explicitly zeroed.
    """
    n = len(word_indices)
    m = FIXED_PARAMETERS["word_embedding_dim"]
    emb = np.empty((n, m), dtype=np.float32)

    emb[:,:] = np.random.normal(size=(n,m))

    # Explicitly assign embedding of <PAD> to be zeros.
    # (The (1, m) zeros broadcast over both reserved rows.)
    emb[0:2, :] = np.zeros((1,m), dtype="float32")

    with open(path, 'r') as f:
        for i, line in enumerate(f):
            if FIXED_PARAMETERS["embeddings_to_load"] != None:
                if i >= FIXED_PARAMETERS["embeddings_to_load"]:
                    break
            s = line.split()
            if s[0] in word_indices:
                emb[word_indices[s[0]], :] = np.asarray(s[1:])

    return emb
[ 11748, 299, 32152, 355, 45941, 198, 11748, 302, 198, 11748, 4738, 198, 11748, 33918, 198, 11748, 17268, 198, 11748, 10007, 355, 42287, 198, 11748, 2298, 293, 198, 11748, 299, 2528, 74, 198, 198, 2, 26498, 796, 42287, 13, 853, 48610, 720...
2.073798
4,702
from __future__ import unicode_literals import re from collections import OrderedDict, defaultdict from conllu.compat import text DEFAULT_FIELDS = ('id', 'form', 'lemma', 'upostag', 'xpostag', 'feats', 'head', 'deprel', 'deps', 'misc') INTEGER = re.compile(r"^0|(\-?[1-9][0-9]*)$") ID_SINGLE = re.compile(r"^[1-9][0-9]*$") ID_RANGE = re.compile(r"^[1-9][0-9]*\-[1-9][0-9]*$") ID_DOT_ID = re.compile(r"^[0-9][0-9]*\.[1-9][0-9]*$") deps_pattern = r"\d+:[a-z][a-z_-]*(:[a-z][a-z_-]*)?" MULTI_DEPS_PATTERN = re.compile(r"^{}(\|{})*$".format(deps_pattern, deps_pattern))
[ 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 302, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 11, 4277, 11600, 198, 198, 6738, 369, 297, 84, 13, 5589, 265, 1330, 2420, 198, 198, 7206, 38865, 62, 11674...
1.949153
295
import numpy as np from skimage.morphology import medial_axis from skimage.util import invert
[ 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 9060, 13, 24503, 1435, 1330, 48174, 62, 22704, 198, 6738, 1341, 9060, 13, 22602, 1330, 287, 1851, 628, 628 ]
3.592593
27
# -*- coding: utf-8 -*- import sys import os import json import http.client import urllib sys.path.append('../') from utils.NetworkingUtils import NetworkingUtils from utils.ConstantUtils import ConstantUtils ''' The methods of this class manage requests that are related to the Github information stored in the project database. The ScrapingController also has methods related to Github, however, they are used only to get info from the actual Github API and store it into the dababase of this project. ''' class GithubController(): ''' Returns a list of all Github user ids in the database that are from a location '''
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 2638, 13, 16366, 198, 11748, 2956, 297, 571, 198, 198, 17597, 13, 6978, 13, 33295, 10786, 40720, 11537...
3.416667
192
import torch from .BaseEmbeddingModel import BaseEmbeddingModel
[ 11748, 28034, 198, 198, 6738, 764, 14881, 31567, 6048, 278, 17633, 1330, 7308, 31567, 6048, 278, 17633, 628 ]
3.666667
18
# # Copyright (C) 2010-2017 Samuel Abels # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import print_function, absolute_import import re from .string import String # Matches any opening parenthesis that is neither preceded by a backslash # nor has a "?:" or "?<" appended. bracket_re = re.compile(r'(?<!\\)\((?!\?[:<])', re.I) modifier_grammar = ( ('modifier', r'[i]'), ('invalid_char', r'.'), ) modifier_grammar_c = [] for thetype, regex in modifier_grammar: modifier_grammar_c.append((thetype, re.compile(regex, re.M | re.S)))
[ 2, 198, 2, 15069, 357, 34, 8, 3050, 12, 5539, 17100, 2275, 1424, 198, 2, 383, 17168, 13789, 357, 36393, 8, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 198, 2, 257, 4866, 286, 428...
3.321577
482
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from pants.backend.jvm.tasks.jvmdoc_gen import Jvmdoc, JvmdocGen from pants.java.distribution.distribution import Distribution from pants.java.executor import SubprocessExecutor from pants.util.memo import memoized
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 1946, 41689, 1628, 20420, 357, 3826, 27342, 9865, 3843, 20673, 13, 9132, 737, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 3826, 38559, 24290, 737, 198, 198, 6738, 1...
3.08982
167
from .merchant_authorization import MerchantAuthorizationClient from .merchant_client import MerchantClient __version__ = "0.0.5"
[ 6738, 764, 647, 8907, 62, 9800, 1634, 1330, 33508, 13838, 1634, 11792, 198, 6738, 764, 647, 8907, 62, 16366, 1330, 33508, 11792, 198, 198, 834, 9641, 834, 796, 366, 15, 13, 15, 13, 20, 1, 198 ]
3.638889
36
from random import uniform
from time import sleep
from typing import List, Optional, Union

from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin

from spell.client import SpellClient as ExternalSpellClient
from spell.client.runs import Run as ExternalSpellRun
from spell.client.runs import RunsService as ExternalSpellRunsService

# Run states that mean a Spell run has not yet finished.
STILL_RUNNING = [
    ExternalSpellRunsService.BUILDING,
    ExternalSpellRunsService.PUSHING,
    ExternalSpellRunsService.RUNNING,
    ExternalSpellRunsService.SAVING,
]


def _delay(delay: Union[int, float, None] = None):
    """
    Pause execution for ``delay`` seconds.

    :param delay: a delay to pause execution using ``time.sleep(delay)``;
        a small 1 second jitter is applied to the delay.
    :type delay: Optional[Union[int, float]]

    .. note::
        When ``delay`` is None, a default random delay is used, i.e.
        ``random.uniform(DEFAULT_DELAY_MIN, DEFAULT_DELAY_MAX)``; a random
        interval helps to avoid API rate limits when many concurrent tasks
        poll run status.  (The original note mentioned "AWS API throttle
        limits" and "job-descriptions", which looks like a copy/paste from
        an AWS sensor -- this module talks to the Spell API.)
    """
    if delay is None:
        # NOTE(review): SpellClient (with DEFAULT_DELAY_MIN/MAX) is not the
        # imported ExternalSpellClient -- presumably a class defined later in
        # this module; confirm.
        delay = uniform(SpellClient.DEFAULT_DELAY_MIN, SpellClient.DEFAULT_DELAY_MAX)
    else:
        delay = _add_jitter(delay)
    sleep(delay)


def _add_jitter(
    delay: Union[int, float],
    width: Union[int, float] = 1,
    minima: Union[int, float] = 0,
) -> float:
    """
    Use delay +/- width for random jitter.

    Adding jitter to status polling can help to avoid Spell API limits for
    monitoring spell jobs with a high concurrency in Airflow tasks.

    :param delay: number of seconds to pause;
        delay is assumed to be a positive number
    :type delay: Union[int, float]
    :param width: delay +/- width for random jitter;
        width is assumed to be a positive number
    :type width: Union[int, float]
    :param minima: minimum delay allowed;
        minima is assumed to be a non-negative number
    :type minima: Union[int, float]
    :return: uniform(delay - width, delay + width) jitter and it is a
        non-negative number (abs() is applied to all inputs first)
    :rtype: float
    """
    delay = abs(delay)
    width = abs(width)
    minima = abs(minima)
    lower = max(minima, delay - width)
    upper = delay + width
    return uniform(lower, upper)
[ 6738, 4738, 1330, 8187, 198, 6738, 640, 1330, 3993, 198, 6738, 19720, 1330, 7343, 11, 32233, 11, 4479, 198, 198, 6738, 45771, 13, 1069, 11755, 1330, 3701, 11125, 16922, 198, 6738, 45771, 13, 25480, 82, 13, 8692, 1330, 7308, 39, 566, 1...
2.880982
815
# -*- coding: utf-8 -*- ''' Python classes that are shared between LiveProxy main.py and Kodi service.liveproxy ''' import logging import os import sys import streamlink.logger as logger log = logging.getLogger('streamlink.liveproxy-shared') __all__ = [ 'check_root', 'logger', 'setup_logging', ]
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 7061, 6, 198, 220, 220, 220, 11361, 6097, 326, 389, 4888, 1022, 198, 220, 220, 220, 220, 220, 7547, 44148, 1388, 13, 9078, 290, 44129, 2139, 13, 12583, 36436, 198, 706...
2.68595
121
# Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-strict """ This is the main class that runs all of the validations. This class handles the overall logic to: * Copy the file to local storage * Run the validations * Generate a validation report Error handling: * If an unhandled error occurs, it will be returned in the report """ import csv import time from typing import Dict, Optional from fbpcp.service.storage_s3 import S3StorageService from fbpcs.input_data_validation.constants import INPUT_DATA_TMP_FILE_PATH from fbpcs.input_data_validation.enums import ValidationResult from fbpcs.input_data_validation.header_validator import HeaderValidator from fbpcs.input_data_validation.line_ending_validator import LineEndingValidator from fbpcs.private_computation.entity.cloud_provider import CloudProvider
[ 2, 15069, 357, 66, 8, 30277, 19193, 82, 11, 3457, 13, 290, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, 287, 262, 6808, 8619, 286, 428, 2723, 5509, 13, ...
3.516605
271
import argparse from Secure_Server import secure_server_start from Secure_Client import secure_client_start from Server import unsecure_server_start from Client import unsecure_client_start from MITM import mitm_start parser = argparse.ArgumentParser(description='Choose whether to run a client or a server, and whether it should be secure or not') parser.add_argument('--tls', action='store_true', help='run secure server') parser.add_argument('--mitm', action='store_true', help='run man in the middle') group = parser.add_mutually_exclusive_group() group.add_argument('--server', action='store_true', help='run server') group.add_argument('--client', action='store_true', help='run client') args = parser.parse_args() if __name__ == '__main__': if args.mitm: # run man in the middle mitm_start() if args.tls: if args.server: # run secure server # secure_server_start("9999") secure_server_start() elif args.client: # run secure client # secure_client_start("localhost", "9999") secure_client_start() else: if args.server: # run unsecure server unsecure_server_start() elif args.client: # run unsecure client unsecure_client_start()
[ 11748, 1822, 29572, 198, 6738, 26707, 62, 10697, 1330, 5713, 62, 15388, 62, 9688, 198, 6738, 26707, 62, 11792, 1330, 5713, 62, 16366, 62, 9688, 198, 6738, 9652, 1330, 555, 22390, 62, 15388, 62, 9688, 198, 6738, 20985, 1330, 555, 22390, ...
2.968523
413
from typing import cast from aws_cdk import aws_iam as iam from aws_cdk import core as cdk from api.infra import API from db.infra import Database
[ 6738, 19720, 1330, 3350, 198, 198, 6738, 3253, 82, 62, 10210, 74, 1330, 3253, 82, 62, 1789, 355, 1312, 321, 198, 6738, 3253, 82, 62, 10210, 74, 1330, 4755, 355, 22927, 74, 198, 198, 6738, 40391, 13, 10745, 430, 1330, 7824, 198, 6738...
3
50
"""This script allows use of an IDE (Wing, Pycharm, ...) to run the rasa shell: (-) Place this script in root of Rasa bot project (-) Open & run it from within your IDE (-) In Wing, use External Console for better experience. """ import os import sys # insert path of this script in syspath so custom modules will be found sys.path.insert(1, os.path.dirname(os.path.abspath(__file__))) # # This is exactly like issuing the command: # $ rasa shell --debug # # sys.argv.append("shell") sys.argv.append("--enable-api") sys.argv.append("--debug") if __name__ == "__main__": from rasa.__main__ import main main()
[ 37811, 1212, 4226, 3578, 779, 286, 281, 33497, 357, 35612, 11, 9485, 354, 1670, 11, 2644, 8, 284, 1057, 262, 374, 15462, 7582, 25, 198, 198, 7, 25106, 8474, 428, 4226, 287, 6808, 286, 371, 15462, 10214, 1628, 198, 7, 25106, 4946, 12...
2.962085
211
#-*-coding:utf-8-*- from flask import Flask, render_template from flask_bootstrap import Bootstrap from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_moment import Moment from flask_login import LoginManager from flask_pagedown import PageDown from config import config import os from flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class bootstrap = Bootstrap() db = SQLAlchemy() moment = Moment() login_manager = LoginManager() login_manager.login_view = '.login' pagedown = PageDown() photos = UploadSet('photos', IMAGES)
[ 2, 12, 9, 12, 66, 7656, 25, 40477, 12, 23, 12, 9, 12, 198, 6738, 42903, 1330, 46947, 11, 8543, 62, 28243, 198, 6738, 42903, 62, 18769, 26418, 1330, 18892, 26418, 198, 6738, 42903, 62, 25410, 282, 26599, 1330, 16363, 2348, 26599, 198...
3.560976
164
from dragonfly import Grammar, CompoundRule pythonBootstrap = Grammar("python bootstrap") pythonBootstrap.add_rule(PythonEnabler()) pythonBootstrap.load() pythonGrammar = Grammar("python grammar") pythonGrammar.load() pythonGrammar.disable()
[ 6738, 10441, 12254, 1330, 20159, 3876, 11, 3082, 633, 31929, 628, 628, 198, 29412, 36476, 26418, 796, 20159, 3876, 7203, 29412, 6297, 26418, 4943, 198, 29412, 36476, 26418, 13, 2860, 62, 25135, 7, 37906, 4834, 397, 1754, 28955, 198, 29412...
3.306667
75
import pandas as pd import pandera as pa df = pd.read_csv('ocorrencia.csv', parse_dates=['ocorrencia_dia'], dayfirst=True) #fazer a leitura da tablea, parse_data: faz a conversão para data e dayfirst: faz com que o dia seja o primeiro número df.head(10) schema = pa.DataFrameSchema( #cria um esquema para o data frame para fazer as validações columns = { 'codigo':pa.Column(pa.Int, required=False), # O valor required (necessário) é True por padrao, colocando assim essa passa a ser uma coluna opcional, em caso de ausência não havera erro. 'codigo_ocorrencia':pa.Column(pa.Int),# diz que a primeira coluna tem que ser int 'codigo_ocorrencia2':pa.Column(pa.Int), # e assim por diante, verificando o tipo de cada coluna 'ocorrencia_classificacao':pa.Column(pa.String), 'ocorrencia_cidade':pa.Column(pa.String), 'ocorrencia_uf':pa.Column(pa.String, pa.Check.str_length(2,2)), # verifica o tamanho min e max 'ocorrencia_aerodromo':pa.Column(pa.String), 'ocorrencia_dia':pa.Column(pa.DateTime), 'ocorrencia_hora':pa.Column(pa.String, pa.Check.str_matches(r'([0-1]?[0-9]|[2][0-3]):([0-5][0-9])(:[0-5][0-9])?$'),nullable=True), #nullable é pra permitir valores nulos 'total_recomendacoes':pa.Column(pa.Int) #essa expressão recular acima faz a validação das horas, minutos e segundos } ) schema.validate(df)
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 279, 4066, 64, 355, 14187, 198, 198, 7568, 796, 279, 67, 13, 961, 62, 40664, 10786, 420, 273, 918, 33743, 13, 40664, 3256, 21136, 62, 19581, 28, 17816, 420, 273, 918, 33743, 62, 67, 544, ...
2.341421
577
import numpy as np
import os
import sqlite3
from . import SubCatalogMixin

__all__ = ["SQLSubCatalogMixin"]


class SQLSubCatalogMixin(SubCatalogMixin):
    """
    This is a SubCatalog mixin class that writes its output to a sqlite
    database, rather than a text file.

    Note: subcatalogs in the same CompoundInstanceCatalog will all have
    to write to the same database file.  They can, however, write to
    different tables within that file.

    Writing the catalog more than once will overwrite the database file.
    """

    _table_name = None  # name of the table to write to
    _file_name = None   # name of the file to write to

    _files_written = set()    # these need to be shared among daughter
    _tables_created = set()   # classes, in case multiple catalogs try
                              # writing to the same database

    def _create_table(self, file_name):
        """
        Create the database table to write to.

        Parameters
        ----------
        file_name is the full path to the file where we will make the database
        """
        # BUG FIX: was "len(...) is 0" -- identity comparison against an int
        # literal is implementation-dependent (and a SyntaxWarning since
        # Python 3.8); use equality.
        if len(self._current_chunk) == 0:
            return

        # Map column dtypes to (sqlite type name, python cast).
        # BUG FIX: the original also keyed on np.float/np.int, which were
        # aliases of the builtins (duplicating the float/int keys) and were
        # removed in numpy >= 1.24, crashing at import of this method.
        dtype_map = {}
        dtype_map[float] = ('float', float)
        dtype_map[np.float64] = ('float', float)
        dtype_map[np.float32] = ('float', float)
        dtype_map[int] = ('int', int)
        dtype_map[np.int64] = ('int', int)
        dtype_map[np.int32] = ('int', int)
        dtype_map[np.str_] = ('text', str)
        dtype_map[np.object_] = ('text', str)

        self._type_casting = {}  # a dict that stores any casts
                                 # needed for the catalog columns

        with sqlite3.connect(file_name) as conn:
            cursor = conn.cursor()
            creation_cmd = '''CREATE TABLE %s ''' % self._table_name
            creation_cmd += '''('''

            # loop over the columns specified for the catalog,
            # adding them to the table schema
            for i_col, name in enumerate(self.iter_column_names()):
                col_type = self.column_by_name(name).dtype.type
                sql_type = dtype_map[col_type][0]
                self._type_casting[name] = dtype_map[col_type][1]
                if i_col > 0:
                    creation_cmd += ''', '''
                creation_cmd += '''%s %s''' % (name, sql_type)

            creation_cmd += ''')'''
            cursor.execute(creation_cmd)
            conn.commit()

        # log that we have written to the database and created the table
        self._files_written.add(file_name)
        self._tables_created.add(self._table_name)

    def _write_recarray(self, input_recarray, file_handle):
        """
        Write the recarray currently being processed by the catalog class
        into the SQLite database.

        Parameters
        ----------
        input_recarray is a recarray of data to be written

        file_handle is the file handle of the main .txt InstanceCatalog
        being written (only its directory is used; the database file is
        placed next to it)
        """
        # BUG FIX: the error messages below previously said
        # "SubCatalogSQLMixin", which is not this class's name.
        if self._table_name is None:
            raise RuntimeError("Cannot call SQLSubCatalogMixin._write_recarray:"
                               "\n_table_name is None")
        if self._file_name is None:
            raise RuntimeError("Cannot call SQLSubCatalogMixin._write_recarray:"
                               "\n_file_name is None")

        self._filter_chunk(input_recarray)

        file_dir = os.path.dirname(file_handle.name)
        full_file_name = os.path.join(file_dir, self._file_name)

        # delete previous iterations of the file
        if full_file_name not in self._files_written:
            if os.path.exists(full_file_name):
                os.unlink(full_file_name)

        if self._table_name not in self._tables_created:
            self._create_table(full_file_name)

        # Apply any column transformations before writing.
        col_dict = {}
        for name in self.iter_column_names():
            arr = self.column_by_name(name)
            if name in self.transformations:
                col_dict[name] = self.transformations[name](arr)
            else:
                col_dict[name] = arr

        if len(self._current_chunk) == 0:
            return

        with sqlite3.connect(full_file_name) as conn:
            insert_cmd = '''INSERT INTO %s ''' % self._table_name
            insert_cmd += '''VALUES('''
            for i_col, name in enumerate(self.iter_column_names()):
                if i_col > 0:
                    insert_cmd += ''','''
                insert_cmd += '''?'''
            insert_cmd += ''')'''

            cursor = conn.cursor()
            # Lazily build one casted tuple per object; executemany consumes
            # the generator without materializing all rows.
            values = (tuple(self._type_casting[name](col_dict[name][i_obj])
                            for name in self.iter_column_names())
                      for i_obj in range(len(self._current_chunk)))
            cursor.executemany(insert_cmd, values)
            conn.commit()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 11748, 44161, 578, 18, 198, 6738, 764, 1330, 3834, 49015, 35608, 259, 198, 198, 834, 439, 834, 796, 14631, 50, 48, 6561, 549, 49015, 35608, 259, 8973, 198, 198, 4871, 49747, 6561, ...
2.149956
2,294
"""Directives for building command blocks.""" from docutils import nodes from docutils.parsers.rst import directives from sphinx.util.docutils import SphinxDirective class CommandBlockDirective(SphinxDirective): """Generate a container node with a command, its prompt and optional output.""" has_content = True required_arguments = 1 # The command-block ID used in references option_spec = dict( prompt=directives.unchanged, separator=directives.unchanged, )
[ 37811, 13470, 1083, 329, 2615, 3141, 7021, 526, 15931, 198, 198, 6738, 2205, 26791, 1330, 13760, 198, 6738, 2205, 26791, 13, 79, 945, 364, 13, 81, 301, 1330, 34819, 198, 6738, 599, 20079, 87, 13, 22602, 13, 15390, 26791, 1330, 45368, ...
3.197452
157
'''
Backs up a MySQL database (mysqldump | gzip) and emails the compressed dump
as an attachment.

Config example:
{
    "subject" : "Daily backup",
    "body" : "This is a daily database backup",
    "sender_email" : "sender@gmail.com",
    "receiver_email" : "receiver@gmail.com",
    "password" : "supersecretpassword",
    "smtp_server" : "smtp.gmail.com",
    "smtp_host" : 465,
    "dbname" : "dbname",
    "file_prefix": "dbname_backup"
}
'''
import email, smtplib, ssl
import datetime
import subprocess
import shlex
import json

from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

CONFIG_FILE = 'backup_email.json'

# Load settings; see the config example in the module docstring.
with open(CONFIG_FILE, 'r') as f:
    config = json.load(f)

subject = config['subject']
body = config['body']
sender_email = config['sender_email']
receiver_email = config['receiver_email']
password = config['password']
smtp_server = config['smtp_server']
smtp_host = config['smtp_host']  # NOTE(review): despite the name this is the SMTP port (e.g. 465)
dbname = config['dbname']
file_prefix = config['file_prefix']

cmd1 = "mysqldump {}".format(dbname)
cmd2 = "gzip -9"
filename = "{}_{}.sql.gz".format(file_prefix, datetime.datetime.now().strftime('%Y%m%d%H%M'))

# Backup database: pipe mysqldump into gzip, writing the compressed dump to
# `filename`.  Popen writes through the file's OS-level descriptor, so the
# text-mode handle does not mangle the binary gzip stream.
print('Backing up database..')
with open(filename, 'w') as f:
    ps1 = subprocess.Popen(shlex.split(cmd1), stdout=subprocess.PIPE)
    ps2 = subprocess.Popen(shlex.split(cmd2), stdin=ps1.stdout, stdout=f)
    # Close our copy of the pipe so mysqldump receives SIGPIPE if gzip exits
    # early (the idiom recommended by the subprocess docs for pipelines).
    ps1.stdout.close()
    ps1.wait()
    ps2.wait()
    # NOTE(review): gzip uses exit code 1 for errors and 2 for warnings;
    # only 2 is checked here -- confirm this is intended.
    if ps2.returncode == 2:
        # Was exit(1): raise SystemExit directly instead of relying on the
        # site-module-provided exit() helper.
        raise SystemExit(1)

# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
message["Bcc"] = receiver_email  # Recommended for mass emails

# Add body to email
message.attach(MIMEText(body, "plain"))

# Open the compressed dump in binary mode
with open(filename, "rb") as attachment:
    # Add file as application/octet-stream
    # Email client can usually download this automatically as attachment
    part = MIMEBase("application", "octet-stream")
    part.set_payload(attachment.read())

# Encode file in ASCII characters to send by email
encoders.encode_base64(part)

# Add header as key/value pair to attachment part.
# BUG FIX: the Content-Disposition header previously carried a literal
# placeholder instead of the backup's real name; embed the actual filename.
part.add_header(
    "Content-Disposition",
    f"attachment; filename= {filename}",
)

# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()

# Log in to server using secure context and send email
print('Sending email..')
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, smtp_host, context=context) as server:
    server.login(sender_email, password)
    server.sendmail(sender_email, receiver_email, text)

print('Done.')
[ 7061, 6, 201, 198, 201, 198, 201, 198, 16934, 1672, 25, 201, 198, 90, 201, 198, 220, 220, 220, 366, 32796, 1, 1058, 366, 28545, 11559, 1600, 201, 198, 220, 220, 220, 366, 2618, 1, 1058, 366, 1212, 318, 257, 4445, 6831, 11559, 1600...
2.614573
1,043
# Test Case linked_list = LinkedList([5, 7, -1, 0.9, 71]) print("Linked List tests:") print (" Initialization: " + "Pass" if (linked_list.to_list() == [5, 7, -1, 0.9, 71]) else "Fail") linked_list.delete(-1) print (" Delete: " + "Pass" if (linked_list.to_list() == [5, 7, 0.9, 71]) else "Fail") print (" Search: " + "Pass" if (linked_list.search(0.9)) else "Fail") print (" Search: " + "Pass" if (not linked_list.search(55)) else "Fail") linked_list.append(91) print (" Append: " + "Pass" if (linked_list.to_list() == [5, 7, 0.9, 71, 91]) else "Fail") print (" Pop: " + "Pass" if (linked_list.pop() == 5) else "Fail")
[ 198, 2, 6208, 8913, 198, 25614, 62, 4868, 796, 7502, 276, 8053, 26933, 20, 11, 767, 11, 532, 16, 11, 657, 13, 24, 11, 9166, 12962, 198, 4798, 7203, 11280, 276, 7343, 5254, 25, 4943, 198, 4798, 5855, 220, 20768, 1634, 25, 366, 1343...
2.272109
294
#!/usr/bin/python # # Copyright 2018-2020 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from tests.utils import BaseTestCase from polyaxon.utils.memoize_decorators import memoize class MemoizeMethodTest(BaseTestCase): """ A test case for the `memoize` decorator. """
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 198, 2, 15069, 2864, 12, 42334, 12280, 897, 261, 11, 3457, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, ...
3.41453
234
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function import pytest from cryptography.hazmat.primitives import hashes from .utils import generate_hmac_test from ...utils import load_hash_vectors @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.MD5()), skip_message="Does not support MD5", ) @pytest.mark.hmac @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.SHA1()), skip_message="Does not support SHA1", ) @pytest.mark.hmac @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.SHA224()), skip_message="Does not support SHA224", ) @pytest.mark.hmac @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.SHA256()), skip_message="Does not support SHA256", ) @pytest.mark.hmac @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.SHA384()), skip_message="Does not support SHA384", ) @pytest.mark.hmac @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.SHA512()), skip_message="Does not support SHA512", ) @pytest.mark.hmac @pytest.mark.supported( only_if=lambda backend: backend.hmac_supported(hashes.RIPEMD160()), skip_message="Does not support RIPEMD160", ) @pytest.mark.hmac
[ 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 7330, 257, 4866, 286, 262, 13789, 379, 198, 2,...
2.996785
622
import os.path import sublime_plugin
[ 11748, 28686, 13, 6978, 198, 198, 11748, 41674, 62, 33803, 628 ]
3.545455
11
from django.db import models as m
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 355, 285, 628 ]
3.5
10
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/ # 123. Best Time to Buy and Sell Stock III(Hard) Solution # Say you have an array for which the ith element is the price of a given stock on day i. # Design an algorithm to find the maximum profit. You may complete at most two transactions. # Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again). # Driver code # test = [3, 3, 5, 0, 0, 3, 1, 4] # p = Solution() # result = p.maxProfit(test) # print(result)
[ 2, 3740, 1378, 293, 316, 8189, 13, 785, 14, 1676, 22143, 14, 13466, 12, 2435, 12, 1462, 12, 17846, 12, 392, 12, 7255, 12, 13578, 12, 15479, 14, 198, 2, 17031, 13, 6705, 3862, 284, 11763, 290, 25688, 10500, 6711, 7, 17309, 8, 28186...
3.230769
169
# Copyright 2014-2017 Insight Software Consortium. # Copyright 2004-2009 Roman Yakovenko. # Distributed under the Boost Software License, Version 1.0. # See http://www.boost.org/LICENSE_1_0.txt import os import pycodestyle import unittest import fnmatch if __name__ == "__main__": run_suite()
[ 2, 15069, 1946, 12, 5539, 39917, 10442, 42727, 13, 198, 2, 15069, 5472, 12, 10531, 7993, 30254, 16206, 7204, 13, 198, 2, 4307, 6169, 739, 262, 19835, 10442, 13789, 11, 10628, 352, 13, 15, 13, 198, 2, 4091, 2638, 1378, 2503, 13, 3952...
3.123711
97
"""Classes for creating DNS zones and writing corresponding zone files. No validation is done here (e.g. requiring a SOA record) because this module may be used to create fragments of zone files to be used via an $INCLUDE directive. Users of this module are encouraged to employ an external validation process like named-checkzone(8) on zone files. Host names are passed to dnsrecord.ResourceRecord methods unmodified; i.e. they must be terminated with a dot ('.') to be interpreted as fully qualified domain names (FQDNs)--otherwise they will be interpreted relative to $ORIGIN, so be diligent in minding your dots. IP addresses may be specified in any format accepted by ipaddr.IPAddress(). Time values may be specified either as an integer (seconds) or a string in one of BIND's time formats. Note that the 'name' keyword argument defaults to '@', so adding an MX record, for example, adds an MX record for the whole domain unless otherwise specified. The 'ttl' keyword argument defaults to None, so records will use the zone's default time-to-live (TTL) unless otherwise specified. """ import time import ipaddr import dnsrecord class _Zone(object): """Base DNS zone object.""" # default values for keyword args; values chosen from RFC recommendations TTL = '1h' REFRESH = '3h' RETRY = '1h' EXPIRY = '2d' NXDOMAIN = '1h' def __init__(self, origin, epochserial=False, ttl=TTL): """Return a _Zone object. Args: origin: (str) zone's root; '.' will be appended if necessary 'example.com' epochserial: (boolean) whether to use number of seconds since epoch as default serial number in SOA record ttl: (str or int) default time-to-live for resource records """ self.origin = origin if not self.origin.endswith('.'): # make sure it looks like FQDN (although it still might be wrong) self.origin += '.' self.epochserial = epochserial self.records = [] # list of dnsrecord objects self.ttl = ttl def write_file(self, filename): """Write zone file. 
Args: filename: (str) name of file to be written 'zonefile.hosts' """ with open(filename, 'w') as fh: fh.write('$ORIGIN %s\n' % self.origin) fh.write('$TTL %s\n' % self.ttl) for record in self.records: fh.write('%s\n' % record) fh.close() def add_record(self, record): """Add record to zone. This is abstracted from the add_*() methods in case later implementations store the records in a different data structure. (YagNi?) Args: record: (dnsrecord.ResourceRecord) record to be added """ self.records.append(record) def add_soa(self, mname, rname, serial=None, refresh=REFRESH, retry=RETRY, expiry=EXPIRY, nxdomain=NXDOMAIN, name='@', ttl=None): """Add Start of Authority record to zone. Args: mname: (str) host name of name server authoritative for zone 'ns1.example.com.' rname: (str) e-mail address of person responsible for zone 'hostmaster@example.com' serial: (int) serial number '1969123100' refresh: (str or int) slave's refresh interval retry: (str or int) slave's retry interval expiry: (str or int) slave's expiry interval nxdomain: (str or int) negative caching time (TTL) name: (str) name of node to which this record belongs 'example.com.' """ if serial is None: # set default serial number if self.epochserial: serial = int(time.time()) # number of seconds since epoch else: serial = int(time.strftime('%Y%m%d00')) # YYYYMMDD00 soa = dnsrecord.SOA(name, mname, rname, serial, refresh, retry, expiry, nxdomain, ttl) self.add_record(soa) def add_ns(self, name_server, name='@', ttl=None): """Add Name Server record to zone. Args: name_server: (str) host name of name server 'ns1.example.com.' name: (str) name of node to which this record belongs 'example.com.' ttl: (str or int) time-to-live """ ns = dnsrecord.NS(name, name_server, ttl) self.add_record(ns) class ForwardZone(_Zone): """Forward DNS zone.""" def add_a(self, address, name='@', ttl=None): """Add IPv4 Address record to zone. 
Args: address: (str) IPv4 address '192.168.1.1' name: (str) name of node to which this record belongs 'host.example.com.' ttl: (str or int) time-to-live """ a = dnsrecord.A(name, address, ttl) self.add_record(a) def add_aaaa(self, address, name='@', ttl=None): """Add IPv6 Address record to zone. Args: address: (str) IPv6 address '2001:db8::1' name: (str) name of node to which this record belongs 'host.example.com.' ttl: (str or int) time-to-live """ aaaa = dnsrecord.AAAA(name, address, ttl) self.add_record(aaaa) def add_cname(self, canonical_name, name='@', ttl=None): """Add Canonical Name record to zone. Args: canonical: (str) canonical host name of host 'mail.example.com.' name: (str) name of node to which this record belongs 'mailserver.example.com.' ttl: (str or int) time-to-live """ cname = dnsrecord.CNAME(name, canonical_name, ttl) self.add_record(cname) def add_mx(self, mail_exchanger, preference=10, name='@', ttl=None): """Add Mail Exchanger record to zone. Args: mail_exchanger: (str) host name of mail exchanger 'mail.example.com.' preference: (int) preference value of mail exchanger name: (str) name of node to which this record belongs 'example.com.' ttl: (str or int) time-to-live """ mx = dnsrecord.MX(name, preference, mail_exchanger, ttl) self.add_record(mx) def add_txt(self, text, name='@', ttl=None): """Add Text record to zone. Args: text: (str) textual contents of record 'This is a text record' name: (str) name of node to which this record belongs 'example.com.' ttl: (str or int) time-to-live """ txt = dnsrecord.TXT(name, text, ttl) self.add_record(txt) class ReverseZone(_Zone): """Reverse DNS zone.""" def add_ptr(self, address, name='@', ttl=None): """Add Pointer record to zone. Args: address: (str) IPv4 or IPv6 address '192.168.1.1' name: (str) name of node to which this record belongs 'ns1.example.com.' 
ttl: (str or int) time-to-live """ ptr = dnsrecord.PTR(address, name, ttl) self.add_record(ptr) def run_tests(): """Run rudimentary tests of module. These are really intended for development and debugging purposes rather than a substitute for unit tests. """ # create forward zone and write to file z = ForwardZone('example.com') z.add_soa('ns1', 'hostmaster') z.add_ns('ns1') z.add_ns('ns2') z.add_mx('mail1') z.add_mx('mail2', 20, ttl=600) z.add_a('192.168.1.1', 'ns1') z.add_aaaa('2001:db8::1', 'ns1') z.add_txt('v=spf1 mx ~all') z.add_cname('mailserver', 'mail') filename = 'fwdzone' z.write_file(filename) print 'Wrote %s.' % filename # create IPv4 reverse zone and write to file z = ReverseZone('1.168.192.in-addr.arpa') z.add_soa('ns1.example.com.', 'hostmaster@example.com.') z.add_ns('ns1.example.com.') z.add_ptr('192.168.1.1', 'ns1.example.com.') filename = 'revzone4' z.write_file(filename) print 'Wrote %s.' % filename # create IPv6 reverse zone and write to file z = ReverseZone('0.0.0.0.0.0.c.f.ip6.arpa', epochserial=True) z.add_soa('ns1.example.com.', 'hostmaster@example.com.') z.add_ns('ns1.example.com.') z.add_ptr('2001:db8::1', 'ns1.example.com.') filename = 'revzone6' z.write_file(filename) print 'Wrote %s.' % filename if __name__ == '__main__': run_tests()
[ 37811, 9487, 274, 329, 4441, 18538, 14123, 290, 3597, 11188, 6516, 198, 16624, 13, 198, 198, 2949, 21201, 318, 1760, 994, 357, 68, 13, 70, 13, 10616, 257, 12809, 32, 1700, 8, 780, 428, 198, 21412, 743, 307, 973, 284, 2251, 21441, 28...
2.239192
3,863
# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import traceback def _exception_handler(debugger, exc_info): """Exception handler enabling post-mortem debugging. A class extending testtools.TestCase can add this handler in setUp(): self.addOnException(post_mortem_debug.exception_handler) When an exception occurs, the user will be dropped into a debugger session in the execution environment of the failure. Frames associated with the testing framework are excluded so that the post-mortem session for an assertion failure will start at the assertion call (e.g. self.assertTrue) rather than the framework code that raises the failure exception (e.g. the assertTrue method). """ tb = exc_info[2] ignored_traceback = get_ignored_traceback(tb) if ignored_traceback: tb = FilteredTraceback(tb, ignored_traceback) traceback.print_exception(exc_info[0], exc_info[1], tb) debugger.post_mortem(tb) def get_ignored_traceback(tb): """Retrieve the first traceback of an ignored trailing chain. Given an initial traceback, find the first traceback of a trailing chain of tracebacks that should be ignored. The criteria for whether a traceback should be ignored is whether its frame's globals include the __unittest marker variable. This criteria is culled from: unittest.TestResult._is_relevant_tb_level For example: tb.tb_next => tb0.tb_next => tb1.tb_next - If no tracebacks were to be ignored, None would be returned. 
- If only tb1 was to be ignored, tb1 would be returned. - If tb0 and tb1 were to be ignored, tb0 would be returned. - If either of only tb or only tb0 was to be ignored, None would be returned because neither tb or tb0 would be part of a trailing chain of ignored tracebacks. """ # Turn the traceback chain into a list tb_list = [] while tb: tb_list.append(tb) tb = tb.tb_next # Find all members of an ignored trailing chain ignored_tracebacks = [] for tb in reversed(tb_list): if '__unittest' in tb.tb_frame.f_globals: ignored_tracebacks.append(tb) else: break # Return the first member of the ignored trailing chain if ignored_tracebacks: return ignored_tracebacks[-1] class FilteredTraceback(object): """Wraps a traceback to filter unwanted frames.""" def __init__(self, tb, filtered_traceback): """Constructor. :param tb: The start of the traceback chain to filter. :param filtered_traceback: The first traceback of a trailing chain that is to be filtered. """ self._tb = tb self.tb_lasti = self._tb.tb_lasti self.tb_lineno = self._tb.tb_lineno self.tb_frame = self._tb.tb_frame self._filtered_traceback = filtered_traceback @property
[ 2, 15069, 2211, 2297, 10983, 11, 3457, 13, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 407, 779, 428, 2393, 2845, 287, 11846...
2.840261
1,227
# encoding:utf-8 import os import torch import torch.nn.functional as F import torch.optim as optim import torchvision from torchvision import transforms as T from torchvision.datasets import ImageFolder from torch.autograd import Variable from torch.utils.data import DataLoader from models.discriminator import Discriminator from models.generator import Generator import time import visdom
[ 2, 21004, 25, 40477, 12, 23, 198, 11748, 28686, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 11748, 28034, 10178, 198, 6738, 28034, 10178, 1330, 31408, 355, 309, 198...
4.020408
98
import sys from PyQt5.QtWidgets import (QApplication, QWidget, QPushButton, QLabel, QLineEdit, QGridLayout, QMessageBox) if __name__ == '__main__': app = QApplication(sys.argv) form = LoginForm() form.show() sys.exit(app.exec_())
[ 11748, 25064, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 357, 48, 23416, 11, 1195, 38300, 11, 1195, 49222, 21864, 11, 1195, 33986, 11, 1195, 13949, 18378, 11, 1195, 41339, 32517, 11, 1195, 12837, 14253, 8, 628, 1...
2.535354
99
import unittest import random import math from pynsga3 import utils if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 11748, 4738, 198, 11748, 10688, 198, 198, 6738, 279, 2047, 82, 4908, 18, 1330, 3384, 4487, 628, 628, 220, 220, 220, 220, 220, 220, 220, 220, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 1...
2.433962
53
#!/usr/bin/env python # -*- coding: utf-8 -*- # _version.py """Package initialization for IQDM-PDF.""" # Copyright (c) 2021 Dan Cutright # This file is part of IQDM-PDF, released under a MIT license. # See the file LICENSE included with this distribution __author__ = "Dan Cutright" __version__ = "0.3.0" __release__ = "0.3.0"
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 4808, 9641, 13, 9078, 198, 37811, 27813, 37588, 329, 18248, 23127, 12, 20456, 526, 15931, 198, 2, 15069, 357, 66, ...
2.82906
117
import torch import numpy as np from training.networks import FullyConnectedLayer batch_size = 2 in_channels = 512 w_dim = 512 lr = 0.1 # activation = 'linear' # activation = 'lrelu' # activation = 'relu' # activation = 'tanh' activation = 'sigmoid' # activation = 'elu' # activation = 'selu' # activation = 'softplus' # activation = 'swish' model = FullyConnectedLayer(w_dim, in_channels, activation=activation, bias_init=1) model.train() optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) torch.save(model.state_dict(), "pytorch_fullyConnectedLayer.pth") dic = {} for batch_idx in range(20): optimizer.zero_grad(set_to_none=True) ws = torch.randn([batch_size, 512]) ws.requires_grad_(True) styles = model(ws) styles2 = torch.sigmoid(styles) dstyles2_dws = torch.autograd.grad(outputs=[styles2.sum()], inputs=[ws], create_graph=True, only_inputs=True)[0] dic['batch_%.3d.dstyles2_dws'%batch_idx] = dstyles2_dws.cpu().detach().numpy() dic['batch_%.3d.output'%batch_idx] = styles.cpu().detach().numpy() dic['batch_%.3d.input'%batch_idx] = ws.cpu().detach().numpy() loss = dstyles2_dws.sum() + styles2.sum() # loss = styles2.sum() loss.backward() optimizer.step() np.savez('01_fullyConnectedLayer_grad', **dic) print()
[ 198, 11748, 28034, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 3047, 13, 3262, 5225, 1330, 40234, 13313, 276, 49925, 628, 198, 43501, 62, 7857, 796, 362, 198, 259, 62, 354, 8961, 796, 22243, 198, 86, 62, 27740, 796, 22243, 198, 140...
2.479924
523
from appJar import gui import paho.mqtt.client as mqtt import sys # sys.path.insert(0, '/home/nikolatz/Edge-Computing-Interpretation/mainNode') # from main_node import runMain app = gui() app.setBg("DarkKhaki") app.startPagedWindow("Welcome to projecto") app.startPage() app.addLabel("w1", "You have to choose two files") app.stopPage() app.startPage() app.addLabel("l1", "upload a file") app.setLabelBg("l1", "green") app.setLabelSticky("l1", "both") app.addFileEntry("f1") app.setEntrySticky("f1", "both") app.stopPage() app.startPage() app.addLabel("l2", "upload a script") app.setLabelBg("l2", "green") app.setLabelSticky("l2", "both") app.addFileEntry("f2") app.setEntrySticky("f2", "both") app.stopPage() app.startPage() app.addButton("Send the work", press) app.setButtonAlign("Send the work", "center") app.setButtonSticky("Send the work", "both") app.stopPage() app.stopPagedWindow() client = mqtt.Client() client.on_connect = on_connect client.on_message = on_message client.connect("192.168.0.109", 1883, 60) client.loop_start() # start the GUI app.go()
[ 6738, 598, 47511, 1330, 11774, 198, 11748, 279, 17108, 13, 76, 80, 926, 13, 16366, 355, 285, 80, 926, 198, 11748, 25064, 198, 2, 25064, 13, 6978, 13, 28463, 7, 15, 11, 31051, 11195, 14, 17187, 349, 27906, 14, 37021, 12, 5377, 48074,...
2.619512
410
# Generated by Django 2.2.13 on 2020-08-02 01:23 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 1485, 319, 12131, 12, 2919, 12, 2999, 5534, 25, 1954, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.875
32
import utils import numpy as np from sklearn.multiclass import OneVsRestClassifier from sklearn.model_selection import KFold, cross_val_score from sklearn.linear_model import SGDClassifier,LogisticRegression # CROSS-VALIDATION ACCURACY # TRAINING ACCURACY AND PREDICTION
[ 11748, 3384, 4487, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 16680, 291, 31172, 1330, 1881, 23266, 19452, 9487, 7483, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 509, 37, 727, 11, 3272, 62, 2100, 62, 26675,...
3.209302
86
from Local import *
[ 6738, 10714, 1330, 1635 ]
4.75
4
from PyQt5.QtWidgets import QWidget, QLabel, QGridLayout from PyQt5.QtCore import Qt, pyqtSlot import numpy as np from PyQt5.QtGui import QPixmap, QImage import os
[ 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 38300, 11, 1195, 33986, 11, 1195, 41339, 32517, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 14055, 1330, 33734, 11, 12972, 39568, 38963, 198, 11748, 299, 32152, 355, 45941...
2.477612
67
class Solution: """ @param A: An integer array @return: An integer """
[ 4871, 28186, 25, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 2488, 17143, 317, 25, 1052, 18253, 7177, 198, 220, 220, 220, 2488, 7783, 25, 1052, 18253, 198, 220, 220, 220, 37227 ]
2.606061
33
""" Copyright (C) 2018 NVIDIA Corporation. All rights reserved. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). """ from __future__ import division import torch.nn as nn import scipy.misc import numpy as np import scipy.sparse import scipy.sparse.linalg from numpy.lib.stride_tricks import as_strided from PIL import Image # Returns sparse matting laplacian # The implementation of the function is heavily borrowed from # https://github.com/MarcoForte/closed-form-matting/blob/master/closed_form_matting.py # We thank Marco Forte for sharing his code.
[ 37811, 198, 15269, 357, 34, 8, 2864, 15127, 10501, 13, 220, 220, 220, 1439, 2489, 10395, 13, 198, 26656, 15385, 739, 262, 12624, 11050, 12, 7792, 12, 4090, 604, 13, 15, 5964, 357, 5450, 1378, 20123, 425, 9503, 684, 13, 2398, 14, 677...
3.014354
209
from models.player import Player, Roles, States as PlayerStates
[ 198, 6738, 4981, 13, 7829, 1330, 7853, 11, 371, 4316, 11, 1829, 355, 7853, 42237, 628, 198 ]
3.941176
17
import warnings import numpy as np import scipy.linalg as linalg from scipy import sparse from sklearn.base import BaseEstimator, TransformerMixin from sklearn.preprocessing import normalize from sklearn.utils import check_array, check_X_y from sklearn.utils.validation import check_is_fitted from .slices import slice_y, is_multioutput __all__ = ['SlicedAverageVarianceEstimation'] class SlicedAverageVarianceEstimation(BaseEstimator, TransformerMixin): """Sliced Average Variance Estimation (SAVE) [1] Linear dimensionality reduction using the conditional covariance, Cov(X|y), to identify the directions defining the central subspace of the data. The algorithm performs a weighted principal component analysis on a transformation of slices of the covariance matrix of the whitened data, which has been sorted with respect to the target, y. Since SAVE looks at second moment information, it may miss first-moment information. In particular, it may miss linear trends. See :class:`sliced.sir.SlicedInverseRegression`, which is able to detect linear trends but may fail in other situations. If possible, both SIR and SAVE should be used when analyzing a dataset. Parameters ---------- n_directions : int, str or None (default='auto') Number of directions to keep. Corresponds to the dimension of the central subpace. If n_directions=='auto', the number of directions is chosen by finding the maximum gap in the ordered eigenvalues of the var(X|y) matrix and choosing the directions before this gap. If n_directions==None, the number of directions equals the number of features. n_slices : int (default=10) The number of slices used when calculating the inverse regression curve. Truncated to at most the number of unique values of ``y``. copy : bool (default=True) If False, data passed to fit are overwritten and running fit(X).transform(X) will not yield the expected results, use fit_transform(X) instead. 
Attributes ---------- directions_ : array, shape (n_directions, n_features) The directions in feature space, representing the central subspace which is sufficient to describe the conditional distribution y|X. The directions are sorted by ``eigenvalues_``. eigenvalues_ : array, shape (n_directions,) The eigenvalues corresponding to each of the selected directions. These are the eigenvalues of the covariance matrix of the inverse regression curve. Larger eigenvalues indicate more prevelant directions. Examples -------- >>> import numpy as np >>> from sliced import SlicedAverageVarianceEstimation >>> from sliced.datasets import make_quadratic >>> X, y = make_quadratic(random_state=123) >>> save = SlicedAverageVarianceEstimation(n_directions=2) >>> save.fit(X, y) SlicedAverageVarianceEstimation(copy=True, n_directions=2, n_slices=10) >>> X_save = save.transform(X) References ---------- [1] Shao, Y, Cook, RD and Weisberg, S (2007). "Marginal Tests with Sliced Average Variance Estimation", Biometrika, 94, 285-296. """ def fit(self, X, y): """Fit the model with X and y. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) The target values (class labels in classification, real numbers in regression). Returns ------- self : object Returns the instance itself. """ if sparse.issparse(X): raise TypeError("SlicedInverseRegression does not support " "sparse input.") X, y = check_X_y(X, y, dtype=[np.float64, np.float32], y_numeric=True, copy=self.copy) # handle n_directions == None if self.n_directions is None: n_directions = X.shape[1] elif (not isinstance(self.n_directions, str) and self.n_directions < 1): raise ValueError('The number of directions `n_directions` ' 'must be >= 1. 
Got `n_directions`={}'.format( self.n_directions)) else: n_directions = self.n_directions # validate y if is_multioutput(y): raise TypeError("The target `y` cannot be multi-output.") n_samples, n_features = X.shape # Center and Whiten feature matrix using a QR decomposition # (this is the approach used in the dr package) if self.copy: X = X - np.mean(X, axis=0) else: X -= np.mean(X, axis=0) Q, R = linalg.qr(X, mode='economic') Z = np.sqrt(n_samples) * Q # sort rows of Z with respect to the target y Z = Z[np.argsort(y), :] # determine slices and counts slices, counts = slice_y(y, self.n_slices) self.n_slices_ = counts.shape[0] # construct slice covariance matrices M = np.zeros((n_features, n_features)) for slice_idx in range(self.n_slices_): n_slice = counts[slice_idx] # center the entries in this slice Z_slice = Z[slices == slice_idx, :] Z_slice -= np.mean(Z_slice, axis=0) # slice covariance matrix V_slice = np.dot(Z_slice.T, Z_slice) / n_slice M_slice = np.eye(n_features) - V_slice M += (n_slice / n_samples) * np.dot(M_slice, M_slice) # eigen-decomposition of slice matrix evals, evecs = linalg.eigh(M) evecs = evecs[:, ::-1] evals = evals[::-1] try: # TODO: internally handle zero variance features. This would not # be a problem if we used svd, but does not match DR. directions = linalg.solve_triangular(np.sqrt(n_samples) * R, evecs) except (linalg.LinAlgError, TypeError): # NOTE: The TypeError is because of a bug in the reporting of scipy raise linalg.LinAlgError( "Unable to back-solve R for the dimension " "reducing directions. This is usually caused by the presents " "of zero variance features. Try removing these features with " "`sklearn.feature_selection.VarianceThreshold(threshold=0.)` " "and refitting.") # the number of directions is chosen by finding the maximum gap among # the ordered eigenvalues. 
if self.n_directions == 'auto': n_directions = np.argmax(np.abs(np.diff(evals))) + 1 self.n_directions_ = n_directions # normalize directions directions = normalize( directions[:, :self.n_directions_], norm='l2', axis=0) self.directions_ = directions.T self.eigenvalues_ = evals[:self.n_directions_] return self def transform(self, X): """Apply dimension reduction on X. X is projected onto the EDR-directions previously extracted from a training set. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples in the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_directions) """ check_is_fitted(self) X = check_array(X) return np.dot(X, self.directions_.T)
[ 11748, 14601, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 629, 541, 88, 13, 75, 1292, 70, 355, 300, 1292, 70, 198, 198, 6738, 629, 541, 88, 1330, 29877, 198, 6738, 1341, 35720, 13, 8692, 1330, 7308, 22362, 320, 1352, 11, 36...
2.432925
3,183
# coding: utf-8 import os import re import sys import ConfigParser from util.live import Live from util.google import Google CONFIG_PATH = os.path.dirname(os.path.abspath(__file__)) + '/config' CONFIG_SAMPLE_FILE = CONFIG_PATH + '.sample' LIVE_BASE_URL = 'http://live.nicovideo.jp/watch/' if __name__ == '__main__': if len(sys.argv) != 3: print "invalid arguments" method = sys.argv[1] if re.match('lv', sys.argv[2]): url = LIVE_BASE_URL + sys.argv[2] else: url = sys.argv[2] main = Main() main.run(url, method)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 25064, 198, 11748, 17056, 46677, 198, 6738, 7736, 13, 12583, 1330, 7547, 198, 6738, 7736, 13, 13297, 1330, 3012, 198, 198, 10943, 16254, 62, 34219, 7...
2.300813
246
from io import StringIO, SEEK_END from pathlib import Path from typing import Iterator, TextIO from .input import Input
[ 6738, 33245, 1330, 10903, 9399, 11, 31107, 42, 62, 10619, 198, 6738, 3108, 8019, 1330, 10644, 198, 6738, 19720, 1330, 40806, 1352, 11, 8255, 9399, 198, 198, 6738, 764, 15414, 1330, 23412, 628, 198 ]
3.617647
34
ALLOWED_HOSTS = ['*'] BROKER_URL = 'redis://127.0.0.1:6379/0' CELERY_RESULT_BACKEND = 'redis://127.0.0.1:6379/0'
[ 7036, 3913, 1961, 62, 39, 10892, 50, 796, 37250, 9, 20520, 198, 198, 11473, 11380, 1137, 62, 21886, 796, 705, 445, 271, 1378, 16799, 13, 15, 13, 15, 13, 16, 25, 21, 29088, 14, 15, 6, 198, 34, 3698, 19664, 62, 19535, 16724, 62, 3...
1.753846
65
from time import time from functools import reduce from operator import add
[ 6738, 640, 1330, 640, 198, 6738, 1257, 310, 10141, 1330, 4646, 198, 6738, 10088, 1330, 751, 198 ]
4.470588
17
# Derek Santos from imgur import Imgur from pprint import pformat from time import sleep import json import logs import os import re import requests import shutil log = logs.Log('downloader')
[ 2, 20893, 28458, 198, 6738, 33705, 333, 1330, 1846, 45073, 198, 6738, 279, 4798, 1330, 279, 18982, 198, 6738, 640, 1330, 3993, 198, 11748, 33918, 198, 11748, 17259, 198, 11748, 28686, 198, 11748, 302, 198, 11748, 7007, 198, 11748, 4423, ...
3.62963
54
import pandas as pd import numpy as np data = { 'Id': [1, 2, 3, 4, 5], 'Name': ['Syed', 'Shah', 'Sunil', 'Sherif', 'Sugata'], 'Score': [100, 200, 250, 350, 275] } df = pd.DataFrame( data, index=['Rank 1', 'Rank 2', 'Rank 3', 'Rank 4', 'Rank 5']) df["Locations"] = np.array( ['Bangalore', 'New York', 'Houston', 'Boston', 'Sydney']) print(df["Name"]) print(df)
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 299, 32152, 355, 45941, 198, 7890, 796, 1391, 198, 220, 220, 220, 705, 7390, 10354, 685, 16, 11, 362, 11, 513, 11, 604, 11, 642, 4357, 198, 220, 220, 220, 705, 5376, 10354, 37250, 13940, ...
2.213873
173
import cv2 import numpy as np from Functions.Featurespace import find_annodir from Functions import imgproc_func as imf import glob # TODO: FIX FS thresholding, GR detection """ SÆT DIN PATH TIL DIT ONE DRIVE HER -> DOWNLOAD ANNOTATIONS MAPPEN FØRST """ # Path to folder containing the different classes path = r'C:\Users\Muku\OneDrive - Aalborg Universitet\P4 - GrisProjekt\Training data\annotations' # Find what classes have been found class_name, anotations = find_annodir(path) # Define trackers = ['hue_upper', 'hue_lower', 'light_upper', 'light_lower', 'saturation_upper', 'saturation_lower'] hls_values = [255, 70, 255, 37, 255, 30] blue_values = [124, 84, 119, 37, 148, 61] scratches_values = [129, 70, 103, 21, 59, 32] roots_values = [200, 105, 121, 101, 152, 114] for i, tracks in enumerate(trackers): imf.define_trackbar(tracks, 'Base', (hls_values[i], 255)) imf.define_trackbar(tracks, 'Cloth', (blue_values[i], 255)) imf.define_trackbar(tracks, 'Scratches', (scratches_values[i], 255)) imf.define_trackbar(tracks, 'ROE', (roots_values[i], 255)) imf.define_trackbar('gaussian blur', 'processing', (0,1)) # imf.define_trackbar('kernel', 'processing', (3,21)) # imf.define_trackbar('low edge', 'processing', (3,100)) # imf.define_trackbar('high edge', 'processing', (3,100)) # imf.define_trackbar('edge color space', 'processing', (0,3)) for category in class_name: # D used to skip categories D = 0 depth_paths = glob.glob(path.replace('\\', '/') + '/' + category + '/**/*aligned*.png', recursive=True) for i in range(10,20): if D: break depth_path = depth_paths[i] bgr_path = depth_path.replace('aligned', 'bgr') depth2_path = depth_path.replace('aligned', 'depth') depth2_img = imf.convert_to_16(cv2.imread(depth2_path)) depth_img = imf.convert_to_16(cv2.imread(depth_path)) bgr_img = cv2.imread(bgr_path) while (True): kernel = imf.retrieve_trackbar('kernel', 'blurs', True) if imf.retrieve_trackbar('gaussian blur', 'blurs'): blur = cv2.GaussianBlur(bgr_img, (kernel,kernel), 
cv2.BORDER_DEFAULT) else: blur = cv2.medianBlur(bgr_img, kernel) frame_hsi = cv2.cvtColor(blur, cv2.COLOR_BGR2HLS) frame_hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2YCrCb) hls_up = [] hls_low = [] blue_up = [] blue_low = [] scr_up = [] scr_low = [] roe_up = [] roe_low = [] for i in range(0,len(trackers),2): hls_up.append(imf.retrieve_trackbar(trackers[i], 'Base')) hls_low.append(imf.retrieve_trackbar(trackers[i+1], 'Base')) blue_up.append(imf.retrieve_trackbar(trackers[i], 'Cloth')) blue_low.append(imf.retrieve_trackbar(trackers[i+1], 'Cloth')) scr_up.append(imf.retrieve_trackbar(trackers[i], 'Scratches')) scr_low.append(imf.retrieve_trackbar(trackers[i+1], 'Scratches')) roe_up.append(imf.retrieve_trackbar(trackers[i], 'ROE')) roe_low.append(imf.retrieve_trackbar(trackers[i + 1], 'ROE')) # Generate area of interest from pipe depth data aoi_end = cv2.inRange(depth_img, int(np.max(depth_img) - 100), int(np.max(depth_img))) aoi_pipe = cv2.inRange(depth_img, 600, int(np.max(depth_img) - 100)) cnt, hir = cv2.findContours(aoi_pipe, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) pipe_mask = np.zeros_like(depth_img).astype('uint8') pipe_mask = cv2.fillPoly(pipe_mask, cnt, 255) bg_mask = cv2.subtract(pipe_mask, aoi_end) bg_mask = imf.open_img(bg_mask, 21, 21) bg_mask = cv2.dilate(bg_mask, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (21, 21))) hsi_aoi = cv2.bitwise_and(frame_hsi, frame_hsi, mask=bg_mask) # Edge detection # edge_space = imf.retrieve_trackbar('edge color space', 'processing') # if edge_space == 0: # canny = cv2.Canny(frame_hsi[:, :, 0], imf.retrieve_trackbar('low edge', 'processing'), imf.retrieve_trackbar('high edge', 'processing')) # canny = cv2.dilate(canny, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))) # elif edge_space == 1: # canny = cv2.Canny(frame_hsi[:, :, 1], imf.retrieve_trackbar('low edge', 'processing'), # imf.retrieve_trackbar('high edge', 'processing')) # canny = cv2.dilate(canny, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))) # elif 
edge_space == 2: # canny = cv2.Canny(frame_hsi[:, :, 2], imf.retrieve_trackbar('low edge', 'processing'), # imf.retrieve_trackbar('high edge', 'processing')) # canny = cv2.dilate(canny, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))) # elif edge_space == 3: # canny = cv2.Canny(imf.depth_to_display(depth_img), imf.retrieve_trackbar('low edge', 'processing'), # imf.retrieve_trackbar('high edge', 'processing')) # canny = cv2.dilate(canny, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))) """ HER ER MASKS mask1 = base mask2 = cloth mask3 = scratches Hvis du vil have dem individuelt kan du ændre til "bin = open_img((din mask), 7,7)" ellers kan du udkommentere subtract delene indtil det du gerne vil have """ mask1 = cv2.inRange(frame_hsi, np.asarray(hls_low), np.asarray(hls_up)) # Threshold around highlights mask2 = cv2.inRange(frame_hsi, np.asarray(blue_low), np.asarray(blue_up)) # Remove blue, due to the piece of cloth mask3 = cv2.inRange(frame_hsi, np.asarray(scr_low), np.asarray(scr_up)) # Remove blue, due to scratches mask4 = cv2.inRange(frame_hsv, np.asarray(roe_low), np.asarray(roe_up)) # Find roots and pipe edges hsi_thresh = cv2.add(mask1, mask4) hsi_thresh = cv2.subtract(hsi_thresh,mask2) hsi_thresh = cv2.subtract(hsi_thresh, mask3) # hsi_thresh = cv2.add(hsi_thresh, canny) bin = imf.open_img(hsi_thresh, 7, 7) imf.resize_image(bgr_img, 'original', 0.4) imf.resize_image(bin.copy(), 'binary', 0.4) imf.resize_image(mask4, 'blur', 0.4) imf.resize_image(imf.depth_to_display(depth_img), 'depth', 0.4) # imf.resize_image(imf.depth_to_display(canny), 'canny', 0.4) cv2.imwrite('result.png', bin) key = cv2.waitKey(1) if key == ord('q'): break if key == ord('d'): D = 1 break
[ 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 40480, 13, 23595, 10223, 1330, 1064, 62, 1236, 375, 343, 198, 6738, 40480, 1330, 33705, 36942, 62, 20786, 355, 545, 69, 198, 11748, 15095, 198, 198, 2, 16926, 46, 25, ...
1.965771
3,535
import numpy as np from loguru import logger from scipy import ndimage from skimage import img_as_ubyte, img_as_float from skimage import io from qtpy.QtWidgets import QRadioButton, QPushButton from qtpy import QtWidgets, QtCore, QtGui from survos2.frontend.components.base import * from survos2.frontend.control import Launcher _FeatureNotifier = PluginNotifier()
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 2604, 14717, 1330, 49706, 198, 6738, 629, 541, 88, 1330, 299, 67, 9060, 198, 6738, 1341, 9060, 1330, 33705, 62, 292, 62, 549, 88, 660, 11, 33705, 62, 292, 62, 22468, 198, 6738, 1341, 90...
3.008
125
#!/usr/bin/env python3.8 from argparse import ArgumentParser import os, pgen from cacti import cacti from cacti.cacti_interface_pb2 import CactiInput if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 13, 23, 198, 198, 6738, 1822, 29572, 1330, 45751, 46677, 198, 11748, 28686, 11, 279, 5235, 198, 6738, 269, 529, 72, 1330, 269, 529, 72, 198, 6738, 269, 529, 72, 13, 66, 529, 72, 62,...
2.638889
72
fizzbuzz()
[ 198, 69, 6457, 65, 4715, 3419, 198 ]
1.714286
7
import torch from torch import nn, optim, ones, randn, zeros, cat from generator import Generator from discriminator import Discriminator from prep import train_loader, batch_size import matplotlib.pyplot as plt # Setting learning rate, number of epochs and loss function lr = 0.001 n_epochs = 300 loss_function = nn.BCELoss() # Instantiating generator = Generator() discriminator = Discriminator() # Setting optimization algorithm optimizer_discriminator = optim.Adam(discriminator.parameters(), lr=lr) optimizer_generator = optim.Adam(generator.parameters(), lr=lr) # Training process for epoch in range(n_epochs): for n, (real_samples, _) in enumerate(train_loader): # Data for training the discriminator real_samples_labels = ones((batch_size, 1)) latent_space_samples = randn((batch_size, 2)) generated_samples = generator(latent_space_samples) generated_samples_labels = zeros((batch_size, 1)) all_samples = cat((real_samples, generated_samples)) all_samples_labels = cat((real_samples_labels, generated_samples_labels)) # Training the discriminator discriminator.zero_grad() output_discriminator = discriminator(all_samples) loss_discriminator = loss_function( output_discriminator, all_samples_labels ) loss_discriminator.backward() optimizer_discriminator.step() # Data for training the generator latent_space_samples = randn((batch_size, 2)) # Training the generator generator.zero_grad() generated_samples = generator(latent_space_samples) output_discriminator_generated = discriminator(generated_samples) loss_generator = loss_function( output_discriminator_generated, real_samples_labels ) loss_generator.backward() optimizer_generator.step() # Show loss value if epoch % 10 == 0 and n == batch_size - 1: print(f"Epoch: {epoch} , Loss D: {loss_discriminator}") print(f"Epoch: {epoch} , Loss G: {loss_generator}") # Checking the samples generated by GAN generated_samples = generated_samples.detach() plt.plot(generated_samples[:, 0], generated_samples[:, 1], ".") plt.show()
[ 11748, 28034, 198, 6738, 28034, 1330, 299, 77, 11, 6436, 11, 3392, 11, 43720, 77, 11, 1976, 27498, 11, 3797, 198, 6738, 17301, 1330, 35986, 198, 6738, 6534, 20900, 1330, 8444, 3036, 20900, 198, 6738, 3143, 1330, 4512, 62, 29356, 11, 1...
2.601841
869
#!/usr/bin/env python3 import os, sys, traceback import signal import asyncio from asyncio.subprocess import create_subprocess_exec, PIPE import aiohttp from service import load_config, run as service server_proc = None server_cmd = os.path.join(os.path.dirname(__file__), 'server.sh') server_name = 'twofishes' service_name = 'Geolocation' if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='UI Stack %s Service Entrypoint' % service_name, formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--config', '-c', metavar='FILE', type=str, default='config.yaml', help='database configuration file') parser.add_argument('--verbose', '-v', action='store_true', help='verbose mode') args = parser.parse_args() config = load_config(args.config) try: loop = asyncio.get_event_loop() asyncio.ensure_future(run_server(loop=loop)) loop.run_until_complete(service(config, verbose=args.verbose)) except KeyboardInterrupt: print('INTERRUPTED') except: print('EXCEPTION') traceback.print_exc()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 28686, 11, 25064, 11, 12854, 1891, 198, 11748, 6737, 198, 198, 11748, 30351, 952, 198, 6738, 30351, 952, 13, 7266, 14681, 1330, 2251, 62, 7266, 14681, 62, 18558, 11, 35...
2.763285
414
''' I2C LCD1602 demo Author: shaoziyang Date: 2018.2 http://www.micropython.org.cn ''' from machine import I2C import time l = LCD1620(I2C(1)) l.puts("Hello microbit!") n = 0 while 1: l.puts(str(n), 0, 1) n = n + 1 time.sleep_ms(1000)
[ 7061, 6, 198, 220, 220, 220, 314, 17, 34, 23598, 1433, 2999, 13605, 628, 220, 220, 220, 6434, 25, 427, 5488, 89, 7745, 648, 198, 220, 220, 220, 7536, 25, 220, 220, 2864, 13, 17, 628, 220, 220, 220, 2638, 1378, 2503, 13, 9383, 17...
1.985294
136
"""各種pythonファイルをコマンドラインから利用するためのCLIツール See Also: - `typer`_ .. _typer: https://qiita.com/iisaka51/items/18bde4dada0827fbe81e Example: ヘルプ >>> `python cli.py --help` サブコマンドのヘルプ >>> `python cli.py [command] --help` Attention: _は-で実行する """ from typing import Optional import typer from src.add.add import add from src.config import settings # noqa app = typer.Typer() @app.command() def hello() -> None: """hello""" typer.echo("hello") @app.command() def sample( text: Optional[str] = typer.Option(None, "-t", "--text", help="出力する文字列") ) -> None: """メインコマンド""" print("text:", text) print(settings.cfg.is_debug_mode) print(add(3, 5)) typer_click_object = typer.main.get_command(app)
[ 37811, 28938, 226, 163, 101, 106, 29412, 41939, 11482, 9202, 31758, 24679, 20115, 6527, 19073, 11482, 6527, 27370, 36853, 26344, 102, 18796, 101, 33623, 25748, 25224, 1792, 223, 5641, 5097, 40, 41115, 43353, 198, 198, 6214, 4418, 25, 198, ...
2.071429
364
'''Defines a pipeline step which aquires data from the pig data source. ''' import os from src import datasets from src.step import Step class GetPigData(Step): '''Defines a pipeline step which aquires data from the pig data source. ''' def __init__(self): '''Initializes a new instance of the GetPigData object. ''' super(GetPigData, self).__init__() self.input = { 'path': os.getenv('PIG_PATH'), } self.output = { 'Movies': 'data/raw/movies/pig.txt', 'Music': 'data/raw/music/pig.txt', 'TV Shows': 'data/raw/tv/pig.txt', } def run(self): '''Runs the pipeline step. ''' for (key, value) in self.output.items(): datasets.write_list_to_file( datasets.get_local_files(self.input['path'] % key), value)
[ 7061, 6, 7469, 1127, 257, 11523, 2239, 543, 14839, 2387, 1366, 422, 262, 12967, 1366, 2723, 13, 198, 198, 7061, 6, 198, 198, 11748, 28686, 198, 198, 6738, 12351, 1330, 40522, 198, 6738, 12351, 13, 9662, 1330, 5012, 198, 198, 4871, 349...
2.164619
407
from flask import render_template, session, redirect, url_for, Flask, request, flash, jsonify from numpy.core.multiarray import ndarray from . import rqalpha import os from multiprocessing import Process import sys sys.path.insert(0, "Z:\Hello\Work\Data\QT") from rqalpha import run_code from .. import db from ..models import Role, User, Strategy from flask_login import login_required, current_user import pickle as pk import numpy as np import time import datetime import json name = None @rqalpha.route('/result/<strategyname>', methods=['GET']) @login_required @rqalpha.route('/result', methods=['GET']) @login_required # noinspection PyGlobalUndefined @rqalpha.route("/result/weather", methods=["GET", "POST"]) @rqalpha.route("/hot", methods=["GET", "POST"])
[ 6738, 42903, 1330, 8543, 62, 28243, 11, 6246, 11, 18941, 11, 19016, 62, 1640, 11, 46947, 11, 2581, 11, 7644, 11, 33918, 1958, 198, 6738, 299, 32152, 13, 7295, 13, 16680, 12571, 2433, 1330, 299, 67, 18747, 198, 198, 6738, 764, 1330, ...
3.091633
251
"""Resource module for live view.""" import logging from aiohttp import web import aiohttp_jinja2 from sprint_webserver.services import ( DeltakereService, InnstillingerService, KjoreplanService, KlasserService, ResultatHeatService, StartListeService, ) class Live(web.View): """Class representing the live view.""" # TODO: reduser kompleksistet i denne funksjonen async def get(self) -> web.Response: # noqa: C901 """Get route function that return the live result page.""" _lopsinfo = await InnstillingerService().get_header_footer_info( self.request.app["db"], ) logging.debug(_lopsinfo) try: valgt_klasse = self.request.rel_url.query["klasse"] logging.debug(valgt_klasse) except Exception: valgt_klasse = "" try: valgt_startnr = self.request.rel_url.query["startnr"] except Exception: valgt_startnr = "" klasser = await KlasserService().get_all_klasser(self.request.app["db"]) deltakere = await DeltakereService().get_deltakere_by_lopsklasse( self.request.app["db"], valgt_klasse ) logging.debug(deltakere) kjoreplan = [] startliste = [] resultatliste = [] colseparators = [] colclass = "w3-third" if valgt_startnr == "": kjoreplan = await KjoreplanService().get_heat_for_live_scroll( self.request.app["db"], valgt_klasse ) # responsive design - determine column-arrangement colseparators = ["KA1", "KA5", "SC1", "SA1", "F1", "F5", "A1", "A5"] icolcount = 0 for heat in kjoreplan: if heat["Heat"] in colseparators: icolcount += 1 if (heat["Heat"] == "SC1") and heat["resultat_registrert"]: colseparators.remove("SC1") elif heat["Heat"] in {"FA", "FB", "FC"}: icolcount += 1 colseparators.append(heat["Heat"]) break if icolcount == 4: colclass = "w3-quart" colseparators.remove("KA1") colseparators.remove("F1") startliste = await StartListeService().get_startliste_by_lopsklasse( self.request.app["db"], valgt_klasse ) logging.debug(startliste) resultatliste = await ResultatHeatService().get_resultatheat_by_klasse( self.request.app["db"], valgt_klasse ) else: # only selected racer 
logging.debug(valgt_startnr) startliste = await StartListeService().get_startliste_by_nr( self.request.app["db"], valgt_startnr, ) logging.debug(startliste) for start in startliste: _heat = await KjoreplanService().get_heat_by_index( self.request.app["db"], start["Heat"], ) kjoreplan.append(_heat) if valgt_klasse == "": valgt_klasse = start["Løpsklasse"] logging.info(valgt_klasse) # check for resultat resultatliste = await ResultatHeatService().get_resultatheat_by_nr( self.request.app["db"], valgt_startnr, ) valgt_startnr = "Startnr: " + valgt_startnr + ", " """Get route function.""" return await aiohttp_jinja2.render_template_async( "live.html", self.request, { "lopsinfo": _lopsinfo, "valgt_klasse": valgt_klasse, "valgt_startnr": valgt_startnr, "colseparators": colseparators, "colclass": colclass, "klasser": klasser, "deltakere": deltakere, "kjoreplan": kjoreplan, "resultatliste": resultatliste, "startliste": startliste, }, )
[ 37811, 26198, 8265, 329, 2107, 1570, 526, 15931, 198, 11748, 18931, 198, 198, 6738, 257, 952, 4023, 1330, 3992, 198, 11748, 257, 952, 4023, 62, 18594, 6592, 17, 198, 198, 6738, 18553, 62, 732, 1443, 18497, 13, 30416, 1330, 357, 198, 2...
1.888134
2,208
"""DropNAS searching""" from torch import device import torch.nn as nn import xnas.core.config as config import xnas.logger.logging as logging import xnas.logger.meter as meter from xnas.core.config import cfg from xnas.core.builder import * # DropNAS from xnas.algorithms.DropNAS import * from xnas.runner.trainer import DartsTrainer from xnas.runner.optimizer import darts_alpha_optimizer # Load config and check config.load_configs() logger = logging.get_logger(__name__) class DropNAS_Trainer(DartsTrainer): """Trainer for DropNAS. Rewrite the train_epoch with DropNAS's double-losses policy. """ if __name__ == "__main__": main()
[ 37811, 26932, 18293, 10342, 37811, 198, 198, 6738, 28034, 1330, 3335, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 198, 11748, 2124, 24716, 13, 7295, 13, 11250, 355, 4566, 198, 11748, 2124, 24716, 13, 6404, 1362, 13, 6404, 2667, 355...
2.972973
222
#!/usr/bin/python import json from ansible.module_utils.basic import * if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 33918, 628, 628, 628, 198, 6738, 9093, 856, 13, 21412, 62, 26791, 13, 35487, 1330, 1635, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, ...
2.636364
44
#!/usr/bin/env python # -*- coding: utf-8 -*- """ ------------------------------------------------- @ Author : pengj @ date : 2019/10/22 11:00 @ IDE : PyCharm @ GitHub : https://github.com/JackyPJB @ Contact : pengjianbiao@hotmail.com ------------------------------------------------- Description : ------------------------------------------------- """ import time from typing import List __author__ = 'Max_Pengjb' start_time = time.time() # 下面写上代码块 # 上面中间写上代码块 end_time = time.time() print('Running time: %s Seconds' % (end_time - start_time)) aa = [1, 2, 3, 4]
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 47232, 12, 628, 220, 220, 220, 2488, 220, 220, 6434, 220, 1058, 220, 220, 220, 220, 220, 220, 279, 1516, ...
2.313167
281
class DoublyLinkedList: """ Time Complexity: Insertion = O(1) Removal = O(1) Searching = O(N) Access = O(N) Most of the logic is the same as the SLL except dealing with the extra prev point """ def push(self,val): """ Adds a new Node at the end of the DLL """ newNode = Node(val) if not self.head: self.head = newNode self.tail = self.head else: curTail = self.tail curTail.next = newNode newNode.prev = curTail self.tail = newNode self.length += 1 return self def pop(self): """ remove a node from the tail """ if not self.head: return None prevTail = self.tail if self.length == 1: self.head = None self.tail = None else: self.tail = prevTail.prev prevTail.prev = None self.tail.next = None self.length -= 1 return prevTail def shift(self): """ remove a node from the beginning of the Dll """ if not self.head: return None prevHead = self.head if self.length == 1: self.head = None self.tail = None else: self.head = prevHead.next prevHead.next = None prevHead.prev = None self.length -= 1 return prevHead def unshift(self,val): """ add a node at the beginning of the DLL """ newHead = Node(val) if not self.head: self.head = newHead self.tail = self.head else: self.head.prev = newHead newHead.next = self.head self.head = newHead self.length += 1 return self def get(self,index): """ get a node from a given index """ if index < 0 or index >= self.length: return None current = None half = self.length / 2 if index < half: counter = 0 current = self.head while counter != index: current = current.next counter += 1 else: counter = self.length - 1 current = self.tail while counter != index: current = current.prev counter -= 1 return current def setNode(self,index,val): """ set a node's value given its index and a new value """ node = self.get(index) if not node: return None node.val = val return node def insert(self,index,val): """ insert a new node at a given index """ if index < 0 or index > self.length: return False if index == 0: self.unshift(val) return True elif index == 
self.length: self.push(val) return True else: newNode = Node(val) prev = self.get(index-1) after = prev.next prev.next = newNode newNode.prev = prev newNode.next = after after.prev = newNode self.length += 1 return True def remove(self,index): """ remove a node from a given index """ if index < 0 or index >= self.length: return None if index == 0: return self.shift() elif index == self.length - 1: return self.pop() else: before = self.get(index-1) remove = before.next after = remove.next before.next = after after.prev = before remove.prev = None remove.next = None self.length -= 1 return remove DLL = DoublyLinkedList() DLL.push("val") DLL.push("val2") DLL.push("val3") print(DLL.setNode(1,"newval")) while DLL.head: print(DLL.head.val) DLL.head = DLL.head.next
[ 198, 4871, 5728, 36874, 11280, 276, 8053, 25, 198, 220, 220, 220, 37227, 198, 220, 220, 220, 3862, 19157, 414, 25, 220, 198, 220, 220, 220, 220, 220, 220, 220, 35835, 295, 796, 440, 7, 16, 8, 198, 220, 220, 220, 220, 220, 220, 2...
1.923186
2,122
import tushare as ts import time start = time.time() df = ts.pro_bar(ts_code='300851.SZ', adj='qfq', start_date='20200714', end_date='20200716') t0 = time.time() - start print(t0) print(df) arr = df.to_records() start = time.time() df = ts.get_k_data("603488", start='2020-07-14', end='2020-07-16', index=False, ktype='D', autype='qfq') t0 = time.time() - start print(t0) print(df)
[ 11748, 256, 1530, 533, 355, 40379, 198, 11748, 640, 198, 198, 9688, 796, 640, 13, 2435, 3419, 198, 7568, 796, 40379, 13, 1676, 62, 5657, 7, 912, 62, 8189, 11639, 6200, 23, 4349, 13, 50, 57, 3256, 9224, 11639, 80, 69, 80, 3256, 923...
2.291667
168