content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
import FWCore.ParameterSet.Config as cms from RecoEgamma.EgammaElectronProducers.gsfElectronSequence_cff import * from RecoEgamma.EgammaElectronProducers.uncleanedOnlyElectronSequence_cff import * from RecoEgamma.EgammaPhotonProducers.photonSequence_cff import * from RecoEgamma.EgammaPhotonProducers.conversionSequence_cff import * from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import * from RecoEgamma.EgammaPhotonProducers.allConversionSequence_cff import * from RecoEgamma.EgammaPhotonProducers.gedPhotonSequence_cff import * from RecoEgamma.EgammaIsolationAlgos.egammaIsolationSequence_cff import * from RecoEgamma.EgammaIsolationAlgos.interestingEgammaIsoDetIdsSequence_cff import * from RecoEgamma.PhotonIdentification.photonId_cff import * from RecoEgamma.ElectronIdentification.electronIdSequence_cff import * from RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff import * from TrackingTools.Configuration.TrackingTools_cff import * from RecoEgamma.EgammaIsolationAlgos.egmIsolationDefinitions_cff import * #importing new gedGsfElectronSequence : #from RecoEgamma.EgammaElectronProducers.gedGsfElectronSequence_cff import * from RecoEgamma.EgammaElectronProducers.pfBasedElectronIso_cff import * egammaGlobalRecoTask = cms.Task(electronGsfTrackingTask,conversionTrackTask,allConversionTask) egammaGlobalReco = cms.Sequence(egammaGlobalRecoTask) # this might be historical: not sure why we do this from Configuration.Eras.Modifier_fastSim_cff import fastSim _fastSim_egammaGlobalRecoTask = egammaGlobalRecoTask.copy() _fastSim_egammaGlobalRecoTask.replace(conversionTrackTask,conversionTrackTaskNoEcalSeeded) fastSim.toReplaceWith(egammaGlobalRecoTask, _fastSim_egammaGlobalRecoTask) egammarecoTask = cms.Task(gsfElectronTask,conversionTask,photonTask) egammareco = cms.Sequence(egammarecoTask) egammaHighLevelRecoPrePFTask = cms.Task(gsfEcalDrivenElectronTask,uncleanedOnlyElectronTask,conversionTask,photonTask) egammaHighLevelRecoPrePF = 
cms.Sequence(egammaHighLevelRecoPrePFTask) # not commisoned and not relevant in FastSim (?): fastSim.toReplaceWith(egammarecoTask, egammarecoTask.copyAndExclude([conversionTask])) fastSim.toReplaceWith(egammaHighLevelRecoPrePFTask,egammaHighLevelRecoPrePFTask.copyAndExclude([uncleanedOnlyElectronTask,conversionTask])) #egammaHighLevelRecoPostPFTask = cms.Task(gsfElectronMergingTask,interestingEgammaIsoDetIdsTask,photonIDTask,eIdTask,hfEMClusteringTask) #adding new gedGsfElectronTask and gedPhotonTask : #egammaHighLevelRecoPostPFTask = cms.Task(gsfElectronMergingTask,gedGsfElectronTask,interestingEgammaIsoDetIdsTask,gedPhotonTask,photonIDTask,eIdTask,hfEMClusteringTask) egammaHighLevelRecoPostPFTask = cms.Task(interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,photonIDTaskGED,eIdTask,hfEMClusteringTask) egammaHighLevelRecoPostPF = cms.Sequence(egammaHighLevelRecoPostPFTask) egammarecoFullTask = cms.Task(egammarecoTask,interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,eIdTask,hfEMClusteringTask) egammarecoFull = cms.Sequence(egammarecoFullTask) egammarecoWithIDTask = cms.Task(egammarecoTask,photonIDTask,eIdTask) egammarecoWithID = cms.Sequence(egammarecoWithIDTask) egammareco_woConvPhotonsTask = cms.Task(gsfElectronTask,photonTask) egammareco_woConvPhotons = cms.Sequence(egammareco_woConvPhotonsTask) egammareco_withIsolationTask = cms.Task(egammarecoTask,egammaIsolationTask) egammareco_withIsolation = cms.Sequence(egammareco_withIsolationTask) egammareco_withIsolation_woConvPhotonsTask = cms.Task(egammareco_woConvPhotonsTask,egammaIsolationTask) egammareco_withIsolation_woConvPhotons = cms.Sequence(egammareco_withIsolation_woConvPhotonsTask) egammareco_withPhotonIDTask = cms.Task(egammarecoTask,photonIDTask) egammareco_withPhotonID = cms.Sequence(egammareco_withPhotonIDTask) egammareco_withElectronIDTask = cms.Task(egammarecoTask,eIdTask) egammareco_withElectronID = cms.Sequence(egammareco_withElectronIDTask) egammarecoFull_woHFElectronsTask = 
cms.Task(egammarecoTask,interestingEgammaIsoDetIdsTask,photonIDTask,eIdTask) egammarecoFull_woHFElectrons = cms.Sequence(egammarecoFull_woHFElectronsTask) from Configuration.Eras.Modifier_pA_2016_cff import pA_2016 from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018 from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017 from Configuration.Eras.Modifier_ppRef_2017_cff import ppRef_2017 #HI-specific algorithms needed in pp scenario special configurations from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerpp from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerppGED from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerppIsland _egammaHighLevelRecoPostPF_HITask = egammaHighLevelRecoPostPFTask.copy() _egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerpp) _egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerppGED) _egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerppIsland) for e in [pA_2016, peripheralPbPb, pp_on_AA_2018, pp_on_XeXe_2017, ppRef_2017]: e.toReplaceWith(egammaHighLevelRecoPostPFTask, _egammaHighLevelRecoPostPF_HITask)
[ 11748, 48849, 14055, 13, 36301, 7248, 13, 16934, 355, 269, 907, 198, 198, 6738, 3311, 78, 36, 28483, 2611, 13, 36, 28483, 2611, 19453, 1313, 11547, 7999, 13, 14542, 69, 19453, 1313, 44015, 594, 62, 66, 487, 1330, 1635, 198, 6738, 3311...
2.626428
2,013
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """TF Dataset for BoolQ in same format as Fever TFDS.""" import json from language.serene import constants from language.serene import util import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 2864, 383, 3012, 9552, 15417, 4816, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393,...
3.627119
236
import json import pprint from urllib.request import urlopen with urlopen('http://pypi.python.org/pypi/Twisted/json') as url: http_info = url.info() raw_data = url.read().decode(http_info.get_content_charset()) project_info = json.loads(raw_data) pprint.pprint(project_info) print('------------------------------') pprint.pprint(project_info, depth=2) print('------------------------------') pprint.pprint(project_info, depth=2, width=50)
[ 11748, 33918, 198, 11748, 279, 4798, 198, 6738, 2956, 297, 571, 13, 25927, 1330, 19016, 9654, 198, 4480, 19016, 9654, 10786, 4023, 1378, 79, 4464, 72, 13, 29412, 13, 2398, 14, 79, 4464, 72, 14, 5080, 6347, 14, 17752, 11537, 355, 19016...
2.896774
155
from argparse import Namespace from functools import partial from typing import Any import molotov from .formatters import DefaultFormatter from .record_table import RecordTable from .recorder import Recorder from .reporter import Reporter from .scenario import Scenario __all__ = ("Reporter", "register_reporter", "scenario", "recorder") recorder = Recorder(RecordTable()) scenario = partial(Scenario, recorder.on_starting_scenario)
[ 6738, 1822, 29572, 1330, 28531, 10223, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 19720, 1330, 4377, 198, 198, 11748, 18605, 313, 709, 198, 198, 6738, 764, 18982, 1010, 1330, 15161, 8479, 1436, 198, 6738, 764, 22105, 62, 11487, ...
3.636364
121
from slugify import slugify from services.viewcounts.models import PageViewsModel def get_page_views(url: str): """Returns the number of views for a given page object.""" # Pre-processing checks: Client should not pass full or partial URL. if not url.startswith("/"): raise Exception("Partial URL detected, only POST the page path.") if ("http" in url) or ("localhost" in url): raise Exception("Full URL detected, only POST the page path.") # Boil down url to slug/path: path = url_to_path(url) print(f"User is at {path}") # Creates a new object if none exists. page, created = PageViewsModel.objects.get_or_create(path=path) # Add a view to the model if not created: page.views = page.views + 1 page.save() return page.views def url_to_path(url: str): """Converts an incoming url into a path-slug.""" return slugify(url, max_length=199)
[ 6738, 31065, 1958, 1330, 31065, 1958, 198, 198, 6738, 2594, 13, 1177, 9127, 82, 13, 27530, 1330, 7873, 7680, 82, 17633, 628, 198, 4299, 651, 62, 7700, 62, 33571, 7, 6371, 25, 965, 2599, 198, 220, 220, 220, 37227, 35561, 262, 1271, 2...
2.827795
331
import tensorflow as tf import tensorflow_probability as tfp from scipy.stats import expon from videos.linalg import safe_cholesky from manim import * # shortcuts tfd = tfp.distributions kernels = tfp.math.psd_kernels
[ 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 11192, 273, 11125, 62, 1676, 65, 1799, 355, 256, 46428, 198, 198, 6738, 629, 541, 88, 13, 34242, 1330, 1033, 261, 198, 6738, 5861, 13, 75, 1292, 70, 1330, 3338, 62, 354, 4316, 2584, ...
2.835443
79
""" This module defines the base model and associated functions """ from flask import Flask, jsonify from psycopg2.extras import RealDictCursor from ....database import db_con
[ 37811, 198, 1212, 8265, 15738, 262, 2779, 2746, 290, 3917, 5499, 198, 37811, 198, 6738, 42903, 1330, 46947, 11, 33918, 1958, 198, 6738, 17331, 22163, 70, 17, 13, 2302, 8847, 1330, 6416, 35, 713, 34, 21471, 198, 6738, 19424, 48806, 1330,...
3.869565
46
#!/usr/bin/env python3 # -*- coding:utf-8 -*- # ================================================================================================ # # Project : Deep Learning for Conversion Rate Prediction (CVR) # # Version : 0.1.0 # # File : \task.py # # Language : Python 3.7.12 # # ------------------------------------------------------------------------------------------------ # # Author : John James # # Email : john.james.ai.studio@gmail.com # # URL : https://github.com/john-james-ai/cvr # # ------------------------------------------------------------------------------------------------ # # Created : Wednesday, January 19th 2022, 5:34:06 pm # # Modified : Thursday, February 10th 2022, 9:28:37 pm # # Modifier : John James (john.james.ai.studio@gmail.com) # # ------------------------------------------------------------------------------------------------ # # License : BSD 3-clause "New" or "Revised" License # # Copyright: (c) 2022 Bryant St. Labs # # ================================================================================================ # from abc import ABC, abstractmethod import pandas as pd import inspect from dataclasses import dataclass, field from datetime import datetime, timedelta from cvr.utils.printing import Printer from cvr.core.asset import AssetPassport from cvr.core.dataset import Dataset # ---------------------------------------------------------------------------- # # TASK RESULT # # ---------------------------------------------------------------------------- # # ---------------------------------------------------------------------------- # # TASK RESPONSE # # ---------------------------------------------------------------------------- # # ---------------------------------------------------------------------------- # # TASK SUMMARY # # ---------------------------------------------------------------------------- # # ---------------------------------------------------------------------------- # # TASK # # 
---------------------------------------------------------------------------- # class Task(ABC): """Defines interface for task classes.""" def setup(self, **kwargs) -> None: # Logging facility self._logger = self._config.logger # Subclass specific setup self._setup() def _setup(self) -> None: pass def teardown(self, **kwargs) -> None: # Subclass specific teardown. self._teardown() # Base class gets last word self._result.executed = "No" if self._result.executed is False else "Yes" self._result.passed = "No" if self._result.passed is False else "Yes" self._result.complete = "No" if self._result.complete is False else "Yes" self._summary = TaskSummary( passport=self.passport, response=self.response, result=self.result, ) def _teardown(self, **kwargs) -> None: pass def summary(self) -> TaskSummary: return self._summary def summarize(self) -> None: self._summary.print() # ============================================================================ # # DATASET FACTORY # # ============================================================================ #
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 38093, 4770, 25609, 18604, 1303, 198, 2, 4935, 220, 1058, 10766, 18252, 329, 44101, 14806, 46690, 357, 34, 13024, 8...
2.282958
1,866
from typing import Set from numpy import ndarray
[ 6738, 19720, 1330, 5345, 198, 6738, 299, 32152, 1330, 299, 67, 18747, 628 ]
3.846154
13
"""traits/traitcommon.py - Common functionality across trait operations.""" import re from ..constants import UNIVERSAL_TRAITS VALID_TRAIT_PATTERN = re.compile(r"^[A-z_]+$") def validate_trait_names(*traits): """ Raises a ValueError if a trait doesn't exist and a SyntaxError if the syntax is bad. """ for trait in traits: if (trait_len := len(trait)) > 20: raise ValueError(f"`{trait}` is too long by {trait_len - 20} characters.") if trait.lower() in UNIVERSAL_TRAITS: raise SyntaxError(f"`{trait}` is a reserved trait and cannot be added/updated/deleted.") if VALID_TRAIT_PATTERN.match(trait) is None: raise SyntaxError(f"Traits can only have letters and underscores. Received `{trait}`.")
[ 37811, 9535, 896, 14, 9535, 270, 11321, 13, 9078, 532, 8070, 11244, 1973, 16708, 4560, 526, 15931, 198, 198, 11748, 302, 198, 198, 6738, 11485, 9979, 1187, 1330, 49677, 1847, 62, 51, 3861, 29722, 198, 198, 23428, 2389, 62, 51, 3861, 2...
2.504823
311
import unittest from ..model import RandomForestWithFeatureSelection from sklearn.model_selection import train_test_split import os import numpy as np if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 11485, 19849, 1330, 14534, 34605, 3152, 38816, 4653, 1564, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 4512, 62, 9288, 62, 35312, 198, 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 628, 628, ...
3.258065
62
import matplotlib.pyplot as plt; plt.rcdefaults() import csv import sqlite3 as lite from calendar import monthrange from datetime import datetime, date, timedelta from datetimerange import DateTimeRange import numpy as np import pycountry_convert as pc from dateutil.relativedelta import relativedelta from forex_python.converter import CurrencyRates, RatesNotAvailableError import random import pandas as pd DATABASE_NAME = 'JobDetails.db' con = lite.connect(DATABASE_NAME) cur = con.cursor() bidNames = ["Bid ID", "Job ID", "Country", "User", "Price", "Currency"] jobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost", "Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Year", "Week", "Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment", "Category Type Two", "Possible Months"] reviewJobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost", "Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Date Scraped", "Time Ago", "Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment", "Possible Years", "Category Type Two", "Possible Months"] profileNames = ["Profile ID", "Username", "Number Of Reviews", "Average Review", "Hourly Rate", "Earnings Percentage", "Country"] qualificationNames = ["Qualification ID", "Qualification Type", "User", "Qualification Name", "Extra Information"] reviewNames = ["Review ID", "Project URL", "Profile", "Score", "Amount Paid", "Currency", "Converted Currency", "Date Scraped", "Date", "Country", "Notes", "Date Range", "Possible Months", "Possible Years"] winnerNames = ["Job ID", "Job URL", "Username", "Profile URL"] names = {"Bids": bidNames, "Jobs": jobNames, "JobsHourly": jobNames, "ReviewJobs": reviewJobNames, "Profiles": profileNames, "Qualifications": qualificationNames, "Reviews": reviewNames, 
"Winners": winnerNames} # Converts the currency to USD at the historic rate # Retrieves saved details to plot # Generates multiple windows of bar charts to display the countries of bidders - grouped by continent # Saving values from the database to CSV files # def doExtras(): # # doAverages() # # jobConversions() # # reviewJobConversions() # # conversions() # # getDateRanges() # # possibleYears() # # plotYears('Projects') # doExtras() # avConversions()
[ 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 26, 198, 198, 489, 83, 13, 6015, 12286, 82, 3419, 198, 11748, 269, 21370, 198, 11748, 44161, 578, 18, 355, 300, 578, 198, 6738, 11845, 1330, 1227, 9521, 198, 6738, 4818, 8079, ...
2.976404
890
from flask import Flask, render_template, request, jsonify import numpy as np import pickle import sys import json import re app = Flask(__name__) app.config['JSON_AS_ASCII'] = False target_names = [ 'AE', 'BH', 'DZ', 'EG', 'IQ', 'JO', 'KW', 'LB', 'LY', 'MA', 'OM','PL', 'QA', 'SA', 'SD', 'SY', 'TN', 'YE' ] arabic_dialects = { 'AE': ' ', 'BH': ' ', 'DZ': ' ', 'EG': ' ', 'IQ': ' ', 'JO': ' ', 'KW': ' ', 'LB': ' ', 'LY': ' ', 'MA': ' ', 'OM': ' ', 'PL': ' ', 'QA': ' ', 'SA': ' ', 'SD': ' ', 'SY': ' ', 'TN': ' ', 'YE': ' ' } if __name__ == '__main__': app.run(debug=True)
[ 6738, 42903, 1330, 46947, 11, 8543, 62, 28243, 11, 2581, 11, 33918, 1958, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 2298, 293, 201, 198, 11748, 25064, 201, 198, 11748, 33918, 201, 198, 11748, 302, 201, 198, 201, 198, 2...
1.990909
330
data = [i.strip() for i in open("input.txt").readlines()] two = 0 three = 0 for code in data: counts = {} for i in range(0,len(code)): if code[i] in counts.keys(): counts[code[i]] += 1 else: counts[code[i]] = 1 if (2 in counts.values()): two += 1 if (3 in counts.values()): three += 1 print(two*three)
[ 7890, 796, 685, 72, 13, 36311, 3419, 329, 1312, 287, 1280, 7203, 15414, 13, 14116, 11074, 961, 6615, 3419, 60, 198, 198, 11545, 796, 657, 198, 15542, 796, 657, 198, 1640, 2438, 287, 1366, 25, 198, 220, 220, 220, 9853, 796, 23884, 19...
2.010471
191
# -*- coding: utf-8 -*- import factory from symposion.speakers.models import Speaker from conf_site.accounts.tests.factories import UserFactory
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 8860, 198, 198, 6738, 5659, 1930, 295, 13, 4125, 3979, 13, 27530, 1330, 14931, 198, 198, 6738, 1013, 62, 15654, 13, 23317, 82, 13, 41989, 13, 22584, 1749, 1330, ...
3.12766
47
# flake8: noqa import pokediadb.dbuilder.version import pokediadb.dbuilder.type import pokediadb.dbuilder.ability import pokediadb.dbuilder.move import pokediadb.dbuilder.pokemon
[ 2, 781, 539, 23, 25, 645, 20402, 198, 11748, 47320, 72, 324, 65, 13, 9945, 3547, 263, 13, 9641, 198, 11748, 47320, 72, 324, 65, 13, 9945, 3547, 263, 13, 4906, 198, 11748, 47320, 72, 324, 65, 13, 9945, 3547, 263, 13, 1799, 198, 1...
2.594203
69
#!/usr/bin/python3 import sys import os import io from orderedset import OrderedSet from shell import Shell import logpath as LogPath VERSION = '1.1' HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' current_path = os.getcwd() current_meta = '' if current_path.endswith('meta-signage') == True: current_meta = 'signage' elif current_path.endswith('meta-commercial') == True: current_meta = 'commercial' elif current_path.endswith('meta-id') == True: current_meta = 'id' else: print('You should execute this file in [%smeta-id, meta-commercial, meta-signage%s] path' % (WARNING, ENDC)) exit() if __name__ == '__main__': generater = GeneratorLog() generater.parseLog() generater.makeLog() InputHelper().process(generater.listLogs)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 11748, 25064, 198, 11748, 28686, 198, 11748, 33245, 198, 6738, 6149, 2617, 1330, 14230, 1068, 7248, 198, 6738, 7582, 1330, 17537, 198, 11748, 2604, 6978, 355, 5972, 15235, 198, 198, 43717, ...
2.510029
349
# These were used when I was trying to map between controllers # To map to a wheel - but was defeated in that by using a driver # 2021 comment (What did I mean there?) GAMEPAD_TRIANGLE = (0, 0x08) GAMEPAD_CIRCLE = (0, 0x04) GAMEPAD_CROSS = (0, 0x02) GAMEPAD_SQUARE = (0, 0x01) GAMEPAD_DPAD_MASK = 0x0F GAMEPAD_DPAD_NONE = (2, 0x0F) GAMEPAD_DPAD_U = (2, 0x00) GAMEPAD_DPAD_R = (2, 0x02) GAMEPAD_DPAD_D = (2, 0x04) GAMEPAD_DPAD_L = (2, 0x06) GAMEPAD_PSMENU = (1, 0x10) GAMEPAD_SELECT = (1, 0x01) GAMEPAD_START = (1, 0x02) GAMEPAD_LJOY_BUTTON = (1, 0x04) GAMEPAD_RJOY_BUTTON = (1, 0x08) GAMEPAD_L1 = (0, 0x10) GAMEPAD_R1 = (0, 0x20) GAMEPAD_L2 = (0, 0x40) GAMEPAD_R2 = (0, 0x80) GAMEPAD_RTRIGGER = 18 GAMEPAD_LTRIGGER = 17 # These are Bytes not Bits GAMEPAD_LJOY_X = 3 GAMEPAD_LJOY_Y = 4 GAMEPAD_RJOY_X = 5 GAMEPAD_RJOY_Y = 6 CLICKER_BUTTONS = 2 CLICKER_LEFT = [0x4B] CLICKER_RIGHT = [0x4E] CLICKER_UP = [0x05] CLICKER_DOWN = [0x3E, 0x29] # Toggles STEER_MIN = 0x0000 STEER_MAX = 0x3FFF STEER_MID = 0x1FFF WHEEL_NEUTRAL = [0x08, 0x00, 0x00, 0x5E, 0x00, 0x20, 0x7F, 0xFF] WHEEL_TRIANGLE = (0, 0x80) WHEEL_CIRCLE = (0, 0x40) WHEEL_CROSS = (0, 0x10) WHEEL_SQUARE = (0, 0x20) WHEEL_DPAD_MASK = 0x0F WHEEL_DPAD_NONE = (0, 0x08) WHEEL_DPAD_U = (0, 0x00) WHEEL_DPAD_R = (0, 0x02) WHEEL_DPAD_D = (0, 0x04) WHEEL_DPAD_L = (0, 0x06) WHEEL_RPADDLE = (1, 0x01) WHEEL_LPADDLE = (1, 0x02) WHEEL_L1 = (1, 0x80) WHEEL_L2 = (1, 0x08) WHEEL_R1 = (1, 0x40) WHEEL_R2 = (1, 0x04) WHEEL_SELECT = (1, 0x10) WHEEL_START = (1, 0x20) WHEEL_PSMENU = (2, 0x08) WHEEL_GEARUP = (2, 0x01) WHEEL_GEARDOWN = (2, 0x02) WHEEL_BACK = (2, 0x04) WHEEL_ADJUST_CLOCKWISE = (2, 0x10) WHEEL_ADJUST_ANTICLOCKWISE = (2, 0x20) WHEEL_PLUS = (2, 0x80) WHEEL_MINUS = (2, 0x40) # Bytes WHEEL_WHEEL_HIGHBYTE = 5 WHEEL_WHEEL_LOWBYTE = 4 # 0000-EFF3 But 0000 is extreme WHEEL_ACCELERATEBYTE = 6 # 0-FF 0 IS DOWN WHEEL_BRAKEBYTE = 7 # 0-FF 0 IS DOWN # (FromByte,From Bit) -> (ToByte,ToBit) # Wheel Has dedicated Gear buttons and Shifter that arent on the 
controller # Stick Click is not used in TDU2 at all so will use that BUTTON_MAPPINGS = [ (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE), (GAMEPAD_CIRCLE, WHEEL_CIRCLE), (GAMEPAD_SQUARE, WHEEL_SQUARE), (GAMEPAD_CROSS, WHEEL_CROSS), (GAMEPAD_R1, WHEEL_R2), (GAMEPAD_L1, WHEEL_L2), (GAMEPAD_PSMENU, WHEEL_PSMENU), (GAMEPAD_START, WHEEL_START), (GAMEPAD_SELECT, WHEEL_SELECT), (GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN), (GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP), ] #These made it work in PS3 menu screen XMB_BUTTON_MAPPINGS = [ (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE), (GAMEPAD_CIRCLE, WHEEL_CIRCLE), (GAMEPAD_CROSS, WHEEL_SQUARE), (GAMEPAD_SQUARE, WHEEL_CROSS), (GAMEPAD_R1, WHEEL_R2), (GAMEPAD_L1, WHEEL_L2), (GAMEPAD_PSMENU, WHEEL_PSMENU), (GAMEPAD_START, WHEEL_START), (GAMEPAD_SELECT, WHEEL_SELECT), (GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN), (GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP), ] DPAD_MAPPINGS = [ (GAMEPAD_DPAD_NONE, WHEEL_DPAD_NONE), (GAMEPAD_DPAD_U, WHEEL_DPAD_U), (GAMEPAD_DPAD_D, WHEEL_DPAD_D), (GAMEPAD_DPAD_L, WHEEL_DPAD_L), (GAMEPAD_DPAD_R, WHEEL_DPAD_R), ] STEAM_BUTTON_MAPPINGS = [ WHEEL_CROSS,WHEEL_CIRCLE,WHEEL_TRIANGLE,WHEEL_SQUARE, WHEEL_START,WHEEL_PSMENU,WHEEL_SELECT, WHEEL_GEARUP,WHEEL_GEARDOWN,WHEEL_L1,WHEEL_R1 ] STEAM_BUTTONS2_MAPPINGS = [WHEEL_LPADDLE,WHEEL_RPADDLE,WHEEL_PLUS,WHEEL_MINUS] STEAM_DPAD_MAPPINGS = [ WHEEL_DPAD_U,WHEEL_DPAD_L,WHEEL_DPAD_D,WHEEL_DPAD_R]
[ 2, 2312, 547, 973, 618, 314, 373, 2111, 284, 3975, 1022, 20624, 198, 2, 1675, 3975, 284, 257, 7825, 532, 475, 373, 9772, 287, 326, 416, 1262, 257, 4639, 198, 2, 33448, 2912, 357, 2061, 750, 314, 1612, 612, 10091, 198, 198, 38, 239...
1.737024
2,023
# Copyright (c) 2017 Huawei, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.template import defaultfilters as filters from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from horizon import tables from conveyordashboard.common import actions as common_actions from conveyordashboard.common import constants as consts from conveyordashboard.common import resource_state def get_size(volume): return _("%sGiB") % volume.size def get_volume_type(volume): return volume.volume_type if volume.volume_type != "None" else None def get_encrypted_value(volume): if not hasattr(volume, 'encrypted') or volume.encrypted is None: return _("-") elif volume.encrypted is False: return _("No") else: return _("Yes")
[ 2, 15069, 357, 66, 8, 2177, 43208, 11, 3457, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 198, 2, 220, 220, 220, 407, 779, 428, 2393, 2845, 287...
3.225653
421
import requests import re import sqlite3 db =sqlite3.connect("db_tver.db",check_same_thread =False) cur = db.cursor() cur.execute( '''CREATE TABLE if not exists `tver` ( `reference_id` TEXT NOT NULL, `service` TEXT NOT NULL, `player_id` TEXT NOT NULL, `name` TEXT NOT NULL, `title` TEXT, `subtitle` TEXT, `catchup_id` TEXT, `url` TEXT, `service_name` TEXT, `id` TEXT NOT NULL, `json` TEXT, `updated_at` TIMESTAMP, `done` BOOL, UNIQUE (reference_id,player_id,id) );''') ''' /corner/ /episode/ /feature/ ''' pagepattern = re.compile(r'''addPlayer\(\s*?'(?P<player_id>.*?)',\s*?'(?P<player_key>.*?)',\s*?'(?P<catchup_id>.*?)',\s*?'(?P<publisher_id>.*?)',\s*?'(?P<reference_id>.*?)',\s*?'(?P<title>.*?)',\s*?'(?P<subtitle>.*?)',\s*?'(?P<service>.*?)',\s*?'(?P<servicename>.*?)',''') policykeypattern = re.compile(r'''catalog\(\{accountId:\"?(?P<accountId>.*?)\"?,policyKey:\"(?P<policyKey>.*?)\"''') BCOV_POLICY = { #YTV "5330942432001":"BCpkADawqM0kGrWxZoXJvJj5Uv6Lypjp4Nrwzz1ktDAuEbD1r_pj0oR1900CRG04FFkxo0ikc1_KmAlB4uvq_GnFwF4IsG_v9jhYOMajC9MkdVQ-QrpboS7vFV8RvK20V5v-St5WGPfXotPx", #TX "3971130137001":"BCpkADawqM1F2YPxbuFJzWtohXjxdgDgIJcsnWacQKaAuaf0gyu8yxCQUlca9Dh7V0Uu_8Rt5JUWZTpgcqzD_IT5hRVde8JIR7r1UYR73ne8S9iLSroqTOA2P-jtl2EUw_OrSMAtenvuaXRF", #TBS "4031511847001":"BCpkADawqM1n_azNkrwm-kl2UhijTLt4W7KZ6KS9HluAoLPvyRFu2X4Xu2dUuW-lLOmc6X7WjsiBwh83m8ecNmxl-pVy9w3M9iI6-en-_wIDvNJixpoMf4BhdOPtwO_7XIol9P3wVrq2BIzw", "4394098881001":"BCpkADawqM3m-3484dphPL5raj3jQJVlFecOYAvpxhtJaK99BVRKtxd9SC6q0kOsknI1FD3kplVUaJzneAQb55EkCcDHrD9m_yoesmjsIfJpKQXJKfmQ5LfAFJnmf2Sv48heP_R1PGznwbAn", #NTV "4394098882001":"BCpkADawqM1s6XkqRoC2a0eEORY7FFF780eHkHQZ93Fw752A9swymrSMZEVF1d7G3mSby3Etzj8MGJp_ZwXpbSTH1ApfZxZ1FSPQ4LXDQhpaMRADtCbxKFTpAxGYwN61DYKKksmg4uwcdhLD", #MBS "5102072605001":"BCpkADawqM1VhDl0FtgrrM8jB-hVNkcrdrx4x9C_60OSeN4jIHynGkIKw0PY1cOsRqQYJOnJRscPAbdPTcpzZ_4g89Gcte_yQFW-yeWxzrPECulIh9ZlaZsJ_3rH7Gjs_RnuWHx_lTzilaxh", #KTV 
"5718741494001":"BCpkADawqM1llDtMelQ9nQyE91bAc-E5T1B0135MCCRZ_o4FlDkGWQY8t8Nrt1fJKAgui-kLefX-JGaRItrDXh_C1GlIgCSv-rhNPQYKJsY8nZp_IoJ38Mf3B5BSJLFulW0QhgQduipc9j4D", #EX no publisherid "4031511847001":"BCpkADawqM2N0e6IdrmQn-kEZJ0jRi-Dlm0aUZ9mVF2lcadunJzMVYD6j_51UZzQ3mXuIeV8Zx_UUvbGeeJn73SSrpm0xD7qtiKULPP2NEsp_rgKoVxVWTNZAHN-JAHcuIpFJT7PvUj6gpZv", #ABC "5102072603001":"BCpkADawqM2NfzEA47jZiJNK0SYahFenNwAtoehfrIAaCqxmHjBidnt_YfvFnp5j-Zi58FPj-zXAHATYU1nnOOuEf9XXV8JRGYSuZ5dgyNc2RjGv2Ej5zGfhxWs3_p4F7huxtbAD9fzQlg7b", #World cup "5764318572001":"BCpkADawqM3KJLCLszoqY9KsoXN2Mz52LwKx4UXYRuEaUGr-o3JBSHmz_0WRicxowBj8vmbGRK_R7Us96DdBYuYEoVX9nHJ3DjkVW5-8L6bRmm6gck8IaeLLw21sM6mOHtNs9pIJPF6a4qSZlO6t_RlkpMY6sasaIaSYlarJ_8PFMPdxxfY6cGtJDnc" } linkPattern = re.compile(r'''(\/episode\/.*?)\/?\"|(\/corner\/.*?)\/?\"|(\/feature\/.*?)\/?\"''') findAll() findAllByBrand() # updateJson()
[ 11748, 7007, 198, 11748, 302, 198, 11748, 44161, 578, 18, 198, 198, 9945, 220, 796, 25410, 578, 18, 13, 8443, 7203, 9945, 62, 83, 332, 13, 9945, 1600, 9122, 62, 31642, 62, 16663, 220, 796, 25101, 8, 198, 22019, 796, 20613, 13, 66, ...
1.588364
1,822
from pathlib import Path from click.testing import CliRunner from resolos.interface import res, res_run from resolos.shell import run_shell_cmd from tests.common import verify_result import logging logger = logging.getLogger(__name__)
[ 6738, 3108, 8019, 1330, 10644, 198, 6738, 3904, 13, 33407, 1330, 1012, 72, 49493, 198, 6738, 581, 349, 418, 13, 39994, 1330, 581, 11, 581, 62, 5143, 198, 6738, 581, 349, 418, 13, 29149, 1330, 1057, 62, 29149, 62, 28758, 198, 6738, 5...
3.449275
69
import os import sys op_name = [] with open("name.txt") as lines: for line in lines: line = line.strip() op_name.append(line) with open("shape.txt") as lines: index = 0 for line in lines: name = op_name[index] line = line.strip() items = line.split("\t") if "conv" in name: input_shape = [int(s) for s in items[0].split("#")[0].split("[")[1].split("]")[0].split(",")] weight_shape = [int(s) for s in items[0].split("#")[1].split("[")[1].split("]")[0].split(",")] output_shape = [int(s) for s in items[1].split("[")[1].split("]")[0].split(",")] flops = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * weight_shape[0] * weight_shape[1] * weight_shape[2] * 2 elif "add" in name: output_shape = [int(s) for s in items[1].split("[")[1].split("]")[0].split(",")] flops = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] else: flops = 0 print flops index+=1
[ 11748, 28686, 198, 11748, 25064, 198, 198, 404, 62, 3672, 796, 17635, 198, 4480, 1280, 7203, 3672, 13, 14116, 4943, 355, 3951, 25, 198, 197, 1640, 1627, 287, 3951, 25, 198, 197, 197, 1370, 796, 1627, 13, 36311, 3419, 198, 197, 197, ...
2.285024
414
# Wrapper for Mesh Bee library # helping to easier communicate with Mesh Bee module # # Copyright (C) 2014 at seeedstudio # Author: Jack Shao (jacky.shaoxg@gmail.com) # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import os import re import glob import binascii import logging from pan.mesh_bee import * from factory import Factory Factory.register(MeshBeeWrapper)
[ 2, 220, 220, 27323, 2848, 329, 47529, 24719, 5888, 198, 2, 220, 220, 5742, 284, 4577, 10996, 351, 47529, 24719, 8265, 198, 2, 198, 2, 220, 220, 15069, 357, 34, 8, 1946, 379, 766, 276, 19149, 952, 198, 2, 220, 220, 6434, 25, 3619, ...
3.447619
420
import unittest import json import requests from unittest import mock from .reqhandler import send_req from pythonapm.agent import Agent from pythonapm import constants
[ 11748, 555, 715, 395, 220, 198, 11748, 33918, 198, 11748, 7007, 198, 6738, 555, 715, 395, 1330, 15290, 198, 6738, 764, 42180, 30281, 1330, 3758, 62, 42180, 198, 6738, 21015, 499, 76, 13, 25781, 1330, 15906, 198, 6738, 21015, 499, 76, ...
3.804348
46
from django.contrib import admin # Register your models here. from .models import * # To import all the model from .models, then specify those in register admin.site.register(Customer) admin.site.register(Staff) admin.site.register(Service) admin.site.register(Appointment)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 198, 2, 17296, 534, 4981, 994, 13, 198, 198, 6738, 764, 27530, 1330, 220, 1635, 1303, 1675, 1330, 477, 262, 2746, 422, 764, 27530, 11, 788, 11986, 883, 287, 7881, 198, 198, 28482, ...
3.597403
77
worlds_data = { 'around': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(1, 9): 1, (2, 10): 1, (8, 10): 1, (10, 10): 1, (9, 10): 1, (5, 10): 1, (10, 8): 1, (10, 4): 1, (10, 1): 1, (8, 1): 1, (7, 1): 1, (6, 1): 1, (5, 1): 1, (3, 1): 1, (1, 6): 1, (1, 5): 1, (1, 3): 1}}, 'around2': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(2, 1): 2, (3, 1): 3, (5, 1): 2, (7, 1): 1, (10, 1): 1, (10, 4): 3, (10, 3): 1, (10, 7): 2, (10, 6): 1, (10, 10): 4, (10, 9): 3, (9, 10): 1, (7, 10): 2, (5, 10): 1, (4, 10): 1, (3, 10): 1, (2, 10): 1, (1, 10): 2, (1, 8): 1, (1, 6): 4, (1, 5): 1, (1, 3): 3, (1, 2): 1}}, 'around3': {'avenues': 6, 'streets': 6, 'walls': [], 'beepers': {(2, 1): 2, (3, 1): 1, (6, 1): 1, (6, 2): 3, (6, 3): 1, (6, 6): 2, (4, 6): 3, (1, 6): 1, (1, 4): 2, (1, 3): 1, (1, 2): 1}}, 'cave': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (4, 1), (5, 4), (2, 5), (3, 6), (5, 6), (6, 3), (6, 1), (8, 1), (8, 3), (9, 4), (10, 3), (11, 2), (1, 8), (3, 8), (5, 8), (7, 8), (8, 7), (14, 1), (14, 3), (13, 4), (11, 6), (12, 7), (13, 8), (14, 7), (14, 5), (9, 8)], 'beepers': {(6, 5): 1}}, 'cave2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (4, 1), (4, 3), (4, 5), (3, 6), (1, 8), (3, 8), (5, 8), (6, 7), (7, 8), (9, 8), (10, 7), (9, 6), (8, 5), (8, 1), (10, 1), (10, 3), (7, 4), (6, 3)], 'beepers': {(6, 3): 1}}, 'cave3': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (1, 4), (5, 2), (6, 1), (3, 4), (5, 6), (3, 6), (2, 5), (6, 3), (7, 6), (8, 5), (8, 1), (9, 2), (12, 1), (12, 3), (12, 5), (9, 4), (12, 7), (11, 8), (11, 6), (9, 8), (7, 8), (5, 8), (3, 8)], 'beepers': {(1, 5): 4, (2, 2): 2, (3, 3): 3, (4, 2): 1, (6, 2): 1, (5, 4): 1, (1, 4): 3}}, 'cave4': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (1, 4), (3, 2), (5, 2), (3, 4), (5, 6), (6, 5), (7, 4), (8, 3), (8, 1), (2, 5), (1, 8), (3, 8), (5, 8), (7, 8), (9, 8), (9, 6), (10, 5), (11, 8), (12, 7), (12, 5), (11, 4), (12, 1), (10, 3)], 'beepers': {(3, 2): 1, (2, 4): 3, (4, 4): 3, (7, 2): 
4}}, 'chimney': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (2, 9), (2, 11), (4, 11), (4, 9), (4, 7), (4, 5), (4, 3), (3, 12), (5, 2), (6, 3), (6, 5), (7, 6), (8, 5), (8, 3), (9, 2), (11, 2), (12, 3), (12, 5), (12, 7), (12, 9), (13, 10), (14, 9), (14, 7), (14, 5), (14, 3), (15, 2), (16, 3), (16, 5), (16, 7), (16, 9), (16, 11), (16, 13), (16, 15), (17, 16), (18, 15), (18, 13), (18, 11), (18, 9), (18, 7), (18, 5), (18, 3), (19, 2)], 'beepers': {(2, 6): 1, (2, 5): 1, (2, 4): 2, (2, 2): 1, (9, 7): 1, (9, 5): 2, (9, 4): 3, (4, 3): 5, (7, 2): 1, (7, 4): 3, (7, 3): 1, (7, 5): 1}}, 'chimney2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (3, 8), (4, 7), (4, 5), (4, 3), (4, 9), (4, 11), (4, 13), (4, 15), (5, 16), (6, 15), (6, 13), (6, 11), (6, 9), (6, 7), (6, 5), (6, 3), (7, 2), (8, 3), (10, 3), (11, 2), (13, 2), (14, 3), (16, 3), (18, 3), (17, 2), (18, 5), (18, 7), (18, 9), (18, 11), (18, 13), (18, 15), (19, 16), (15, 4), (8, 5), (10, 5), (10, 11), (9, 12), (8, 11), (8, 9), (10, 9), (10, 7), (8, 7)], 'beepers': {(3, 8): 2, (8, 2): 3, (2, 3): 2, (2, 4): 1, (3, 3): 3, (3, 2): 2, (3, 5): 3, (3, 6): 1, (5, 2): 2, (5, 6): 1, (10, 7): 2}}, 'chimney3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (3, 8), (4, 7), (4, 5), (4, 3), (4, 9), (4, 11), (5, 12), (6, 11), (6, 9), (6, 7), (6, 5), (6, 3), (7, 2), (9, 2), (10, 3), (10, 5), (10, 7), (11, 8), (12, 9), (12, 11), (13, 12), (14, 11), (14, 9), (15, 8), (16, 9), (16, 11), (16, 15), (16, 13), (16, 17), (18, 17), (18, 15), (18, 13), (18, 11), (18, 9), (19, 8), (13, 2), (15, 2), (17, 2), (19, 2), (13, 4), (15, 4), (17, 4), (19, 4), (13, 6), (15, 6), (17, 6), (19, 6), (17, 18)], 'beepers': {(3, 2): 1, (2, 3): 3, (2, 4): 2, (3, 4): 6, (3, 5): 1, (7, 6): 5, (7, 5): 1, (9, 5): 3, (9, 7): 2}}, 'mine': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(2, 1): 1, (3, 1): 
1, (5, 1): 1, (8, 1): 1, (10, 1): 1}}, 'mine2':{'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(2, 1): 2, (3, 1): 2, (6, 1): 3, (5, 1): 1, (8, 1): 1, (10, 1): 4}}, 'mine3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(10, 1): 5, (9, 1): 1, (8, 1): 3, (6, 1): 2, (1, 1): 2, (2, 1): 1, (3, 1): 3}}, 'mine4': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (6, 3), (7, 4), (8, 1), (9, 2), (11, 2), (12, 1), (9, 4), (11, 4), (13, 4), (14, 3), (15, 2), (17, 2), (19, 2)], 'beepers': {(10, 1): 2, (8, 1): 3, (7, 2): 1, (7, 1): 1, (4, 2): 6, (5, 2): 1, (4, 1): 1, (3, 1): 2}}, 'mine5': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (6, 3), (9, 2), (8, 1), (10, 1), (7, 4), (9, 4), (11, 4), (12, 3), (13, 2), (14, 3), (14, 5), (14, 7), (15, 8), (17, 8), (19, 8), (17, 6), (16, 5), (18, 5), (19, 4), (16, 3), (16, 1)], 'beepers': {(10, 3): 1, (2, 1): 2, (4, 1): 3, (5, 2): 2, (7, 1): 3, (8, 2): 4, (8, 3): 1, (8, 4): 2}}, 'stairs': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (16, 15), (17, 16), (18, 17), (19, 18)], 'beepers': {(10, 10): 1}}, 'stairs2': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (7, 4), (8, 5), (9, 6), (11, 6), (12, 7), (13, 8), (14, 9), (15, 10), (17, 10), (18, 11), (19, 12)], 'beepers': {(10, 7): 1}}, 'stairs3': {'avenues': 10, 'streets': 10, 'walls': [(4, 1), (5, 2), (6, 3), (7, 4), (9, 4), (11, 4), (12, 5), (13, 6), (14, 7), (15, 8), (17, 8), (18, 9), (19, 10)], 'beepers': {(10, 6): 1}}, 'stairs4': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (4, 3), (5, 4), (7, 4), (9, 4), (11, 4), (12, 5), (13, 6), (15, 6), (16, 7), (17, 8), (18, 9), (19, 10)], 'beepers': {(4, 3): 1}}, 'coins': {'avenues': 
10, 'streets': 10, 'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (2, 3), (2, 5), (2, 7), (2, 9), (2, 11), (2, 13), (2, 15), (2, 17), (2, 19)], 'beepers': {(2, 1): 1, (4, 1): 3, (5, 1): 2, (8, 1): 3, (7, 1): 6, (1, 2): 3, (1, 10): 1, (1, 8): 3, (1, 9): 1, (1, 4): 1}}, 'coins2': {'avenues': 10, 'streets': 10, 'walls': [(2, 19), (2, 17), (2, 15), (2, 13), (2, 11), (2, 9), (2, 7), (2, 5), (2, 3), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(6, 1): 1, (7, 1): 1, (5, 1): 2, (10, 1): 3, (2, 1): 1, (1, 2): 3, (1, 3): 2, (1, 6): 4, (1, 10): 7}}, 'news': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (4, 3), (5, 4), (6, 3), (7, 2), (8, 3), (9, 4), (10, 3), (11, 2), (13, 2), (14, 3), (15, 4), (16, 3), (17, 2), (19, 2)], 'beepers': {}}, 'news2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (3, 4), (4, 3), (5, 2), (6, 3), (7, 4), (8, 3), (9, 2), (10, 3), (11, 4), (12, 3), (15, 2), (17, 2), (13, 2), (18, 3), (19, 4)], 'beepers': {}}, 'news3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 4), (4, 3), (6, 3), (7, 4), (8, 3), (9, 4), (10, 3), (11, 4), (12, 3), (13, 2), (14, 3), (15, 4), (16, 3), (17, 4), (18, 3), (19, 2)], 'beepers': {}}, 'read': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(10, 1): 7}}, 'read2': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(9, 1): 2, (10, 1): 4, (8, 1): 3}}, 'read3': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(6, 1): 2, (8, 1): 3, (9, 1): 1, (10, 1): 7}}, 'hurdles1': { 'avenues': 10, 'streets': 10, 'walls': [(4, 1), (8, 1), (12, 1), (16, 1)], 'beepers': {(10, 1): 1}, }, 'hurdles2': { 'avenues': 10, 'streets': 10, 'walls': [(4, 1), (8, 1), (12, 1), (16, 1)], 'beepers': {(7, 1): 1}, }, 'hurdles3': { 'avenues': 10, 'streets': 10, 'walls': [(4, 1), (8, 1), (16, 1), (2, 1), (10, 1), (18, 1), (12, 1)], 'beepers': {(10, 1): 1}, }, 'beepers1': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': 
{(3, 1): 1}, }, 'corner3_4': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {}, }, 'rain1': { 'avenues': 10, 'streets': 10, 'walls': [(5, 6), (4, 7), (4, 9), (4, 13), (4, 15), (5, 16), (9, 16), (13, 16), (15, 16), (16, 15), (16, 11), (16, 9), (16, 7), (15, 6), (11, 6), (7, 6)], 'beepers': {}, }, 'newspaper': { 'avenues': 10, 'streets': 10, 'walls': [(4, 1), (5, 2), (7, 2), (8, 3), (9, 4), (11, 4), (12, 5), (13, 6), (15, 6), (16, 7), (17, 8), (19, 8)], 'beepers': {}, }, 'hurdles4': { 'avenues': 10, 'streets': 10, 'walls': [(4, 1), (8, 1), (16, 1), (2, 1), (10, 1), (18, 1), (12, 1), (4, 3), (10, 3), (10, 5)], 'beepers': {(10, 1): 1}, }, 'frank18': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(7, 4): 1, (3, 7): 2, (7, 1): 19, (6, 6): 2, (3, 4): 2}, }, 'rain2': { 'avenues': 12, 'streets': 9, 'walls': [(5, 6), (7, 6), (11, 6), (13, 6), (15, 6), (16, 5), (17, 4), (21, 4), (22, 5), (22, 9), (22, 11), (22, 15), (21, 16), (19, 16), (15, 16), (13, 16), (9, 16), (5, 16), (4, 15), (4, 13), (4, 9), (4, 7)], 'beepers': {}, }, 'wrong': { 'avenues': 10, 'streets': 10, 'walls': [10, (10, 3), (10, 5), (1, 10), (3, 10), (5, 10), (2, 1), (2, 3), (1, 6), (3, 6), (4, 5), (4, 3), (5, 2), (6, 3), (7, 8), (5, 8), (2, 7), (7, 10), (8, 7), (9, 6), (8, 3), (9, 4), (9, 10), (10, 9)], 'beepers': {(6, 4): 1}, }, 'hanoi3': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(2, 1): 3, (2, 2): 2, (2, 3): 1}, }, 'fairy_tale': { 'avenues': 14, 'streets': 8, 'walls': [(1, 10), (3, 10), (4, 9), (5, 8), (6, 7), (9, 8), (11, 8), (12, 7), (12, 5), (12, 3), (12, 1)], 'beepers': {}, }, 'hanoi4': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(2, 4): 1, (2, 1): 4, (2, 2): 3, (2, 3): 2}, }, 'empty': { 'avenues': 8, 'streets': 8, 'walls': [], 'beepers': {}, }, 'trash1': { 'avenues': 10, 'streets': 10, 'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (1, 4), (2, 3)], 'beepers': {(6, 1): 1, (3, 1): 3, (5, 1): 1, (10, 1): 2, (7, 1): 2}, }, 
'trash2': { 'avenues': 10, 'streets': 10, 'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (1, 4), (2, 3)], 'beepers': {(9, 1): 1, (5, 1): 13, (2, 1): 2, (7, 1): 2}, }, 'trash3': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(1, 2): 18, (7, 3): 4, (4, 8): 1, (5, 6): 7, (7, 1): 4, (9, 2): 11, (8, 8): 1, (1, 10): 3, (2, 5): 3, (5, 8): 2, (7, 9): 2}, }, 'trash4': { 'avenues': 11, 'streets': 10, 'walls': [], 'beepers': {(6, 9): 3, (1, 3): 2, (9, 8): 2, (10, 6): 1, (5, 1): 2, (1, 11): 2, (10, 3): 1, (5, 5): 2, (2, 9): 1, (6, 10): 2, (1, 5): 1, (2, 2): 1, (8, 6): 2, (4, 10): 1, (8, 2): 1, (8, 11): 2, (9, 10): 3, (4, 11): 1, (2, 7): 1, (4, 6): 1, (9, 2): 1, (3, 4): 3, (5, 7): 1, (3, 8): 3, (7, 8): 5}, }, 'amazing3a': { 'avenues': 7, 'streets': 7, 'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13)], 'beepers': {(1, 2): 1, (2, 7): 1, (3, 2): 1, (1, 3): 1, (3, 3): 1, (1, 7): 1, (1, 4): 1, (2, 4): 1, (1, 5): 1, (2, 6): 1, (1, 6): 1, (3, 6): 1, (2, 2): 1, (2, 3): 1, (3, 7): 1, (2, 5): 1, (3, 4): 1, (1, 1): 1, (3, 5): 1}, }, 'yardwork': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(1, 2): 18, (7, 3): 4, (4, 8): 1, (5, 6): 7, (7, 1): 4, (9, 2): 11, (8, 8): 1, (1, 10): 3, (2, 5): 3, (5, 8): 2, (7, 9): 2}, }, 'sort1': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(1, 2): 1, (1, 3): 1, (2, 2): 1, (1, 4): 1, (2, 4): 1, (1, 5): 1, (1, 6): 1, (2, 1): 1, (1, 7): 1, (2, 3): 1, (2, 5): 1, (1, 1): 1}, }, 'harvest4': { 'avenues': 7, 'streets': 7, 'walls': [], 'beepers': {(7, 3): 1, (6, 6): 1, (5, 6): 1, (3, 2): 1, (2, 1): 1, (6, 2): 1, (5, 1): 2, (2, 5): 1, (7, 2): 1, (5, 5): 1, (7, 6): 1, (4, 4): 1, (3, 6): 1, (2, 2): 2, (3, 5): 1, (4, 1): 1, (6, 4): 1, (5, 4): 1, (7, 1): 1, (4, 5): 1, (2, 3): 1, (4, 2): 1, (6, 5): 2, (5, 3): 2, (4, 6): 1, (6, 1): 1, (7, 4): 1, (4, 3): 1, (3, 4): 2, (2, 4): 1}, }, 'amazing5': { 'avenues': 7, 'streets': 7, 'walls': [(3, 2), (6, 5), (6, 7), (6, 9), (6, 
11), (6, 13), (4, 1), (2, 3), (3, 4), (5, 4)], 'beepers': {}, }, 'maze1': { 'avenues': 10, 'streets': 10, 'walls': [(10, 1), (10, 3), (10, 5), (1, 10), (3, 10), (5, 10), (2, 1), (2, 3), (1, 6), (3, 6), (4, 5), (4, 3), (5, 2), (6, 3), (7, 8), (5, 8), (2, 7), (7, 10), (8, 7), (9, 6), (8, 3), (9, 4), (9, 10), (10, 9)], 'beepers': {(6, 4): 1}, }, 'harvest1': { 'avenues': 7, 'streets': 7, 'walls': [], 'beepers': {(3, 3): 1, (3, 2): 1, (3, 1): 1, (5, 6): 1, (5, 1): 1, (3, 6): 1, (5, 3): 1, (5, 2): 1, (7, 6): 1, (7, 5): 1, (7, 4): 1, (7, 3): 1, (7, 2): 1, (7, 1): 1, (3, 5): 1, (3, 4): 1, (2, 4): 1, (2, 5): 1, (2, 6): 1, (2, 1): 1, (2, 2): 1, (2, 3): 1, (4, 6): 1, (4, 4): 1, (4, 5): 1, (4, 2): 1, (4, 3): 1, (4, 1): 1, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 1, (6, 6): 1, (5, 5): 1, (5, 4): 1}, }, 'amazing1': { 'avenues': 5, 'streets': 5, 'walls': [], 'beepers': {}, }, 'harvest2': { 'avenues': 12, 'streets': 12, 'walls': [], 'beepers': {(7, 3): 1, (6, 10): 1, (6, 6): 1, (2, 8): 1, (10, 6): 1, (7, 7): 1, (4, 6): 1, (6, 2): 1, (7, 11): 1, (3, 7): 1, (10, 8): 1, (5, 5): 1, (4, 4): 1, (8, 10): 1, (4, 8): 1, (8, 6): 1, (5, 3): 1, (9, 7): 1, (4, 10): 1, (2, 6): 1, (5, 11): 1, (5, 9): 1, (7, 5): 1, (6, 12): 1, (6, 4): 1, (3, 5): 1, (11, 7): 1, (6, 8): 1, (5, 7): 1, (9, 9): 1, (8, 8): 1, (7, 9): 1, (1, 7): 1, (9, 5): 1, (3, 9): 1, (8, 4): 1}, }, 'amazing3': { 'avenues': 7, 'streets': 7, 'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13)], 'beepers': {}, }, 'amazing2': { 'avenues': 7, 'streets': 7, 'walls': [(6, 13), (6, 11), (6, 9), (13, 6), (11, 6), (9, 6), (7, 6), (6, 7)], 'beepers': {}, }, 'harvest3': { 'avenues': 7, 'streets': 7, 'walls': [], 'beepers': {(7, 3): 1, (6, 6): 1, (5, 6): 1, (3, 2): 1, (2, 1): 1, (6, 2): 1, (5, 1): 1, (2, 5): 1, (7, 2): 1, (7, 6): 1, (4, 4): 1, (3, 6): 1, (2, 2): 1, (3, 5): 1, (4, 1): 1, (6, 4): 1, (5, 4): 1, (7, 1): 1, (4, 5): 1, (5, 5): 1, (2, 3): 1, (4, 2): 1, (6, 5): 1, (5, 3): 1, (4, 6): 1, (3, 4): 1, 
(6, 1): 1, (7, 4): 1, (4, 3): 1, (2, 4): 1}, }, 'add1': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(10, 1): 3, (10, 2): 2} }, 'add2': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(9, 2): 1, (9, 1): 2, (10, 1): 2, (10, 2): 3} }, 'add34': { 'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(8, 2): 9, (7, 1): 1, (8, 1): 3, (9, 2): 8, (10, 1): 4, (10, 2): 7} }, }
[ 198, 6894, 82, 62, 7890, 796, 1391, 198, 220, 220, 220, 705, 14145, 10354, 1391, 6, 4005, 947, 10354, 838, 11, 705, 22853, 1039, 10354, 838, 11, 705, 86, 5691, 10354, 685, 4357, 705, 1350, 538, 364, 10354, 1391, 7, 16, 11, 860, 25...
1.679877
9,437
from abc import ABCMeta, abstractproperty, abstractmethod import inspect import random import re import time from requests.exceptions import HTTPError def __call__(self,**kwargs): formatter = dict.fromkeys(self._path_keys) for _path_key, _validator in self._path_keys.items(): _value = kwargs.pop(_path_key,None) if not _validator(_value) : raise RoutePathInvalidException(_path_key, _value, self.path, _validator) formatter[_path_key] = _value _path = self.path if self.path[0] != '/' else self.path[1:] _path = _path.format(**formatter) if self._watcher: self._watcher(str(self),kwargs.pop('info','call')) try: _result = self.session.request(self.httpMethod, _path, **kwargs) self._watcher(str(self),'200') return _result except HTTPError as HE: self._watcher(str(self), str(HE.response)) raise return self.session.request(self.httpMethod, _path, **kwargs) def call_when(self, condition=lambda x:True, call=lambda x: None, step=1, timeout=500, **kwargs): _remaining = timeout if self._watcher: kwargs['info'] = 'call' while _remaining > 0: _remaining = _remaining - step time.sleep(step) _res = self.__call__(**kwargs) if condition(_res) : return call(_res) elif kwargs.get('info', None) == 'call': kwargs['info'] = 'retry' if self._watcher: self._watcher(str(self),'timeout') return None def wait_until(self, condition=lambda x:True, step=1, timeout=60, **kwargs): _remaining = timeout if self._watcher: kwargs['info'] = 'call' while _remaining > 0: _remaining = _remaining - step time.sleep(step) _res = self.__call__(**kwargs) if condition(_res) : return _res elif kwargs.get('info', None) == 'call': kwargs['info'] = 'retry' if self._watcher: self._watcher(str(self),'timeout') return None class Resource(object): __metaclass__ = ABCMeta def __repr__(self): return '{} <{}>'.format(self.__class__.__name__, id(self))
[ 6738, 450, 66, 1330, 9738, 48526, 11, 12531, 26745, 11, 12531, 24396, 198, 11748, 10104, 198, 11748, 4738, 198, 11748, 302, 198, 11748, 640, 198, 6738, 7007, 13, 1069, 11755, 1330, 14626, 12331, 628, 220, 220, 220, 825, 11593, 13345, 83...
2.062016
1,161
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-01-06 00:30 from __future__ import unicode_literals from django.db import migrations
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 319, 2864, 12, 486, 12, 3312, 3571, 25, 1270, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738,...
2.754717
53
from flask import Flask from flask import render_template, redirect, url_for from flask import request import blockChain app = Flask(__name__) if __name__ == '__main__': app.run(debug=True) import hashlib import json import os from time import time BLOCKCHAIN_DIR = os.curdir + '/blocks/' if __name__ == '__main__': # for i in range(10): # write_block(str(i),True) for i in range(2,10): print(check_block(str(i))) print(check_blocks_integrity())
[ 6738, 42903, 1330, 46947, 198, 6738, 42903, 1330, 8543, 62, 28243, 11, 18941, 11, 19016, 62, 1640, 198, 6738, 42903, 1330, 2581, 198, 11748, 2512, 35491, 198, 198, 1324, 796, 46947, 7, 834, 3672, 834, 8, 198, 198, 361, 11593, 3672, 83...
2.596859
191
__author__ = 'TechWhizZ199'
[ 834, 9800, 834, 796, 705, 17760, 1199, 528, 57, 19104, 6 ]
2.454545
11
import os import wget import time import argparse import subprocess import geckodriver_autoinstaller import chromedriver_autoinstaller from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver import FirefoxOptions from selenium.webdriver import DesiredCapabilities from selenium.webdriver import DesiredCapabilities from selenium.common.exceptions import TimeoutException from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.common.keys import Keys from selenium_stealth import stealth from selenium.webdriver.common.desired_capabilities import DesiredCapabilities #def execute_with_retry(method, max_attempts): # e = None # for i in range(0, max_attempts): # try: # return method() # except Exception as e: # print(e) # time.sleep(1) # if e is not None: # raise e if __name__ == '__main__': args = get_args() user = args.user link = args.link url_list = args.url_list if link == None: if url_list == None: print('Please enter an url or an url file!') exit() links = open(url_list, 'r').read().splitlines() else: links = [link] log_file = open('log.txt','w') images_err = open('images_err.txt', 'w') #geckodriver_autoinstaller.install() chromedriver_autoinstaller.install() options = webdriver.ChromeOptions() options.add_argument("--start-maximized") options.add_argument("--user-data-dir=C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(user)) driver = webdriver.Chrome(options=options) images_folder = 'images' print('Creating folder ' + images_folder + '...!') log_file.write('Creating folder ' + images_folder + '...!\n') os.makedirs(images_folder, exist_ok=True) num_links = len(links) cont = 0 for link in links: dpath = 'images/' + str(cont).zfill(4) os.mkdir(dpath) print('\nDownloading ' + str(cont) + '/' + str(num_links) + '...') log_file.write('Downloading ' + 
str(cont) + '/' + \ str(num_links) + '...\n') cont += 1 print('Accessing pinterest link: ' + link) log_file.write('Accessing pinterest link: ' + link + '\n') try: driver.get(link) print('Link successfully accessed!') log_file.write('Link successfully accessed!\n') except TimeoutException as e: print('Could not access the link:' + link) log_file.write('Could not access the link:' + link + '\n') #exit() print('Waitning page load...') log_file.write('Waiting page load...\n') time.sleep(10) last_height = driver.execute_script("return document.body.scrollHeight") urls = [] len_urls = 0 change_times = 0 scroll_times = 0 print('Searching images... It can take a long time!') log_file.write('Searching images... It can take a long time!\n') cont_images = 0 while True: link_tags = driver.find_elements_by_tag_name('img') for tag in link_tags: try: url = tag.get_attribute('srcset') url = url.split(' ') if len(url) == 8: url = url[6] urls.append(url) except: continue driver.execute_script("window.scrollBy(0, 50);") scroll_times += 1 if scroll_times == 50: cont_images += len(urls) download_images(urls, dpath) urls = [] new_height = driver.execute_script("return document.body.scrollHeight") if new_height == last_height or cont_images > 20000: break else: last_height = new_height scroll_times = 0 log_file.close() images_err.close()
[ 11748, 28686, 201, 198, 11748, 266, 1136, 201, 198, 11748, 640, 201, 198, 11748, 1822, 29572, 201, 198, 11748, 850, 14681, 201, 198, 11748, 4903, 694, 375, 38291, 62, 23736, 17350, 263, 201, 198, 11748, 15358, 276, 38291, 62, 23736, 173...
2.09299
2,097
from kafka import KafkaProducer import json import random from time import sleep from datetime import datetime # Create an instance of the Kafka producer producer = KafkaProducer(bootstrap_servers='kafka-server:9092', value_serializer=lambda m: json.dumps( m).encode('utf-8'), api_version=(0, 11, 5)) stream_algorithm_str = {"id":"1","import_str": "from sklearn.tree import DecisionTreeClassifier", "alg_str": "DecisionTreeClassifier", "parameters_str": None, "db_training_path": "test_training.csv","db_test_path":"test_test.csv"} producer.send('sk-individual-topic', stream_algorithm_str) # block until all async messages are sent producer.flush()
[ 6738, 479, 1878, 4914, 1330, 46906, 11547, 2189, 198, 11748, 33918, 198, 11748, 4738, 198, 6738, 640, 1330, 3993, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 2, 13610, 281, 4554, 286, 262, 46906, 9920, 198, 18230, 2189, 796, 4690...
2.359517
331
""":mod:`kinsumer.helpers` --- Implements various helpers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ from threading import RLock _missing = object()
[ 15931, 1298, 4666, 25, 63, 5331, 6975, 13, 16794, 364, 63, 11420, 1846, 1154, 902, 2972, 49385, 198, 27156, 27156, 27156, 15116, 93, 198, 198, 37811, 198, 6738, 4704, 278, 1330, 371, 25392, 198, 198, 62, 45688, 796, 2134, 3419, 628, 1...
4.095238
42
import os import tempfile import unittest from py4j.protocol import Py4JJavaError from pyspark.ml import Pipeline from pyspark.sql import types as t from mleap.pyspark.feature.string_map import StringMap from mleap.pyspark.spark_support import SimpleSparkSerializer from tests.pyspark.lib.assertions import assert_df from tests.pyspark.lib.spark_session import spark_session INPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False), t.StructField('extra_col', t.StringType(), False)]) OUTPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False), t.StructField('extra_col', t.StringType(), False), t.StructField('value_col', t.DoubleType(), False)]) def _serialize_to_file(model, df_for_serializing): jar_file_path = _to_jar_file_path( os.path.join(tempfile.mkdtemp(), 'test_serialize_to_bundle-pipeline.zip')) SimpleSparkSerializer().serializeToBundle(model, jar_file_path, df_for_serializing) return jar_file_path def _to_jar_file_path(path): return "jar:file:" + path def _deserialize_from_file(path): return SimpleSparkSerializer().deserializeFromBundle(path)
[ 11748, 28686, 198, 11748, 20218, 7753, 198, 11748, 555, 715, 395, 198, 198, 6738, 12972, 19, 73, 13, 11235, 4668, 1330, 9485, 19, 41, 29584, 12331, 198, 6738, 279, 893, 20928, 13, 4029, 1330, 37709, 198, 6738, 279, 893, 20928, 13, 254...
2.46
500
from .predict import YolactK from .data import * __version__ = "0.1.0"
[ 6738, 764, 79, 17407, 1330, 575, 349, 529, 42, 198, 6738, 764, 7890, 1330, 1635, 198, 198, 834, 9641, 834, 796, 366, 15, 13, 16, 13, 15, 1, 198 ]
2.482759
29
#!/usr/bin/env python # coding: utf-8 # In[6]: import pandas as pd import io import requests import time import random # In[3]: # gets the hidden API keys api_key = pd.read_csv('secrets.csv').api_key.to_string().split()[1] # In[124]: # gets data using user's parameters def get_data(symbol, interval): """ Signature: get_data(symbol, period) -> 'DataFrame' Docstring: Retrieves market data for the selected symbol and period. Parameters ---------- symbol : str The name of the equity of your choice. For example: symbol=GOOGL. interval : str Time interval between two consecutive data points in the time series. The following values are supported: 1min, 5min, 15min, 30min, 60min. Returns ------- DataFrame Examples -------- >>> get_data('GOOGL', '60min') """ # main url or alphavantage and selection of features from user BASE_URL = 'https://www.alphavantage.co/query?' q = { 'function':'TIME_SERIES_INTRADAY_EXTENDED', 'symbol':symbol, 'interval':interval, 'slice':'year1month1', 'apikey':'KO4L9YMRD2VLJX8O' } df=pd.DataFrame() for y in range(1,3): for m in range(1,13): # create 'slices' of 1 month each. has to do with how the api functions q['slice'] = f'year{y}month{m}' # concatenate all user's selected values into one string q_str = "".join([i for i in [str(i) + "=" + str(q[i]) + "&" for i in q]])[:-1] # concatenate the base alphavantage url with the user's query url = BASE_URL + q_str print(url) # GET url response = requests.get(url) # read data into a pandas dataframe df=pd.concat([df, pd.read_csv(io.StringIO(response.content.decode('utf-8')))], axis=0) # because the free api has a limit of 5 calls per minute, we need to wait time.sleep(60/5) # returns a dataframe return(df) # In[125]: # auto complete function for stocks def auto_complete_stocks(x): """ Signature: auto_complete_stocks(str) -> 'json' Docstring: Makes use of the auto-completion function of Alpha Vantage API. It takes the user's input and returns a json with the coincidences. 
Parameters ---------- symbol : str A string containing part of the symbol or description of the equity. For example 'amaz' would return the symbol and description for AMZN stocks, etc. Returns ------- json """ BASE_URL = 'https://www.alphavantage.co/query?' url = f'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={x}&datatype=json&apikey={api_key}' response = requests.get(url).json() return(response) # In[ ]: # to fetch all updated stocks and ETFs supported def get_supported_stocks(): """ Signature: get_supported_stocks() -> 'DataFrame' Docstring: Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API. See https://www.alphavantage.co/ Returns ------- DataFrame Examples -------- >>> get_supported_stocks() """ BASE_URL = 'https://www.alphavantage.co/query?' url = f'https://www.alphavantage.co/query?function=LISTING_STATUS&apikey={api_key}' response = requests.get(url) x=pd.read_csv(io.StringIO(response.content.decode('utf-8'))) return(x) # In[ ]: # to fetch all updated stocks and ETFs supported # static version loading from .csv previously downloaded def get_supported_stocks_static(): """ Signature: get_supported_stocks() -> 'DataFrame' Docstring: Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API. This 'static' version loads the list from a .csv file. Returns ------- DataFrame Examples -------- >>> get_supported_stocks() """ x = pd.read_csv('data/stocks_etfs_list.csv') l1 = x['symbol'].to_list() l2 = x['name'].to_list() l3 = [str(i) + " - " + str(j) for i, j in zip(l1, l2)] return(l1, l2, l3)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 554, 58, 21, 5974, 628, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 33245, 198, 11748, 7007, 198, 11748, 640, 198, 11748, 4738, 6...
2.441995
1,724
#!/bin/python3 from collections import Counter _ = int(input().strip()) socks = list(map(int, input().strip().split(' '))) print(pairs(socks))
[ 2, 48443, 8800, 14, 29412, 18, 198, 198, 6738, 17268, 1330, 15034, 628, 198, 198, 62, 796, 493, 7, 15414, 22446, 36311, 28955, 198, 82, 3320, 796, 1351, 7, 8899, 7, 600, 11, 5128, 22446, 36311, 22446, 35312, 10786, 705, 22305, 198, ...
2.882353
51
import os from dpsniper.utils.my_multiprocessing import initialize_parallel_executor from dpsniper.utils.paths import get_output_directory, set_output_directory from statdpwrapper.algorithms_ext import * from statdpwrapper.experiments.base import run_statdp from statdpwrapper.experiments.mechanism_config import statdp_mechanism_map, statdp_arguments_map,\ statdp_postprocessing_map, statdp_sensitivity_map, statdp_num_inputs_map
[ 11748, 28686, 198, 198, 6738, 288, 862, 45554, 13, 26791, 13, 1820, 62, 16680, 541, 305, 919, 278, 1330, 41216, 62, 1845, 29363, 62, 18558, 38409, 198, 6738, 288, 862, 45554, 13, 26791, 13, 6978, 82, 1330, 651, 62, 22915, 62, 34945, ...
3.113475
141
__all__ = ['symimg'] from tempfile import mktemp from .reflect_image import reflect_image from .interface import registration from .apply_transforms import apply_transforms from ..core import image_io as iio def symimg(img, gs=0.25): """ Symmetrize an image Example ------- >>> import ants >>> img = ants.image_read( ants.get_ants_data('r16') , 'float') >>> simg = ants.symimg(img) """ imgr = reflect_image(img, axis=0) imgavg = imgr * 0.5 + img for i in range(5): w1 = registration(imgavg, img, type_of_transform='SyN') w2 = registration(imgavg, imgr, type_of_transform='SyN') xavg = w1['warpedmovout']*0.5 + w2['warpedmovout']*0.5 nada1 = apply_transforms(img, img, w1['fwdtransforms'], compose=w1['fwdtransforms'][0]) nada2 = apply_transforms(img, img, w2['fwdtransforms'], compose=w2['fwdtransforms'][0]) wavg = (iio.image_read(nada1) + iio.image_read(nada2)) * (-0.5) wavgfn = mktemp(suffix='.nii.gz') iio.image_write(wavg, wavgfn) xavg = apply_transforms(img, imgavg, wavgfn) return xavg
[ 198, 198, 834, 439, 834, 796, 37250, 37047, 9600, 20520, 198, 198, 6738, 20218, 7753, 1330, 33480, 29510, 198, 198, 6738, 764, 35051, 62, 9060, 1330, 4079, 62, 9060, 198, 6738, 764, 39994, 1330, 9352, 198, 6738, 764, 39014, 62, 7645, ...
2.184971
519
import datetime time=datetime.datetime.today().strftime("%H-%M-%S") text_file = open("/home/pi/TutorCal/CRONtest/"+time+".txt", "w") text_file.write("Hello world!") text_file.close()
[ 11748, 4818, 8079, 198, 198, 2435, 28, 19608, 8079, 13, 19608, 8079, 13, 40838, 22446, 2536, 31387, 7203, 4, 39, 12, 4, 44, 12, 4, 50, 4943, 198, 198, 5239, 62, 7753, 796, 1280, 7203, 14, 11195, 14, 14415, 14, 51, 38409, 9771, 14,...
2.453333
75
import dlib import face_recognition import glob import pickle import cv2 import numpy as np import os from PIL import Image,ImageFont, ImageDraw, ImageEnhance # from google.colab.patches import cv2_imshow add_target_faces('known') faces = load_encoded_faces('encoded_faces.pkl') identify_faces_video('al.mp4', faces, 1)
[ 11748, 288, 8019, 201, 198, 11748, 1986, 62, 26243, 653, 201, 198, 11748, 15095, 201, 198, 11748, 2298, 293, 201, 198, 11748, 269, 85, 17, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 28686, 201, 198, 6738, 350, 4146, 133...
2.737705
122
from . import text from .base import IFormatter try: from . import binary except ImportError: binary = None
[ 6738, 764, 1330, 2420, 198, 6738, 764, 8692, 1330, 314, 8479, 1436, 198, 198, 28311, 25, 198, 220, 220, 220, 422, 764, 1330, 13934, 198, 16341, 17267, 12331, 25, 198, 220, 220, 220, 13934, 796, 6045, 198 ]
3.162162
37
from django.conf.urls import include, url from django.contrib import admin urlpatterns = [ url(r'^example/', include('pulpo_example.urls')), url(r'^pulpo/', include('pulpo_forms.urls'), name='base'), url(r'^admin/', include(admin.site.urls)), url(r'^model_field_form/$', 'pulpo_forms.views.render_form', {'instance': 'model-field-example'}), ]
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 2291, 11, 19016, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 19016, 7, 81, 6, 61, 20688, 14, 3256, 2291, 10786, 79,...
2.319018
163
import codecs from contextlib import contextmanager import hashlib import logging from logging.handlers import RotatingFileHandler import random import string from pathlib import Path def random_string(length=32, prefix='', suffix=''): ''' Generate random string length : Length of string prefix : Prefix to place before random characters suffix : Suffix to place after random characters ''' chars = string.ascii_lowercase + string.digits generated = "".join(random.choice(chars) for _ in range(length - len(prefix) - len(suffix))) return f'{prefix}{generated}{suffix}' def md5(input_file, chunksize=64*1024): ''' Get md5 base64 hash of input file ''' hash_value = hashlib.md5() with open(input_file, 'rb') as read: while True: chunk = read.read(chunksize) if not chunk: break try: hash_value.update(chunk.encode('utf-8')) except AttributeError: # File is likely binary hash_value.update(chunk) md5_value = codecs.encode(hash_value.digest(), 'base64') # This leaves "b'<hash> at beginning, so take out first two chars return str(md5_value).rstrip("\\n'")[2:] def setup_logger(name, log_file_level, logging_file=None, console_logging=True, console_logging_level=logging.INFO): ''' Setup logging ''' logger = logging.getLogger(name) formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logger.setLevel(log_file_level) if logging_file is not None: fh = RotatingFileHandler(logging_file, backupCount=4, maxBytes=((2 ** 20) * 10)) fh.setLevel(log_file_level) fh.setFormatter(formatter) logger.addHandler(fh) if console_logging: sh = logging.StreamHandler() sh.setLevel(console_logging_level) sh.setFormatter(formatter) logger.addHandler(sh) return logger
[ 11748, 40481, 82, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, 11748, 12234, 8019, 198, 11748, 18931, 198, 6738, 18931, 13, 4993, 8116, 1330, 18481, 803, 8979, 25060, 198, 11748, 4738, 198, 11748, 4731, 198, 198, 6738, 3108, 8019, 1330,...
2.215256
957
from logging import lastResort from pydantic import BaseModel from invmonApi.database import Base from invmonInfra.enum import InventoryLastStatusEnum from sqlalchemy import Column, String, Boolean from uuid import uuid4
[ 6738, 18931, 1330, 938, 4965, 419, 198, 6738, 279, 5173, 5109, 1330, 7308, 17633, 198, 6738, 800, 2144, 32, 14415, 13, 48806, 1330, 7308, 198, 6738, 800, 2144, 18943, 430, 13, 44709, 1330, 35772, 5956, 19580, 4834, 388, 198, 6738, 44161...
3.578125
64
# coding: utf-8 """ Module `chatette.parsing.lexing.rule_slot_val` Contains the definition of the class that represents the lexing rule to tokenize a slot value being set within a unit rule (only for a slot). """ from chatette.parsing.lexing.lexing_rule import LexingRule from chatette.parsing.lexing import LexicalToken, TerminalType from chatette.parsing.utils import find_next_comment, SLOT_VAL_SYM
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 37811, 198, 26796, 4600, 17006, 5857, 13, 79, 945, 278, 13, 2588, 278, 13, 25135, 62, 43384, 62, 2100, 63, 198, 4264, 1299, 262, 6770, 286, 262, 1398, 326, 6870, 262, 31191, 278, 3896, 198, 146...
3.181102
127
# -*- coding: utf-8 -*- """ Created on Sun Feb 23 20:28:51 2020 @author: Administrator """ """ rand7()1~7,rand10(),1-10. """ import random if __name__ == "__main__": print(rand10())
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 41972, 319, 3825, 3158, 2242, 1160, 25, 2078, 25, 4349, 12131, 201, 198, 201, 198, 31, 9800, 25, 22998, 201, 198, 37811, 201, 198, 201, 198, 3781...
2.159574
94
from abc import ABC, abstractmethod from enum import Enum from functools import partial # from math import isinf from typing import Union, Optional, Any from typing import Callable, Tuple, Dict, List, Set, Type # noqa: F401 from ..builtin_values import Bool, ops_symbols from ..abstract_value import AbstractValue from ...abstract_domain import AbstractDomain from ...errors import TypeCheckLogger from .objects_ids import new_id from ...miscelaneous import Pos __all__ = ['PythonValue', 'PT', 'AbstractMutVal', 'Args']
[ 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 6738, 33829, 1330, 2039, 388, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 2, 422, 10688, 1330, 318, 10745, 198, 6738, 19720, 1330, 4479, 11, 32233, 11, 4377, 198, 6738, 19720, 1330, ...
3.490196
153
import keras import tensorflow as tf import numpy.random as rng from keras.datasets import cifar10 from keras.utils import np_utils def data_cifar10(**kwargs): """ Preprocess CIFAR10 dataset :return: """ # These values are specific to CIFAR10 img_rows = 32 img_cols = 32 nb_classes = 10 # the data, shuffled and split between train and test sets (X_train, y_train), (X_test, y_test) = cifar10.load_data() if keras.backend.image_dim_ordering() == 'th': X_train = X_train.reshape(X_train.shape[0], 3, img_rows, img_cols) X_test = X_test.reshape(X_test.shape[0], 3, img_rows, img_cols) else: X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 3) X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 3) X_train = X_train.astype('float32') X_test = X_test.astype('float32') tpermutation = rng.permutation(X_test.shape[0]) X_test = X_test[tpermutation] y_test = y_test[tpermutation] permutation = rng.permutation(X_train.shape[0]) X_train = X_train[permutation] y_train = y_train[permutation] X_train /= 255 X_test /= 255 print('X_train shape:', X_train.shape) print(X_train.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) return X_train, Y_train, X_test, Y_test
[ 11748, 41927, 292, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 13, 25120, 355, 374, 782, 198, 198, 6738, 41927, 292, 13, 19608, 292, 1039, 1330, 269, 361, 283, 940, 198, 6738, 41927, 292, 13, 26791, 1330, 45941, ...
2.287651
664
#!/usr/bin/python #-*- coding:utf-8 -*- from . import const const.UK = 'UK' const.US = 'US'
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 12, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 6738, 764, 1330, 1500, 198, 198, 9979, 13, 15039, 796, 705, 15039, 6, 198, 9979, 13, 2937, 796, 705, 2937, 6, 198 ]
2.162791
43
# There are copyright holders. import pandapower as pp import pandapower.networks as pn net = pn.case9() pp.runpp(net) print ("Canvass NR Power Flow Results At The Buses") print ("------------------------------------------") print (net.res_bus)
[ 2, 1318, 389, 6634, 16392, 13, 220, 198, 11748, 19798, 499, 789, 355, 9788, 198, 11748, 19798, 499, 789, 13, 3262, 5225, 355, 279, 77, 198, 3262, 796, 279, 77, 13, 7442, 24, 3419, 628, 198, 381, 13, 5143, 381, 7, 3262, 8, 198, 1...
3.351351
74
from django.urls import include, path from . import arche_rdf_views app_name = "archeutils" urlpatterns = [ path('<app_name>/<model_name>/<pk>', arche_rdf_views.res_as_arche_graph, name='res_as_arche_graph'), path('<app_name>/<model_name>', arche_rdf_views.qs_as_arche_graph, name='qs_as_arche_graph'), ]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 2291, 11, 3108, 198, 6738, 764, 1330, 20944, 62, 4372, 69, 62, 33571, 198, 198, 1324, 62, 3672, 796, 366, 283, 2395, 26791, 1, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, ...
2.316176
136
import sys from shutil import copyfile if __name__== "__main__": main()
[ 11748, 25064, 198, 6738, 4423, 346, 1330, 4866, 7753, 220, 220, 220, 220, 198, 198, 361, 11593, 3672, 834, 855, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
2.53125
32
x = 2 ** (1/2) y = 3 ** (1/3) z = 5 ** (1/5) print(x) print(y) print(z) print() if x>y and x>z: print(x,'jest najwiksza') elif y>x and y>z: print(y,'jest najwiksza') elif z>x and z>y: print(z,'jest najwiksza') print() if x<y and x<z: print(x,'jest najmniejsza') elif y<x and y<z: print(y,'jest najmniejsza') elif z<x and z<y: print(z,'jest najmniejsza')
[ 87, 796, 362, 12429, 357, 16, 14, 17, 8, 198, 88, 796, 513, 12429, 357, 16, 14, 18, 8, 198, 89, 796, 642, 12429, 357, 16, 14, 20, 8, 198, 198, 4798, 7, 87, 8, 198, 4798, 7, 88, 8, 198, 4798, 7, 89, 8, 198, 4798, 3419, 19...
1.75576
217
title=open("file.txt","w") title.write("\n" ) title.close() sum=0 while 1: sentence=open("file.txt","a") sum+=1 if sum>4: sentence.close() break k =input("") sentence.write(f"{k}\n") sentence.close()
[ 7839, 28, 9654, 7203, 7753, 13, 14116, 2430, 86, 4943, 198, 7839, 13, 13564, 7203, 59, 77, 1, 1267, 198, 7839, 13, 19836, 3419, 198, 16345, 28, 15, 198, 4514, 352, 25, 198, 220, 220, 220, 6827, 28, 9654, 7203, 7753, 13, 14116, 243...
2.077586
116
import bluesky.plan_stubs as bps import bluesky.plans as bp from xpdacq.beamtime import _configure_area_det from xpdacq.glbl import glbl from xpdacq.xpdacq import open_shutter_stub, close_shutter_stub from xpdacq.xpdacq_conf import xpd_configuration # below is the code to run at the beamtime # register the scanplan # ScanPlan(bt, acq_rel_grid_scan, 60, 30, -5, 5, 10, -5, 5, 10) # use bt.list() to see the index of the scanplan and use it in xrun
[ 11748, 25570, 2584, 13, 11578, 62, 301, 23161, 355, 275, 862, 198, 11748, 25570, 2584, 13, 489, 504, 355, 275, 79, 198, 6738, 2124, 30094, 330, 80, 13, 40045, 2435, 1330, 4808, 11250, 495, 62, 20337, 62, 15255, 198, 6738, 2124, 30094,...
2.622093
172
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-08-09 17:25 from __future__ import unicode_literals import django.core.validators import django.db.models.deletion import kolibri.content.models from django.db import migrations, models
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 24, 13, 22, 319, 1584, 12, 2919, 12, 2931, 1596, 25, 1495, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 1...
2.806818
88
from setuptools import setup, find_packages setup( name='picker-my-sticker', version='0.0.1', description='Stickers for Slack', long_description='S t i c k e r s', url='https://github.com/kennydo/pick-my-stick', author='Kenny Do', author_email='chinesedewey@gmail.com', classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Topic :: Internet', ], packages=find_packages(), entry_points={ }, )
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 628, 198, 40406, 7, 198, 220, 220, 220, 1438, 11639, 79, 15799, 12, 1820, 12, 13915, 263, 3256, 198, 220, 220, 220, 2196, 11639, 15, 13, 15, 13, 16, 3256, 198, 220, 220, 22...
2.488789
223
import numpy as np import time import matplotlib.pyplot as plt import imageio from scipy.optimize import fsolve from body import Body def get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, mass_orbit, G=6.67430 * 10**(-11)): """ Get the position vectors from the Keplerian coordinates First part from https://downloads.rene-schwarz.com/download/M001-Keplerian_Orbit_Elements_to_Cartesian_State_Vectors.pdf Second part from https://space.stackexchange.com/questions/19322/converting-orbital-elements-to-cartesian-state-vectors >>> position = get_position_from_Kepler(1.5*10**8, 0.0167, (5*10**(-5))*np.pi/180, 1, 1, 190*np.pi/180, 1.988435 * (10**30)) >>> position array([ 8.58449271e+07, -1.26004733e+08, -1.22449388e+02]) >>> np.linalg.norm(position) 152468174.39880842 """ mu = G * mass_orbit func = lambda EA: mean_anomaly - (EA - eccentricity * np.sin(EA)) eccentric_anomaly = fsolve(func, np.pi)[0] true_anomaly = 2 * np.arctan2(np.sqrt(1 + eccentricity) * np.sin(eccentric_anomaly / 2), np.sqrt(1 - eccentricity) * np.cos(eccentric_anomaly / 2)) radius = semimajor_axis * (1 - eccentricity * np.cos(eccentric_anomaly)) h = np.sqrt(mu * semimajor_axis * (1 - eccentricity**2)) p = semimajor_axis * (1 - eccentricity**2) Om = ascending_node w = argument_of_periapsis nu = true_anomaly r = radius i = inclination e = eccentricity x = r*(np.cos(Om)*np.cos(w+nu) - np.sin(Om)*np.sin(w+nu)*np.cos(i)) y = r*(np.sin(Om)*np.cos(w+nu) + np.cos(Om)*np.sin(w+nu)*np.cos(i)) z = r*(np.sin(i)*np.sin(w+nu)) #print(x, r, Om, w, nu, i, e, eccentric_anomaly) position = np.array([x, y, z]) xd = (x*h*e/(r*p))*np.sin(nu) - (h/r)*(np.cos(Om)*np.sin(w+nu) + np.sin(Om)*np.cos(w+nu)*np.cos(i)) yd = (x*h*e/(r*p))*np.sin(nu) - (h/r)*(np.sin(Om)*np.sin(w+nu) - np.cos(Om)*np.cos(w+nu)*np.cos(i)) zd = (x*h*e/(r*p))*np.sin(nu) - (h/r)*(np.cos(w+nu)*np.sin(i)) velocity = np.array([xd, yd, zd]) #print(velocity) return position def 
get_coordinates_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, current_velocity, mass_orbit, G=6.67430 * 10**(-11), delta=0.001): """ Lol wtf pls kil me. >>> position, velocity = get_coordinates_from_Kepler(1.5*10**8, 0.0167, (5*10**(-5))*np.pi/180, 1, 1, 190*np.pi/180, 29300, 1.988435 * (10**30)) >>> position array([ 8.58449271e+07, -1.26004733e+08, -1.22449388e+02]) >>> velocity array([ 2.41591639e+04, 1.65778407e+04, -9.92410781e-03]) >>> np.linalg.norm(position) 152468174.39880842 >>> np.linalg.norm(velocity) 29299.999999999993 """ position = get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, mass_orbit, G) position_plus_delta = get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly + delta, mass_orbit, G) delta_position = position_plus_delta - position direction_unit_vector = delta_position / np.linalg.norm(delta_position) return position, current_velocity * direction_unit_vector def ld_to_m(ld): """ Converts the input distance (or velocity) of the input from Lunar distances to meters. """ return ld * 384402 * 10**3 def au_to_m(au): """ Converts the input distance (or velocity) of the input from atronomical units to meters. """ return au * 1.495978707 * 10**11 def ly_to_m(ly): """ Converts the input distance (or velocity) of the input from light years to meters. """ return ly * 9.4607 * 10**15 def pc_to_m(pc): """ Converts the input distance (or velocity) of the input from parsec to meters. """ return pc * 3.085677581 * 10**18 def get_test_Space_simple_solar(): """ Generates a simple test Space object. It is filled with the 8 plannets of the solar system (and the moon). They are position in a way that doesn't 100% correspond to reality. """ bodies = [] mass_orbit = 1.988435 * (10**30) # The most important bodies. 
bodies.append(Body(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]), 1.988435 * (10**30), 695700000, 'Sun', True, 'tab:orange')) position_earth, velocity_earth = get_coordinates_from_Kepler(1.0*1.496*10**11, 0.01671, (5*10**(-5))*np.pi/180, 0, 0, 190*np.pi/180, 29300, mass_orbit) bodies.append(Body(position_earth, velocity_earth, 5.97 * (10**24), 6371009, 'Earth', True, 'tab:blue')) position, velocity = get_coordinates_from_Kepler(384400*1000, 0.0554, 5.16*np.pi/180, 125*np.pi/180, 318.15*np.pi/180, 213*np.pi/180, 1020, bodies[1].mass) position = position + position_earth velocity = velocity + velocity_earth bodies.append(Body(position,velocity, 7.349 * (10**22), 1737400, 'Moon', True, 'darkgrey')) # Other inner plannets. position, velocity = get_coordinates_from_Kepler(0.38709893*1.496*10**11, 0.20563069, 7.00487*np.pi/180, 48.33*np.pi/180, 29.12*np.pi/180, 269*np.pi/180, 45810, mass_orbit) bodies.append(Body(position, velocity, 3.301 * (10**23), 2440000, 'Mercury', True, 'lightsteelblue')) position, velocity = get_coordinates_from_Kepler(0.72333199*1.496*10**11, 0.00677, 3.39471*np.pi/180, 76.68069*np.pi/180, 54.85*np.pi/180, 187*np.pi/180, 34790, mass_orbit) bodies.append(Body(position, velocity, 4.867 * (10**24), 6050000, 'Venus', True, 'goldenrod')) position, velocity = get_coordinates_from_Kepler(1.52366*1.496*10**11, 0.09341, 1.85061*np.pi/180, 49.57*np.pi/180, 286*np.pi/180, 349*np.pi/180, 26450, mass_orbit) bodies.append(Body(position, velocity, 6.417 * (10**23), 3390000, 'Mars', True, 'sandybrown')) # Outer planets. 
position_jupiter, velocity_jupiter = get_coordinates_from_Kepler(5.2033*1.496*10**11, 0.04839, 1.3053*np.pi/180, 100.556*np.pi/180, -85.80*np.pi/180, 283*np.pi/180, 13170, mass_orbit) bodies.append(Body(position_jupiter, velocity_jupiter, 1.898 * (10**27), 69950000, 'Jupiter', True, 'darkorange')) position_saturn, velocity_saturn = get_coordinates_from_Kepler(9.537*1.496*10**11, 0.0541, 2.48446*np.pi/180, 113.715*np.pi/180, -21.2831*np.pi/180, 207*np.pi/180, 91590, mass_orbit) bodies.append(Body(position_saturn, velocity_saturn, 5.683 * (10**26), 58300000, 'Saturn', True, 'navajowhite')) position_uranus, velocity_uranus = get_coordinates_from_Kepler(19.1912*1.496*10**11, 0.0471771, 0.76986*np.pi/180, 74.22988*np.pi/180, 96.73436*np.pi/180, 229*np.pi/180, 6578, mass_orbit) bodies.append(Body(position_uranus, velocity_uranus, 8.681 * (10**25), 25360000, 'Uranus', True, 'powderblue')) position_neptune, velocity_neptune = get_coordinates_from_Kepler(30.06896*1.496*10**11, 0.00858587, 1.76917*np.pi/180, 131.72169*np.pi/180, -86.75*np.pi/180, 301*np.pi/180, 5449, mass_orbit) bodies.append(Body(position_neptune, velocity_neptune, 1.024 * (10**26), 24600000, 'Neptune', True, 'dodgerblue')) return bodies if __name__ == "__main__": import doctest doctest.testmod()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 640, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2939, 952, 198, 6738, 629, 541, 88, 13, 40085, 1096, 1330, 43458, 6442, 198, 198, 6738, 1767, 1330, 12290, 198, ...
2.259019
3,243
""" Copyright (c) 2015 Marshall Farrier license http://opensource.org/licenses/MIT lib/ui/handlers.py Handlers for edit menu """ from bson.codec_options import CodecOptions import datetime as dt from functools import partial import json from pymongo.errors import BulkWriteError from ..dbschema import SPREADS from ..dbtools import delete_many, find_job, getcoll, insert_many from ..dbwrapper import job from ..spreads.optspread import SPREAD_TYPES from ..spreads.optspread_factory import OptSpreadFactory from .spread_ui import SpreadUi from .utils import confirm
[ 37811, 198, 15269, 357, 66, 8, 1853, 13606, 6755, 5277, 198, 43085, 2638, 1378, 44813, 1668, 13, 2398, 14, 677, 4541, 14, 36393, 198, 198, 8019, 14, 9019, 14, 4993, 8116, 13, 9078, 198, 198, 12885, 8116, 329, 4370, 6859, 198, 37811, ...
3.339181
171
# class SidebarView(GenericAPIView): # permission_classes = [AllowAny] # def get(self, request, *args, **kwargs): # org_id = request.GET.get("org", None) # user_id = request.GET.get("user", None) # room = settings.ROOM_COLLECTION # plugin_id = settings.PLUGIN_ID # roomid = settings.ROOM_ID # token = verify_token # pub_room = get_room_info() # # subscription_channel: org_id_memberid_sidebar # if request.GET.get("org") and request.GET.get("user"): # subscription_channel = "{org_id}_{user_id}_sidebar" # #sidebar_update = "currentWorkspace_userInfo_sidebar" # sidebar_update_payload = { # "event": "sidebar_update", # "plugin_id": "music.zuri.chat", # "data": { # "name": "Music Plugin", # "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music", # "plugin_id": plugin_id, # "organisation_id": org_id, # "room_id": roomid, # "user_id": user_id, # "category": "entertainment", # "group_name": "music", # "show_group": False, # "button_url": f"/music/{org_id}/{roomid}", # "public_rooms": [pub_room], # # "starred" : [], # "joined_rooms": [pub_room], # }, # } # # centrifugo_post(sidebar_update_payload, subscription_channel) # # return Response(sidebar_update_payload) # url = "https://api.zuri.chat/sidebar?org={org_id}&user={user_id}" # # http://127.0.0.1:8000/sidebar?org=61695d8bb2cc8a9af4833d46&user=61695d8bb2cc8a9af4833d47 # r = requests.get(url) # # print(r.status_code) # if r.status_code == 200: # # public_url = f"https://api.zuri.chat/data/read/{org_id}/{plugin_id}/{room}/{roomid}" # # r = requests.get(public_url) # publish_to_sidebar(plugin_id, user_id, {"event": "sidebar_update", "data": pub_room}) # centrifugo_post(sidebar_update_payload, subscription_channel) # return Response(r) # else: # centrifugo_post(sidebar_update_payload, subscription_channel) # return Response( # { # "event": "sidebar_update", # "name": "Music Plugin", # "description": "This is a virtual lounge where people can add, watch and listen to YouTube 
videos or music", # "plugin_id": plugin_id, # "organisation_id": org_id, # "room_id": roomid, # "user_id": user_id, # "group_name": [], # "show_group": False, # "category": "entertainment", # "public_rooms": [pub_room], # "joined_rooms": [pub_room], # } # ) # else: # centrifugo_post(sidebar_update_payload, subscription_channel) # return JsonResponse( # { # "name": "Music Plugin", # "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music", # "plugin_id": plugin_id, # "organisation_id": org_id, # "room_id": roomid, # "user_id": user_id, # "group_name": [], # "show_group": False, # "category": "entertainment", # "public_rooms": [pub_room], # "joined_rooms": [pub_room], # } # ) # def is_valid(param): # return param != "" and param is not None
[ 2, 1398, 12075, 5657, 7680, 7, 46189, 2969, 3824, 769, 2599, 198, 2, 220, 220, 220, 220, 7170, 62, 37724, 796, 685, 35265, 7149, 60, 198, 198, 2, 220, 220, 220, 220, 825, 651, 7, 944, 11, 2581, 11, 1635, 22046, 11, 12429, 46265, ...
1.737263
2,375
# -*- coding: utf-8 -*- """ Load Duqa labeled dataset. """ from __future__ import absolute_import, division, print_function import collections import json import logging import math from io import open from tqdm import tqdm from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize logger = logging.getLogger(__name__) def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): label_map = {label : i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in tqdm(enumerate(examples), desc='loading_data'): tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: %s (id = %d)" % (example.label, label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): 
"""Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() processors = { 'duqa': DuQAProcessor, } num_labels_task = { 'duqa': 2, }
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 8778, 10343, 20402, 15494, 27039, 13, 37227, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 198, 11748, 17268, 198, ...
2.159489
1,486
from typing import Any if __name__ == '__main__': main()
[ 6738, 19720, 1330, 4377, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 1388, 3419, 198 ]
2.73913
23
import collections import colorsys from typing import Iterable, List, Tuple import matplotobjlib as plot from backports import zoneinfo from matplotlib.colors import ListedColormap import utils from gui.components import PlotComponent from gui.options import ArtistChooser, ColorMap, Spinbox from track import Track
[ 11748, 17268, 198, 11748, 7577, 893, 198, 6738, 19720, 1330, 40806, 540, 11, 7343, 11, 309, 29291, 198, 198, 11748, 2603, 29487, 26801, 8019, 355, 7110, 198, 6738, 736, 3742, 1330, 6516, 10951, 198, 6738, 2603, 29487, 8019, 13, 4033, 66...
3.890244
82
# [h] interpolated nudge dialog '''a simple RoboFont dialog for the famous "interpolated nudge" script''' # Interpolated Nudge for RoboFont -- Travis Kochel # http://tktype.tumblr.com/post/15254264845/interpolated-nudge-for-robofont # Interpolated Nudge -- Christian Robertson # http://betatype.com/node/18 from vanilla import * from NudgeCore import * # run interpolatedNudgeDialog()
[ 2, 685, 71, 60, 39555, 515, 299, 12587, 17310, 198, 198, 7061, 6, 64, 2829, 39702, 23252, 17310, 329, 262, 5863, 366, 3849, 16104, 515, 299, 12587, 1, 4226, 7061, 6, 198, 198, 2, 4225, 16104, 515, 399, 12587, 329, 39702, 23252, 1377...
3.015385
130
""" This module defines a mixin, which can be used by all implementations for all databases. All the databases have a different hierarchy of DatabaseWrapper, but all of them derive from BaseDatabaseWrapper """ from abc import ABC from typing import Optional from django.db.backends.base.base import BaseDatabaseWrapper from django.db.backends.utils import CursorDebugWrapper, CursorWrapper from .cursor_wrapper_instrumentation import QueryProfilerCursorDebugWrapper, QueryProfilerCursorWrapper
[ 37811, 198, 1212, 8265, 15738, 257, 5022, 259, 11, 543, 460, 307, 973, 416, 477, 25504, 329, 477, 20083, 13, 198, 3237, 262, 20083, 423, 257, 1180, 18911, 286, 24047, 36918, 2848, 11, 475, 477, 286, 606, 27099, 422, 7308, 38105, 36918...
3.890625
128
''' Webserver for the Penguin Guano Classification AI4Earth API To run: export FLASK_APP=frontend-server.py python -m flask run --host=0.0.0.0 To access the website, enter your IP address:5000 into a browser. e.g., http://127.0.0.1:5000/ ''' from flask import Flask, send_from_directory, request import requests print("Running frontend server") API_ENDPOINT = "http://penguinguano.eastus.azurecontainer.io:80/v1/pytorch_api/classify" app = Flask(__name__, static_url_path='') # front-end server stuff if __name__ == '__main__': app.run()
[ 7061, 6, 198, 1135, 1443, 18497, 329, 262, 34424, 1962, 5733, 40984, 9552, 19, 22840, 7824, 198, 198, 2514, 1057, 25, 198, 39344, 9977, 1921, 42, 62, 24805, 28, 8534, 437, 12, 15388, 13, 9078, 198, 29412, 532, 76, 42903, 1057, 1377, ...
2.713592
206
# -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-06-29 13:56 from __future__ import unicode_literals import django.contrib.postgres.fields.jsonb import django.db.models.deletion from django.db import migrations, models import brouwers.shop.models.utils
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 1238, 319, 13130, 12, 3312, 12, 1959, 1511, 25, 3980, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198...
2.773196
97
# # # Copyright (C) 2015, 2016 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Module interacting with PAM performing authorization and authentication This module authenticates and authorizes RAPI users based on their credintials. Both actions are performed by interaction with PAM as a 'ganeti-rapi' service. 
""" import logging try: import ctypes as c # pylint: disable=F0401 import ctypes.util as util except ImportError: c = None from ganeti import constants from ganeti.errors import PamRapiAuthError import ganeti.http as http from ganeti.http.auth import HttpServerRequestAuthentication from ganeti.rapi import auth __all__ = ['PamAuthenticator'] DEFAULT_SERVICE_NAME = 'ganeti-rapi' MAX_STR_LENGTH = 100000 MAX_MSG_COUNT = 100 PAM_ENV_URI = 'GANETI_RAPI_URI' PAM_ENV_BODY = 'GANETI_REQUEST_BODY' PAM_ENV_METHOD = 'GANETI_REQUEST_METHOD' PAM_ENV_ACCESS = 'GANETI_RESOURCE_ACCESS' PAM_ABORT = 26 PAM_BUF_ERR = 5 PAM_CONV_ERR = 19 PAM_SILENT = 32768 PAM_SUCCESS = 0 PAM_PROMPT_ECHO_OFF = 1 PAM_AUTHTOK = 6 PAM_USER = 2 if c: CONV_FUNC = c.CFUNCTYPE(c.c_int, c.c_int, c.POINTER(c.POINTER(PamMessage)), c.POINTER(c.POINTER(PamResponse)), c.c_void_p) def Authenticate(cf, pam_handle, authtok=None): """Performs authentication via PAM. Perfroms two steps: - if authtok is provided then set it with pam_set_item - call pam_authenticate """ try: authtok_copy = None if authtok: authtok_copy = cf.strndup(authtok, len(authtok)) if not authtok_copy: raise http.HttpInternalServerError("Not enough memory for PAM") ret = cf.pam_set_item(c.pointer(pam_handle), PAM_AUTHTOK, authtok_copy) if ret != PAM_SUCCESS: raise http.HttpInternalServerError("pam_set_item failed [%d]" % ret) ret = cf.pam_authenticate(pam_handle, 0) if ret == PAM_ABORT: raise http.HttpInternalServerError("pam_authenticate requested abort") if ret != PAM_SUCCESS: raise http.HttpUnauthorized("Authentication failed") except: cf.pam_end(pam_handle, ret) raise finally: if authtok_copy: cf.free(authtok_copy) def PutPamEnvVariable(cf, pam_handle, name, value): """Wrapper over pam_setenv. 
""" setenv = "%s=" % name if value: setenv += value ret = cf.pam_putenv(pam_handle, setenv) if ret != PAM_SUCCESS: raise http.HttpInternalServerError("pam_putenv call failed [%d]" % ret) def Authorize(cf, pam_handle, uri_access_rights, uri=None, method=None, body=None): """Performs authorization via PAM. Performs two steps: - initialize environmental variables - call pam_acct_mgmt """ try: PutPamEnvVariable(cf, pam_handle, PAM_ENV_ACCESS, uri_access_rights) PutPamEnvVariable(cf, pam_handle, PAM_ENV_URI, uri) PutPamEnvVariable(cf, pam_handle, PAM_ENV_METHOD, method) PutPamEnvVariable(cf, pam_handle, PAM_ENV_BODY, body) ret = cf.pam_acct_mgmt(pam_handle, PAM_SILENT) if ret != PAM_SUCCESS: raise http.HttpUnauthorized("Authorization failed") except: cf.pam_end(pam_handle, ret) raise def ValidateParams(username, _uri_access_rights, password, service, authtok, _uri, _method, _body): """Checks whether ValidateRequest has been called with a correct params. These checks includes: - username is an obligatory parameter - either password or authtok is an obligatory parameter """ if not username: raise http.HttpUnauthorized("Username should be provided") if not service: raise http.HttpBadRequest("Service should be proivded") if not password and not authtok: raise http.HttpUnauthorized("Password or authtok should be provided") def ValidateRequest(cf, username, uri_access_rights, password=None, service=DEFAULT_SERVICE_NAME, authtok=None, uri=None, method=None, body=None): """Checks whether it's permitted to execute an rapi request. Calls pam_authenticate and then pam_acct_mgmt in order to check whether a request should be executed. @param cf: An instance of CFunctions class containing necessary imports @param username: username @param uri_access_rights: handler access rights @param password: password @param service: a service name that will be used for the interaction with PAM @param authtok: user's authentication token (e.g. 
some kind of signature) @param uri: an uri of a target resource obtained from an http header @param method: http method trying to access the uri @param body: a body of an RAPI request @return: On success - authenticated user name. Throws an exception otherwise. """ ValidateParams(username, uri_access_rights, password, service, authtok, uri, method, body) def ConversationFunction(num_msg, msg, resp, _app_data_ptr): """Conversation function that will be provided to PAM modules. The function replies with a password for each message with PAM_PROMPT_ECHO_OFF style and just ignores the others. """ if num_msg > MAX_MSG_COUNT: logging.warning("Too many messages passed to conv function: [%d]", num_msg) return PAM_BUF_ERR response = cf.calloc(num_msg, c.sizeof(PamResponse)) if not response: logging.warning("calloc failed in conv function") return PAM_BUF_ERR resp[0] = c.cast(response, c.POINTER(PamResponse)) for i in range(num_msg): if msg[i].contents.msg_style != PAM_PROMPT_ECHO_OFF: continue resp.contents[i].resp = cf.strndup(password, len(password)) if not resp.contents[i].resp: logging.warning("strndup failed in conv function") for j in range(i): cf.free(c.cast(resp.contents[j].resp, c.c_void_p)) cf.free(response) return PAM_BUF_ERR resp.contents[i].resp_retcode = 0 return PAM_SUCCESS pam_handle = PamHandleT() conv = PamConv(CONV_FUNC(ConversationFunction), 0) ret = cf.pam_start(service, username, c.pointer(conv), c.pointer(pam_handle)) if ret != PAM_SUCCESS: cf.pam_end(pam_handle, ret) raise http.HttpInternalServerError("pam_start call failed [%d]" % ret) Authenticate(cf, pam_handle, authtok) Authorize(cf, pam_handle, uri_access_rights, uri, method, body) # retrieve the authorized user name puser = c.c_void_p() ret = cf.pam_get_item(pam_handle, PAM_USER, c.pointer(puser)) if ret != PAM_SUCCESS or not puser: cf.pam_end(pam_handle, ret) raise http.HttpInternalServerError("pam_get_item call failed [%d]" % ret) user_c_string = c.cast(puser, c.c_char_p) 
cf.pam_end(pam_handle, PAM_SUCCESS) return user_c_string.value def MakeStringC(string): """Converts a string to a valid C string. As a C side treats non-unicode strings, encode unicode string with 'ascii'. Also ensure that C string will not be longer than MAX_STR_LENGTH in order to prevent attacs based on too long buffers. """ if string is None: return None if isinstance(string, unicode): string = string.encode("ascii") if not isinstance(string, str): return None if len(string) <= MAX_STR_LENGTH: return string return string[:MAX_STR_LENGTH]
[ 2, 198, 2, 198, 198, 2, 15069, 357, 34, 8, 1853, 11, 1584, 3012, 3457, 13, 198, 2, 1439, 2489, 10395, 13, 198, 2, 198, 2, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, 2, 17613, 11, 389, 1043...
2.675316
3,160
# Donk Project # Copyright (c) 2021 Warriorstar Orion <orion@snowfrost.garden> # SPDX-License-Identifier: MIT import pathlib from typing import Dict from iconparse.reader import DmiData, Reader from iconparse.extractor import Extractor
[ 2, 2094, 74, 4935, 198, 2, 15069, 357, 66, 8, 33448, 14019, 7364, 26153, 1279, 273, 295, 31, 82, 2197, 69, 23341, 13, 70, 5872, 29, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 17168, 198, 11748, 3108, 8019, 198, 6738, 19720...
3.352113
71
from .gan import SNPatchGAN
[ 6738, 764, 1030, 1330, 25632, 963, 45028, 198 ]
3.5
8
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2013 MATOBA Akihiro <matobaa+trac-hacks@gmail.com> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. from trac.core import * from trac.config import Option from trac.core import Component, implements from trac.db.api import ConnectionBase from trac.db.api import DatabaseManager from trac.db.api import IDatabaseConnector from trac.db.api import _parse_db_str, get_column_names from trac.db.api import ConnectionBase from trac.db.util import ConnectionWrapper from trac.env import IEnvironmentSetupParticipant, ISystemInfoProvider from trac.env import BackupError from trac.db import Table, Column import re try: import pymssql as pymssql has_mssql = True except ImportError: has_mssql = False # force enables this plugin in trac-admin initenv #enabled = BoolOption("components", "mssql_backend.*", "enabled") # Mapping from "abstract" SQL types to DB-specific types _type_map = { 'int64': 'bigint', 'text': 'nvarchar(512)', } # TODO: You cannot use MS Access because column name 'value' can seems not use via odbc. _column_map = { 'key': '"key"', # 'value': '"value"' } re_limit = re.compile(" LIMIT (\d+)( OFFSET (\d+))?", re.IGNORECASE) re_order_by = re.compile("ORDER BY ", re.IGNORECASE) re_where = re.compile("WHERE ", re.IGNORECASE) re_equal = re.compile("(\w+)\s*=\s*(['\w]+|\?)", re.IGNORECASE) re_isnull = re.compile("(\w+) IS NULL", re.IGNORECASE) re_select = re.compile('SELECT( DISTINCT)?( TOP)?', re.IGNORECASE) re_coalesce_equal = re.compile("(COALESCE\([^)]+\))=([^,]+)", re.IGNORECASE) def _to_sql(table): sql = ["CREATE TABLE %s (" % table.name] coldefs = [] for column in table.columns: column.name = _column_map.get(column.name, column.name) ctype = column.type.lower() ctype = _type_map.get(ctype, ctype) # for SQL Server, patch for "enum" table, value is not text, use int instead. 
if table.name == 'enum' and column.name == 'value': ctype = 'int' if (table.name, column.name) in [ ('wiki', 'text'), ('report', 'query'), ('report', 'description'), ('milestone', 'description'), ('version', 'description'), ]: ctype = 'nvarchar(MAX)' if (table.name, column.name) in [ ('ticket', 'description'), ('ticket_change', 'oldvalue'), ('ticket_change', 'newvalue'), ('ticket_custom', 'value'), ('session_attribute', 'value') ]: ctype = 'nvarchar(4000)' # I'm using SQL Userver 2012 Express if column.auto_increment: ctype = 'INT IDENTITY NOT NULL' # SQL Server Style # ctype = 'INT UNSIGNED NOT NULL AUTO_INCREMENT' # MySQL Style # ctype = 'SERIAL' # PGSQL Style # ctype = "integer constraint P_%s PRIMARY KEY" % table.name # SQLite Style else: # if column.name in table.key or any([column.name in index.columns for index in table.indices]): # ctype = {'ntext': 'nvarchar(255)'}.get(ctype, ctype) # SQL Server cannot use text as PK if len(table.key) == 1 and column.name in table.key: ctype += " constraint P_%s PRIMARY KEY" % table.name coldefs.append(" %s %s" % (column.name, ctype)) if len(table.key) > 1: coldefs.append(" UNIQUE (%s)" % ','.join(table.key)) sql.append(',\n'.join(coldefs) + '\n);') yield '\n'.join(sql) for index in table.indices: type_ = ('INDEX', 'UNIQUE INDEX')[index.unique] yield "CREATE %s %s_%s_idx ON %s (%s);" % (type_, table.name, '_'.join(index.columns), table.name, ','.join(index.columns))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 201, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 2, 201, 198, 2, 15069, 357, 34, 8, 2211, 36775, 46, 4339, 48663, 49907, 1279, 6759, 672, 7252, 10, 2213, ...
2.40013
1,542
#Author: Matthew Wicker # Impliments the BayesByBackprop optimizer for BayesKeras import os import math import logging import numpy as np import tensorflow as tf import tensorflow_probability as tfp from tensorflow.keras.models import * from tensorflow.keras.layers import * from tqdm import tqdm from tqdm import trange from BayesKeras.optimizers import optimizer from BayesKeras.optimizers import losses from BayesKeras import analyzers from abc import ABC, abstractmethod # A dumb mistake on my part which needs to be factored out
[ 2, 13838, 25, 9308, 370, 15799, 198, 2, 34347, 6800, 262, 4696, 274, 3886, 7282, 22930, 6436, 7509, 329, 4696, 274, 42, 263, 292, 198, 198, 11748, 28686, 198, 11748, 10688, 198, 11748, 18931, 198, 11748, 299, 32152, 355, 45941, 198, 1...
3.280488
164
#!/usr/bin/env python

"""Create a .cxx file that performs explicit instantiation over float/double and
dimensions 1, 2, and 3.  Writes the file to the current directory."""

usage = "ExplicitInstantiation.py <class_name>"

import sys

# Require exactly one positional argument: the class name to instantiate.
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
    print(usage)
    sys.exit(1)

copyright_header = """/*
*
*  Copyright 2011 by the CALATK development team
*
*   Licensed under the Apache License, Version 2.0 (the "License");
*   you may not use this file except in compliance with the License.
*   You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
*   Unless required by applicable law or agreed to in writing, software
*   distributed under the License is distributed on an "AS IS" BASIS,
*   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*   See the License for the specific language governing permissions and
*   limitations under the License.
*
*
*/
"""

# Template of the explicit-instantiation translation unit; every {0}
# placeholder is replaced with the requested class name.
content = """
#include "{0}.txx"

namespace CALATK
{

template class {0}< float, 1 >;
template class {0}< float, 2 >;
template class {0}< float, 3 >;
template class {0}< double, 1 >;
template class {0}< double, 2 >;
template class {0}< double, 3 >;

} // namespace CALATK
""".replace('{0}', sys.argv[1])

# Use a context manager so the output file is closed (and flushed) even if
# a write raises -- the original open()/close() pair leaked the handle on error.
with open(sys.argv[1] + '.cxx', 'w') as explicit_file:
    explicit_file.write(copyright_header)
    explicit_file.write(content)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 16447, 257, 764, 66, 5324, 2393, 326, 17706, 7952, 9113, 3920, 625, 12178, 14, 23352, 290, 198, 27740, 5736, 352, 11, 362, 11, 290, 513, 13, 220, 12257, 274, 262, 2393, 2...
2.928571
490
from concurrent.futures import ThreadPoolExecutor from functools import partial from time import time try: from gpsoauth import perform_master_login, perform_oauth except ImportError: perform_oauth = perform_master_login from .auth import Auth from .exceptions import AuthException, InvalidCredentialsException
[ 6738, 24580, 13, 69, 315, 942, 1330, 14122, 27201, 23002, 38409, 198, 6738, 1257, 310, 10141, 1330, 13027, 198, 6738, 640, 1330, 640, 198, 198, 28311, 25, 198, 220, 220, 220, 422, 27809, 568, 18439, 1330, 1620, 62, 9866, 62, 38235, 11...
3.701149
87
def evenly_parallelize(input_list):
    """Return an evenly partitioned Spark resilient distributed dataset (RDD).

    Each element of ``input_list`` is paired with a partition index chosen so
    that the elements spread uniformly across Spark's default partition count,
    and the keyed pairs are then partitioned by that index.
    """
    import numpy as np
    from pyspark.sql.session import SparkSession

    # Reuse (or lazily create) the active Spark session and grab its context.
    sc = SparkSession.builder.getOrCreate().sparkContext

    total = len(input_list)
    # Partition count Spark would use for this input by default.
    part_count = sc.parallelize(input_list).getNumPartitions()
    # Uniformly spaced partition ids in [0, part_count), one per element.
    keys = np.floor(np.linspace(0, part_count, total, endpoint=False)).astype(int)
    keyed = sc.parallelize(zip(keys, input_list))
    return keyed.partitionBy(part_count)
[ 4299, 21894, 62, 1845, 29363, 1096, 7, 15414, 62, 4868, 2599, 198, 220, 220, 220, 705, 7061, 7783, 21894, 18398, 276, 9009, 30738, 9387, 27039, 357, 49, 16458, 8, 7061, 6, 628, 220, 220, 220, 1330, 299, 32152, 355, 45941, 198, 220, ...
2.861878
181
"""Model helper module. """ from __future__ import annotations from typing import Union import numpy as np import torch
[ 37811, 17633, 31904, 8265, 13, 198, 37811, 198, 6738, 11593, 37443, 834, 1330, 37647, 198, 6738, 19720, 1330, 4479, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 628, 628 ]
4.1
30
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the 'license' file accompanying this file. This file is # distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from __future__ import absolute_import import socket from contextlib import closing import test.utils.local_mode as localmode # From https://stackoverflow.com/a/45690594
[ 2, 15069, 13130, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 705, 34156, 27691, 921, 198, 2, 743, 407, 779, 428, 2393, ...
3.705
200
#!/usr/bin/python2.5 # Copyright (C) 2019 KUWAYAMA, Masayuki # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (C) 2011 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import agency import agency_jp import stop import route import route_jp import trip import office_jp import fareattribute import farerule import shape import feedinfo import translation import gtfsfactory import schedule
[ 2, 48443, 14629, 14, 8800, 14, 29412, 17, 13, 20, 198, 198, 2, 15069, 357, 34, 8, 13130, 509, 52, 27285, 25087, 11, 11066, 323, 11308, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, ...
3.715789
380
# Generated by Django 2.2.5 on 2020-09-30 09:47 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 20, 319, 12131, 12, 2931, 12, 1270, 7769, 25, 2857, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# Python exercise 076: build a single tuple that alternates product names and
# their prices, then print the price list formatted as a simple table.
produtos = ('LPIS', 1.75, 'BORRACHA', 2, 'CADERNO', 20, 'CANETAS', 7, 'MOCHILA', 120)

divider = '-' * 40
print(divider)
print(f'{"PRODUTOS":^40}')
print(divider)
for pos, item in enumerate(produtos):
    if pos % 2 == 0:
        # Even slots hold names: left-align padded with dots, no newline so the
        # price prints on the same row.
        print(f'{item:.<30}', end='R$')
    else:
        # Odd slots hold prices: right-align with two decimal places.
        print(f'{item:>7.2f}')
print(divider)
[ 2, 1475, 263, 535, 952, 11361, 657, 4304, 25, 327, 5034, 23781, 1430, 64, 8358, 3478, 3099, 334, 2611, 12777, 489, 64, 299, 3970, 401, 299, 2586, 390, 40426, 315, 418, 304, 384, 385, 2461, 452, 418, 662, 418, 11, 12385, 33756, 19524...
1.90785
293
import unittest from unittest.mock import patch from libsimba.simba import Simba
[ 11748, 555, 715, 395, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 9195, 14323, 7012, 13, 14323, 7012, 1330, 3184, 7012, 628, 628, 198 ]
2.965517
29
from golem import actions description = 'Verify wait_for_element_enabled action'
[ 6738, 467, 10671, 1330, 4028, 628, 198, 11213, 796, 705, 13414, 1958, 4043, 62, 1640, 62, 30854, 62, 25616, 2223, 6, 198 ]
3.772727
22
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- """\ ===================== Simple Cube component ===================== TODO """ import Axon import pygame from pygame.locals import * from OpenGL.GL import * from OpenGL.GLU import * from Display3D import Display3D from Util3D import * from Object3D import * if __name__=='__main__': from Kamaelia.Util.Graphline import Graphline CUBEC = SimpleCube(pos=Vector(0, 0,-12), name="Center cube").activate() CUBER = SimpleCube(pos=Vector(4,0,-22), name="Right cube").activate() CUBEB = SimpleCube(pos=Vector(0,-4,-18), name="Bottom cube").activate() ROTATOR = CubeRotator().activate() ROTATOR.link((ROTATOR, "outbox"), (CUBEC, "rel_rotation")) Axon.Scheduler.scheduler.run.runThreads()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 3050, 3517, 32250, 10501, 290, 509, 1689, 25418, 25767, 669, 7, 16, 8, 198, 2, 198, 2, 357, 16, ...
3.036364
550
from rest_framework.routers import SimpleRouter from transactions.api.views import TransactionsViewSet router_v1 = SimpleRouter(trailing_slash=False) router_v1.register(r'transactions', TransactionsViewSet, base_name='transactions')
[ 6738, 1334, 62, 30604, 13, 472, 1010, 1330, 17427, 49, 39605, 198, 198, 6738, 8945, 13, 15042, 13, 33571, 220, 1330, 46192, 7680, 7248, 198, 198, 472, 353, 62, 85, 16, 796, 17427, 49, 39605, 7, 9535, 4386, 62, 6649, 1077, 28, 25101,...
3.371429
70
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DOCUMENTATION = ''' --- module: waf_domain short_description: Add/Modify/Delete WAF domain extends_documentation_fragment: opentelekomcloud.cloud.otc version_added: "0.0.3" author: "Anton Sidelnikov (@anton-sidelnikov)" description: - Add/Modify/Delete WAF domain from the OTC. options: name: description: Specifies the domain name. required: true type: str certificate: description: Specifies the certificate. type: str server: description: Specifies the origin server information. Each element contains client_protocol (HTTP or HTTPS), server_protocol (HTTP or HTTPS), address (IP address or domain name), port (from 0 to 65535) type: list elements: dict proxy: description: Specifies whether a proxy is configured. type: bool sip_header_name: description: Specifies the type of the source IP header. choices: [default, cloudflare, akamai, custom] type: str sip_header_list: description: Specifies the HTTP request header for identifying the real source IP address. type: list elements: str state: description: - Should the resource be present or absent. choices: [present, absent] default: present type: str requirements: ["openstacksdk", "otcextensions"] ''' RETURN = ''' waf_domain: description: List of dictionaries describing domains matching query. type: complex returned: On Success. contains: id: description: Specifies the instance ID. type: str hostname: description: Specifies the domain name. 
type: str cname: description: Specifies the CNAME value. type: str sample: "efec1196267b41c399f2980ea4048517.waf.cloud.com." policy_id: description: Specifies the policy ID. type: str protect_status: description: Specifies the WAF mode. type: int access_status: description: Specifies whether a domain name is connected to WAF. type: int protocol: description: Specifies the protocol type. type: str certificate_id: description: Specifies the certificate ID. type: str server: description: Specifies the origin server information. type: dict proxy: description: Specifies whether a proxy is configured. type: bool timestamp: description: Specifies the time when a domain name is created. type: str ''' EXAMPLES = ''' # Create Domain. - waf_domain: name: test.domain.name server: - client_protocol: https server_protocol: https address: 4.3.2.1 port: 8080 proxy: False state: present # Modify Domain. - waf_domain: name: "{{ domain_name }}" certificate: "{{ cert_name }}" # Delete Domain. - waf_domain: name: "{{ domain_id }}" state: absent ''' from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule if __name__ == '__main__': main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 733...
2.846774
1,240
import collections

from cluster import Cluster
import logging

# Global logging setup: timestamped (HH:MM:SS) records at INFO level, e.g.
# "12:34:56 - INFO: message".
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)

# Reply to a vote request: the responder's current term, whether the vote was
# granted, and the responder's id.
# NOTE(review): field names suggest Raft-style leader election -- confirm
# against how Cluster peers consume this tuple.
VoteResult = collections.namedtuple('VoteResult', ['term', 'vote_granted', 'id'])
[ 11748, 17268, 198, 198, 6738, 13946, 1330, 38279, 198, 11748, 18931, 198, 198, 6404, 2667, 13, 35487, 16934, 7, 18982, 11639, 4, 7, 292, 310, 524, 8, 82, 532, 4064, 7, 5715, 3672, 8, 82, 25, 4064, 7, 20500, 8, 82, 3256, 3128, 69, ...
2.877778
90
#!/usr/bin/env python # encoding: utf-8 import _load_lib import sys import logging import os from unicorn.language.app\ import main as languae_main if __name__ == '__main__': try: languae_main() except Exception as ex: logging.exception("main except") os._exit(1)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 21004, 25, 3384, 69, 12, 23, 198, 198, 11748, 4808, 2220, 62, 8019, 198, 11748, 25064, 198, 11748, 18931, 198, 11748, 28686, 198, 198, 6738, 44986, 13, 16129, 13, 1324, 59, 198, 2...
2.463415
123
import incremental_evaluation.utils as IE import incremental_evaluation.scenario_sets as SS import incremental_evaluation.visualisation_helper as VH import models.basic_predictor_interfaces import models.ensgendel_interface import incremental_evaluation.data_file_helper as DFH import os import argparse SS_MNIST012 = "mnist012" SS_MNIST197 = "mnist197" SS_MNIST_CN5 = "mnist_cn5" SS_GAUSS3 = "gauss_3" RESULTS = os.path.join("results", "incremental_evaluation_run") parser = argparse.ArgumentParser(description="EnsGenDel algorithm & Incremental evaluation framework.\n" "The continual learning algorithms are evaluated in predefined scenarios." "For example: [{0:[9,7]}, {0:[8], 1:[7]}] is a scenario of two tasks." "In the first task {0: [9, 7]} the predictor gets training instances of " "nines and sevens images labeled as 0. In the second task {0:[8], 1:[7]} " "the predictor gets training instances of eights labeled as 0 and " "sevens labeled as 1. Note that the sevens changed the label. After the " "second task the predictor should classify nines and eights as 0 and " "sevens as 1.\n" "The scenario is encoded into bracket-less notation in filenames, e.g., " "[{0:[9,7]}, {0:[8], 1:[7]}] -> T0x97T0x8a1x7 (any resemblance with " "hexadecimals is purely coincidental).") parser.add_argument('experiment_name', help="Experiment name which will be in file prefix.") parser.add_argument('scenario_name', help="Select the scenario. One of the following: " + str([ SS_MNIST012, SS_MNIST197, SS_MNIST_CN5, SS_GAUSS3]) + "The scenario name is appended after experiment_name.") parser.add_argument('modes', help="Series of numbers activating five modes of this application:" "1:scenario preview; 2:predictor training; 3:debug evaluation; " "4:generate csv table with evaluation stats; 5:generate accuracy plots" ";e.g., '24' trains the predictors and then generates csv table with results.") parser.add_argument('--trials', type=int, default=1, help="Number of independent runs. 
The trial number is appended " "in the postfix of the file.") parser.add_argument('--trials_from', type=int, default=0, help="Index of the first trial.") parser.add_argument('--scout_number', type=int, default=-1, help="Cropping the training set. Speeding up the training " "at the cost of less accuracy.") parser.add_argument("--debug", default=False, type=bool, help="Runs only light weight models. True/False") if __name__ == '__main__': args = parser.parse_args() # Experiment setup trial_tags = [i for i in range(args.trials_from, args.trials_from + args.trials)] experiment_name = args.experiment_name scout_subset = args.scout_number if args.scout_number > 0 else None scenario_set_name = args.scenario_name mode = list(map(int, args.modes)) # mode += [1] # show scenario data # mode += [2] # run predictor learning on scenarios # mode += [3] # evaluate predictors scenarios # mode += [4] # write accuracy statistics into table # mode += [5] # write accuracy statistics into table # list of predictor classes that implement the incremental_evaluation.interfaces.Predictor if args.debug: predictor_builders = [ models.basic_predictor_interfaces.SGD, models.basic_predictor_interfaces.Perceptron, ] else: predictor_builders = [ models.ensgendel_interface.Ensgendel, models.ensgendel_interface.Ensgen, models.ensgendel_interface.Ens, models.basic_predictor_interfaces.Perceptron, ] # scenario sets implementing the incremental_evaluation.interfaces.ScenarioSet if scenario_set_name == SS_MNIST012: scenario_set = SS.MnistMinimalScenarios(digits_tripplet=(0, 1, 2), debug_set=False, scout_subset=scout_subset) visualiser = VH.mnist_visualiser elif scenario_set_name == SS_MNIST197: scenario_set = SS.MnistMinimalScenarios(digits_tripplet=(1, 9, 7), debug_set=False, scout_subset=scout_subset) visualiser = VH.mnist_visualiser elif scenario_set_name == SS_MNIST_CN5: scenario_set = SS.MnistConvergentFiveScenarios(scout_subset=scout_subset) visualiser = VH.mnist_visualiser elif scenario_set_name 
== SS_GAUSS3: scenario_set = SS.Gauss3DMinimalScenarios(train_size=scout_subset) visualiser = VH.gauss3d_visualiser else: raise NotImplementedError(scenario_set_name) # setting up basic directories if not os.path.exists("results"): os.mkdir("results") if not os.path.exists(RESULTS): os.mkdir(RESULTS) # Pre-flight check of the scenario if 1 in mode: scenarios = scenario_set.get_scenarios() train_sam, train_sub = scenario_set.get_training_set() test_sam, test_sub = scenario_set.get_test_set() for scenario in scenarios: folder_name = "preview_{}".format(VH.scenario_into_filename(str(scenario))) folder_path = os.path.join(RESULTS, folder_name) if not os.path.exists(folder_path): os.mkdir(folder_path) VH.show_scenario(scenario, test_sam, test_sub, visualiser, save_into=folder_path) # Cycle of experiment runs for trial_tag in trial_tags: experiment_path = datafile_path(experiment_name, scenario_set_name, trial_tag) if not os.path.exists(experiment_path): os.mkdir(experiment_path) if 2 in mode: DFH.run_and_save(predictor_builders, scenario_set, experiment_path) if 3 in mode: evals = DFH.datafile_evaluation(experiment_path, { DFH.TOTAL_ACCURACY: IE.evaluate_task_total_accuracy, DFH.LOCAL_ACCURACY: IE.evaluate_task_accuracy, DFH.SUBCLASS_ACCURACY: IE.evaluate_subclass_accuracy, }) print(evals) # Stats evaluation files = [datafile_path(experiment_name, scenario_set_name, trial_tag) for trial_tag in trial_tags] portfolio = dict([(str(clazz), files) for clazz in predictor_builders]) if 4 in mode: eval_stats_total = DFH.extract_stats_for_portfolio(portfolio, over_testing_set=True, task_accuracy_type=DFH.TOTAL_ACCURACY) table = VH.stats_into_text_table(eval_stats_total, stat_cell_format, cell_join=';', row_join='\n') print(table) table_path = os.path.join(RESULTS, "{}_{}_total_accuracy.csv".format(experiment_name, scenario_set_name)) with open(table_path, "w") as fil: fil.write(table) print("Saved stats of total accuracy into {}".format(table_path)) if 5 in mode: figure_styles = 
[ [("color", "r"), ("marker", "o")], [("color", "g"), ("marker", "^")], [("color", "b"), ("marker", "x")], [("color", "c"), ("marker", "s")], [("color", "m"), ("marker", "d")], [("color", "y"), ("marker", "+")], [("color", "k"), ("marker", "*")], ] classifier_style = dict( [(str(clazz), dict([("label", clazz.__name__)] + figure_styles[i % len(figure_styles)])) for i, clazz in enumerate(predictor_builders)] ) eval_stats_total = DFH.extract_stats_for_portfolio(portfolio, over_testing_set=True, task_accuracy_type=DFH.TOTAL_ACCURACY) scenarios = list(eval_stats_total[list(eval_stats_total.keys())[0]].keys()) print(scenarios) for i, scenario in enumerate(scenarios): # picking subclass for tracking scenario_obj = eval(scenario) tracked_label = list(scenario_obj[0].keys())[0] tracked_subclass = scenario_obj[0][tracked_label][-1] # tracking the selected subclass label assignment eval_stats_tracked = DFH.extract_stats_for_portfolio( portfolio, over_testing_set=True, task_accuracy_type=None, evaluator=tracked_evaluation) # titles and names _scenario_str = scenario if type(scenario) is bytes: _scenario_str = scenario.decode('ASCII') # sometimes hdf5 returns bytes instead of strings test_task = str(IE.get_perfect_task_map(scenario_obj, len(scenario_obj) - 1)) tracked_task = "{{{}: [{}]}}".format(tracked_label, tracked_subclass) title = "Scenario: {}\ntest task {}(full), tracked assignment {}(dashed)".format( _scenario_str, test_task, tracked_task) # visualisaiton fig_path = os.path.join(RESULTS, "{}_{}_{}_accuracy.pdf".format(experiment_name, scenario_set_name, VH.scenario_into_filename(_scenario_str))) VH.show_metric_evol(eval_stats_total, scenario, classifier_style, fig_path=fig_path, tracked_eval_stats=eval_stats_tracked, title=title) print("fig of scenario {} saved into {}".format(scenario, fig_path))
[ 11748, 29497, 62, 18206, 2288, 13, 26791, 355, 28157, 198, 11748, 29497, 62, 18206, 2288, 13, 1416, 39055, 62, 28709, 355, 6723, 198, 11748, 29497, 62, 18206, 2288, 13, 41464, 5612, 62, 2978, 525, 355, 569, 39, 198, 11748, 4981, 13, 3...
2.174945
4,510
import os import re from bs4 import BeautifulSoup from django.core.exceptions import ObjectDoesNotExist from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator from django.db import models from django.db.models import Case, Count, Q, Value, When from django.utils.encoding import python_2_unicode_compatible from django.utils.html import mark_safe from modelcluster.fields import ParentalKey from modelcluster.tags import ClusterTaggableManager from taggit.models import Tag, TaggedItemBase from core import panels from core.forms import SubmitFormBuilder from core.utilities import has_recaptcha, validate_only_one_instance from wagtail.wagtailcore.fields import RichTextField from wagtail.wagtailcore.models import Page from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField from wagtail.wagtailsearch import index from wagtailcaptcha.models import WagtailCaptchaEmailForm # Main core Page model. All main content pages inherit from this class. def get_context(self, request, *args, **kwargs): # Get pages pages = self.children() # Pagination page = request.GET.get('page') paginator = Paginator(pages, 12) # Show 12 pages per page try: pages = paginator.page(page) except PageNotAnInteger: pages = paginator.page(1) except EmptyPage: pages = paginator.page(paginator.num_pages) # Update template context context = super(WagtailCompanyPage, self).get_context(request, *args, **kwargs) context['pages'] = pages return context content_panels = panels.WAGTAIL_COMPANY_PAGE_CONTENT_PANELS settings_panels = panels.WAGTAIL_COMPANY_PAGE_SETTINGS_PANELS
[ 11748, 28686, 198, 11748, 302, 198, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 6738, 42625, 14208, 13, 7295, 13, 1069, 11755, 1330, 9515, 13921, 3673, 3109, 396, 198, 6738, 42625, 14208, 13, 7295, 13, 79, 363, 20900, 1330, 3...
2.86252
611
#!/usr/bin/python from calvin.utilities import certificate import os print "Trying to create a new domain configuration." testconfig = certificate.Config(domain="test") # testconfig2 = certificate.Config(domain="evil") print "Reading configuration successfull." print "Creating new domain." certificate.new_domain(testconfig) # certificate.new_domain(testconfig2) print "Created new domain." for i in range(1, 5): for j in range(0, 6): name = "node{}:{}".format(i, j) certreq = certificate.new_runtime(testconfig, name) certificate.sign_req(testconfig, os.path.basename(certreq), name) certreq = certificate.new_runtime(testconfig, "evil") certificate.sign_req(testconfig, os.path.basename(certreq), "evil") # certreq = certificate.new_runtime(testconfig, "evil2") # certificate.sign_req(testconfig2, os.path.basename(certreq), "evil2")
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 6738, 2386, 7114, 13, 315, 2410, 1330, 10703, 198, 11748, 28686, 198, 4798, 366, 51, 14992, 284, 2251, 257, 649, 7386, 8398, 526, 198, 9288, 11250, 796, 10703, 13, 16934, 7, 27830, 2625,...
3.02439
287
# -*- coding: utf-8 -*- # Generated by Django 1.11.3 on 2017-10-23 05:30 from __future__ import unicode_literals import django.core.validators from django.db import migrations, models import django.db.models.deletion
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 37770, 352, 13, 1157, 13, 18, 319, 2177, 12, 940, 12, 1954, 8870, 25, 1270, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, ...
2.807692
78
# _*_ coding:utf-8 _*_ # arr=["aaa",True,100,"ccc"] print arr print arr[::-1]
[ 2, 4808, 9, 62, 19617, 25, 40477, 12, 23, 4808, 9, 62, 198, 2, 198, 198, 3258, 28, 14692, 46071, 1600, 17821, 11, 3064, 553, 535, 66, 8973, 198, 198, 4798, 5240, 198, 198, 4798, 5240, 58, 3712, 12, 16, 60 ]
1.95122
41
''' EASY 441. Arranging Coins You have a total of n coins that you want to form in a staircase shape, where every k-th row must have exactly k coins. '''
[ 7061, 6, 198, 36, 26483, 604, 3901, 13, 943, 32319, 30108, 198, 198, 1639, 423, 257, 2472, 286, 299, 10796, 326, 345, 765, 284, 1296, 287, 220, 198, 64, 27656, 5485, 11, 810, 790, 479, 12, 400, 5752, 1276, 423, 3446, 479, 10796, 1...
3.297872
47
# Written by Minh Nguyen and CBIG under MIT license: # https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md import unittest import torch import cbig.Nguyen2020.rnn as rnn
[ 2, 22503, 416, 1855, 71, 42379, 290, 47970, 38, 739, 17168, 5964, 25, 198, 2, 3740, 1378, 12567, 13, 785, 14, 22405, 35543, 78, 17822, 14, 34, 3483, 38, 14, 2436, 672, 14, 9866, 14, 43, 2149, 24290, 13, 9132, 198, 11748, 555, 715,...
2.84127
63
from django import forms from django.forms import ModelForm from django.contrib.auth.forms import UserCreationForm from django.contrib.auth.models import User from django.core.validators import EmailValidator from . import models from .models import ProfileModel from io import BytesIO from PIL import Image, ExifTags from django.core.files import File # class PostForm(ModelForm): # class meta: # model = models.PostModel # fields = ('image', 'caption', 'location') def must_be_unique_email(value): user = User.objects.filter(email=value) if len(user) > 0: raise forms.ValidationError("Email Already Exists") return value def must_be_unique_username(value): user = User.objects.filter(username=value) if len(user) > 0: raise forms.ValidationError("Username Already Exists") return value # class ProfileForm(forms.Form): # profilePicture = forms.ImageField(label="Profile Picture", required=False) # bio = forms.CharField(label="Bio", max_length=512, required=False) # def save(self, request): # profileInstance = models.PostModel() # postInstance.user = request.user # profileInstance.profilePicture = self.cleaned_data["profilePicture"] # profileInstance.bio = self.cleaned_data["bio"] # profileInstance.save() # return profileInstance
[ 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, 13, 23914, 1330, 9104, 8479, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 23914, 1330, 11787, 12443, 341, 8479, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27...
2.906926
462
from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY from up.tasks.cls.models.heads import BaseClsHead, ConvNeXtHead __all__ = ['SparseBaseClsHead', 'SparseConvNeXtHead']
[ 6738, 510, 13, 26791, 13, 24622, 13, 2301, 4592, 62, 69, 9548, 1330, 33893, 62, 57, 6684, 62, 31553, 1797, 40405, 198, 6738, 510, 13, 83, 6791, 13, 565, 82, 13, 27530, 13, 16600, 1330, 7308, 2601, 82, 13847, 11, 34872, 8199, 55, 8...
2.633803
71
# Copyright (c) 2019. Partners HealthCare and other members of # Forome Association # # Developed by Sergey Trifonov based on contributions by Joel Krier, # Michael Bouzinier, Shamil Sunyaev and other members of Division of # Genetics, Brigham and Women's Hospital # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from threading import Condition from datetime import datetime from forome_tools.job_pool import ExecutionTask from forome_tools.log_err import logException from app.config.a_config import AnfisaConfig #===============================================
[ 2, 220, 15069, 357, 66, 8, 13130, 13, 14205, 3893, 17784, 290, 584, 1866, 286, 198, 2, 220, 1114, 462, 5396, 198, 2, 198, 2, 220, 6013, 276, 416, 36106, 833, 361, 261, 709, 1912, 319, 9284, 416, 18623, 509, 5277, 11, 198, 2, 220...
3.776224
286
import pyttsx3 import speech_recognition as sr import os import subprocess #from requests import request , session #from pprint import pprint as pp import json import requests import datetime from datetime import date import time import calendar import warnings import random import wikipedia import webbrowser from pywhatkit import sendwhatmsg_instantly import smtplib import sys import pyjokes import pyautogui import PyPDF2 from tkinter.filedialog import * import psutil import speedtest import wolframalpha warnings.filterwarnings("ignore") #ignoring all the warnings if sys.platform == "win32": engine=pyttsx3.init('sapi5') voices=engine.getProperty('voices') engine.setProperty('voice',voices[1].id) else: engine=pyttsx3.init('nsss') #sapi5 - SAPI5 on Windows #nsss - NSSpeechSynthesizer on Mac OS X #espeak - eSpeak on every other platform voices=engine.getProperty('voices') #for i in range(48): #print(voices[i].id) engine.setProperty('voice',voices[10].id)#10b 17 26 28 37 39 if __name__=="__main__": TaskExecution()
[ 11748, 12972, 83, 912, 87, 18, 201, 198, 11748, 4046, 62, 26243, 653, 355, 19677, 201, 198, 11748, 28686, 201, 198, 11748, 850, 14681, 201, 198, 2, 6738, 7007, 1330, 2581, 837, 6246, 201, 198, 2, 6738, 279, 4798, 1330, 279, 4798, 35...
2.615561
437
import random # najprej konstante STEVILO_DOVOLJENIH_NAPAK = 10 PRAVILNA_CRKA = "+" PONOVLJENA_CRKA = "o" NAPACNA_CRKA = "-" ZMAGA = "W" PORAZ = "X" bazen_besed = [] with open("besede.txt", encoding ="utf8") as input_file: bazen_besed = input_file.readlines()
[ 11748, 4738, 198, 198, 2, 299, 1228, 3866, 73, 479, 261, 301, 12427, 198, 2257, 20114, 4146, 46, 62, 35, 8874, 3535, 41, 1677, 40, 39, 62, 45, 2969, 10206, 796, 838, 198, 198, 47, 3861, 53, 4146, 4535, 62, 9419, 25123, 796, 43825,...
1.964029
139