Dataset preview: each row pairs a `content` string with its tokenization. Column statistics over the split:

  column            type      per-row value          min   max
  content           string    character length       1     1.05M
  input_ids         list      number of token IDs    1     883k
  ratio_char_token  float64   value range            1     22.9
  token_count       int64     value range            1     883k
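The `token_count` column is the length of `input_ids`, and `ratio_char_token` is the character count of `content` divided by `token_count`. Below is a minimal sketch of recomputing both fields for one row; it assumes a GPT-2-style BPE tokenizer from Hugging Face `transformers` (the IDs in this dump are consistent with GPT-2's vocabulary, e.g. 198 decodes to a newline, but the exact tokenizer is an assumption, as is the `row_stats` helper name):

# Sketch: recompute token_count and ratio_char_token for one row.
# Assumptions: GPT-2 BPE tokenizer; `row_stats` is a hypothetical helper.
from transformers import GPT2TokenizerFast

tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def row_stats(content: str) -> dict:
    input_ids = tokenizer(content)["input_ids"]
    token_count = len(input_ids)
    return {
        "input_ids": input_ids,
        "token_count": token_count,
        # characters of source text per token
        "ratio_char_token": len(content) / token_count,
    }

print(row_stats("import sys\nimport os\n"))
# e.g. {'input_ids': [11748, 25064, 198, 11748, 28686, 198], 'token_count': 6, 'ratio_char_token': 3.5}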
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import os
import torch.distributed as dist

from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import LocalUpdateF
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
from torch.multiprocessing import Process
from deep_gradient_compression import DGC
import json

# When main_fed.py is run directly, __name__ == '__main__'; when it is
# imported from another .py file, __name__ == 'main_fed'.
# (Reconstructed from a garbled non-ASCII comment in the dump.)
if __name__ == '__main__':
    # parse args
    args = args_parser()
    args.device = torch.device('cuda:{}'.format(args.gpu))

    torch.manual_seed(0)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    rank = 0
    device_id = rank
    os.environ['MASTER_ADDR'] = '127.0.0.1'
    os.environ['MASTER_PORT'] = '29500'
    dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size)
    # if torch.cuda.is_available() and args.gpu != -1 else 'cpu'

    # load dataset and split users
    if args.dataset == 'mnist':
        # ToTensor() scales pixels to [0, 1]; Normalize applies (x - 0.1307) / 0.3081,
        # mapping them to roughly [-1, 1]. (Reconstructed from a garbled comment.)
        trans_mnist = transforms.Compose([transforms.ToTensor(),
                                          transforms.Normalize((0.1307,), (0.3081,))])
        if trans_mnist is not None:
            print(1)
            print(trans_mnist)
        # 60000 training images, 10000 test images
        dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
        dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
        # sample users: IID or non-IID split
        if args.iid:
            dict_users = mnist_iid(dataset_train, args.num_users)
        else:
            dict_users = mnist_noniid(dataset_train, args.num_users)
    elif args.dataset == 'cifar':
        trans_cifar = transforms.Compose([transforms.ToTensor(),
                                          transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
        dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
        if args.iid:
            dict_users = cifar_iid(dataset_train, args.num_users)
        else:
            exit('Error: only consider IID setting in CIFAR10')
    else:
        exit('Error: unrecognized dataset')
    img_size = dataset_train[0][0].shape  # print('df ', img_size)  -> [1, 28, 28]

    # build model
    # print(args.model)
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        len_in = 1
        for x in img_size:
            # print('x', x)
            len_in *= x
        net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
        # add: global control variate for SCAFFOLD
        control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    net_glob.train()
    print(net_glob)

    control_weights = control_global.state_dict()
    # copy weights (w_glob was commented out in the dump but is used below)
    w_glob = net_glob.state_dict()
    c_glob = copy.deepcopy(net_glob.state_dict())
    # print(w_glob)

    # training
    loss_train = []
    accuracy = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []
    count = 0
    test_acc_list = []

    if args.all_clients:
        print("Aggregation over all clients")
        w_locals = [w_glob for i in range(args.num_users)]
    # add
    # local control variates, one per client (commented out in the dump but used below)
    c_local = [MLP(dim_in=len_in, dim_hidden=200,
                   dim_out=args.num_classes).to(args.device) for i in range(args.num_users)]
    for net in c_local:
        net.load_state_dict(control_weights)
    delta_c = copy.deepcopy(net_glob.state_dict())
    # delta_x = copy.deepcopy(net_glob.state_dict())

    # with open("test.txt", "w") as f:
    #     for i in range(0, len(c_local)):
    #         for k, v in c_local[i].state_dict().items():
    #             f.write(f"{k},{v}\n".format(k, v))
    # with open("test.txt", "a") as f:
    #     for i in range(0, len(c_local)):
    #         for k, v in w_locals[i].items():
    #             f.write(f"{k},{v}\n".format(k, v))
    # add
    # print("why?")

    for iter in range(args.epochs):
        # reset the aggregated control-variate delta each round
        for i in delta_c:
            delta_c[i] = 0.0
        # for i in delta_x:
        #     delta_x[i] = 0.0
        loss_locals = []
        if not args.all_clients:
            w_locals = []
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        for idx in idxs_users:
            # local update with momentum SGD
            local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss, local_delta_c, local_delta, control_local_w = local.train(
                net=copy.deepcopy(net_glob).to(args.device), control_local=c_local[idx],
                control_global=control_global, rank=rank, device_id=device_id,
                size=args.world_size)
            # add
            if iter != 0:
                c_local[idx].load_state_dict(control_local_w)
            if args.all_clients:
                w_locals[idx] = copy.deepcopy(w)
            else:
                w_locals.append(copy.deepcopy(w))
            # add
            loss_locals.append(copy.deepcopy(loss))
            # add
            for i in delta_c:
                if iter != 0:
                    delta_c[i] += w[i]
                else:
                    delta_c[i] += local_delta_c[i]
                # delta_x[i] += local_delta[i]
        # add
        # update the delta C
        for i in delta_c:
            delta_c[i] /= m
            # delta_x[i] /= m

        # update global weights
        w_glob = FedAvg(w_locals)

        # add: update the global control variate c and w
        # w_glob = net_glob.state_dict()
        control_global_w = control_global.state_dict()
        for i in control_global_w:
            if iter != 0:
                # w_glob[i] = delta_x[i]
                # else:
                #     w_glob[i] += delta_x[i]
                control_global_w[i] += (m / args.num_users) * delta_c[i]

        # copy weight to net_glob
        net_glob.load_state_dict(w_glob)
        # add
        control_global.load_state_dict(control_global_w)

        # print loss
        loss_avg = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg))
        loss_train.append(loss_avg)

        # acc_train, loss_train = test_img(net_glob, dataset_train, args)
        acc_test, loss_test = test_img(net_glob, dataset_test, args)
        accuracy.append(acc_test)

        # add
        for c in range(args.num_users):
            local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx])
        torch.cuda.empty_cache()

    # net_glob.eval()
    # print("Training accuracy: {:.2f}".format(acc_train))
    # print("Testing accuracy: {:.2f}".format(acc_test))

    ###################################################################################
    ###################################################################################
    ###################################################################################
    ###################################################################################
    # Fedavg
    # build model
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_globF = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_globF = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        len_in = 1
        for x in img_size:
            len_in *= x
        net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    print(net_globF)
    net_globF.train()

    # copy weights
    w_globF = net_globF.state_dict()

    # training
    loss_trainF = []
    accuracyF = []
    cv_loss, cv_acc = [], []
    val_loss_pre, counter = 0, 0
    net_best = None
    best_loss = None
    val_acc_list, net_list = [], []

    if args.all_clients:
        print("Aggregation over all clients")
        w_localsF = [w_globF for i in range(args.num_users)]
    for iter in range(args.epochs):
        loss_locals = []
        if not args.all_clients:
            w_localsF = []
        m = max(int(args.frac * args.num_users), 1)
        idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        for idx in idxs_users:
            localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx])
            w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device))
            if args.all_clients:
                w_localsF[idx] = copy.deepcopy(w)
            else:
                w_localsF.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
        # update global weights
        w_globF = FedAvg(w_localsF)

        # copy weight to net_globF
        net_globF.load_state_dict(w_globF)

        # print loss
        loss_avgF = sum(loss_locals) / len(loss_locals)
        print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF))
        loss_trainF.append(loss_avgF)
        acc_test, loss_test = test_img(net_globF, dataset_test, args)
        accuracyF.append(acc_test)

    # plot loss curve
    plt.figure()
    print(loss_train, loss_trainF)
    plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2)
    plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg', zorder=1)
    plt.ylabel('train_loss')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid))

    # testing
    net_glob.eval()
    acc_train, loss_train = test_img(net_glob, dataset_train, args)
    acc_test, loss_test = test_img(net_glob, dataset_test, args)
    print("Training accuracy: {:.2f}".format(acc_train))
    print("Testing accuracy: {:.2f}".format(acc_test))

    # plot accuracy curve
    plt.figure()
    # plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r')
    plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2)
    plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1)
    plt.ylabel('test_acc')
    plt.xlabel('epochs')
    plt.legend(loc='best')
    plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 11361, 2196, 25, 513, 13, 21, 198, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 10786, 46384, ...
ratio_char_token: 2.125756
token_count: 5,288
content:
from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion
from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root
[ 6738, 3253, 82, 62, 10210, 74, 13, 8356, 62, 50033, 1330, 15553, 11, 6127, 11, 43160, 198, 6738, 3253, 82, 62, 10210, 74, 13, 7295, 1330, 23881, 11, 22920, 198, 6738, 275, 62, 8356, 62, 33407, 62, 30604, 13, 31391, 13, 10210, 74, ...
ratio_char_token: 3.393939
token_count: 99
""" This script creates a test that fails when metarl.tf.baselines failed to initialize. """ import tensorflow as tf from metarl.envs import MetaRLEnv from metarl.tf.baselines import ContinuousMLPBaseline from metarl.tf.baselines import GaussianMLPBaseline from tests.fixtures import TfGraphTestCase from tests.fixtures.envs.dummy import DummyBoxEnv
[ 37811, 198, 1212, 4226, 8075, 257, 1332, 326, 10143, 618, 198, 4164, 7063, 13, 27110, 13, 12093, 20655, 4054, 284, 41216, 13, 198, 37811, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 198, 6738, 1138, 7063, 13, 268, 14259, 1330, 30277...
ratio_char_token: 3.320755
token_count: 106
content:
import jinja2
import json
from send_email import send_email
from app.models import User, MyResourcesAWS, db
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from sqlalchemy import desc
import subprocess
import datetime
from flask import render_template
[ 11748, 474, 259, 6592, 17, 198, 11748, 33918, 198, 6738, 3758, 62, 12888, 1330, 3758, 62, 12888, 198, 6738, 598, 13, 27530, 1330, 11787, 11, 2011, 33236, 12298, 50, 11, 20613, 198, 6738, 598, 13, 274, 13, 8356, 15255, 6255, 1370, 9186...
ratio_char_token: 3.666667
token_count: 72
content:
# Ryan Turner (turnerry@iro.umontreal.ca)
from __future__ import division, print_function

from builtins import range

import numpy as np
import scipy.stats as ss

import mlpaper.constants as cc
import mlpaper.mlpaper as bt
import mlpaper.perf_curves as pc
from mlpaper.classification import DEFAULT_NGRID, curve_boot
from mlpaper.test_constants import FPR
from mlpaper.util import area, interp1d

_FPR = FPR / 3.0  # Divide by number of test funcs


def test_boot_EB_and_test(runs=100):
    """Arguably this should do out to its own file since it tests bt core."""
    mu = np.random.randn()
    stdev = np.abs(np.random.randn())
    N = 201
    confidence = 0.95

    fail = [0] * 3
    for ii in range(runs):
        x = mu + stdev * np.random.randn(N)
        fail_CI, fail_CI2, fail_P = run_trial(x, mu)
        fail[0] += fail_CI
        fail[1] += fail_CI2
        fail[2] += fail_P
    expect_p_fail = 1.0 - confidence
    print("boot mean and test")
    fail_check_stat(fail, runs, expect_p_fail, _FPR)


if __name__ == "__main__":
    np.random.seed(56467)
    test_boot()
    test_boot_mean()
    test_boot_EB_and_test()
    print("passed")
[ 2, 6047, 15406, 357, 15344, 6996, 31, 7058, 13, 388, 756, 5305, 13, 6888, 8, 198, 6738, 11593, 37443, 834, 1330, 7297, 11, 3601, 62, 8818, 198, 198, 6738, 3170, 1040, 1330, 2837, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, ...
ratio_char_token: 2.427673
token_count: 477
content:
from typing import Any, Dict

import numpy as np
import pandas as pd

import core.artificial_signal_generators as sig_gen
import core.statistics as stats
import core.timeseries_study as tss
import helpers.unit_test as hut
[ 6738, 19720, 1330, 4377, 11, 360, 713, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 11748, 4755, 13, 433, 9542, 62, 12683, 282, 62, 8612, 2024, 355, 43237, 62, 5235, 198, 11748, 4755, 13, ...
ratio_char_token: 3.308824
token_count: 68
content:
#! -*- coding:utf-8 -*-

import os
import sys
import cv2
import numpy as np
[ 2, 0, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 269, 85, 17, 198, 11748, 299, 32152, 355, 45941, 198 ]
ratio_char_token: 2.375
token_count: 32
content:
# Generated by Django 3.2.7 on 2021-09-23 20:01

import cloudinary.models
from django.db import migrations
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 22, 319, 33448, 12, 2931, 12, 1954, 1160, 25, 486, 198, 198, 11748, 6279, 3219, 13, 27530, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
ratio_char_token: 3
token_count: 36
content:
# VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
    v1 = i + random()
    v2 = v1 + random()
    row = [v1, v2]
    data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
[ 2, 569, 1503, 1672, 198, 6738, 9756, 27530, 13, 912, 64, 13, 31364, 62, 283, 13, 7785, 62, 19849, 1330, 569, 1503, 198, 6738, 4738, 1330, 4738, 198, 2, 542, 36207, 27039, 351, 20203, 198, 7890, 796, 1351, 3419, 198, 1640, 1312, 287,...
ratio_char_token: 2.60274
token_count: 146
content:
import json
import urllib.request
import MySQLdb

db = MySQLdb.connect(host="localhost",  # your host, usually localhost
                     user="root",       # your username
                     passwd="",         # your password
                     db="election")
cur = db.cursor()

# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers = {'User-Agent': user_agent, }

district = input("Enter the Name of the district: ")
url = "http://election.ujyaaloonline.com/api/candidates?district=" + district

request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)

data = json.loads(source)
# print(data['candidates']['2']['400'][0]['cName'])
election_area = data['election_areas']  # get all the possible election-areas from the district

# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''

i = 0
j = 0
for key, value in election_area.items():
    area_key = key
    district_name = data['district_slug']
    try:
        for item in data["candidates"]['1'][area_key]:
            print(item['aName'])
            print(item["cName"])
            i = i + 1
    except:
        for item in data["candidates"]['2'][area_key]:
            constituencyname = item['aName'].encode('utf-8')
            candidatename = item["cName"].encode('utf-8')
            sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
            cur.execute(sql, (candidatename, constituencyname))
            db.commit()
            print('INSERTED ' + item["cName"] + " into the database")
            j = j + 1

print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database")
[ 11748, 33918, 198, 11748, 2956, 297, 571, 13, 25927, 198, 11748, 33476, 9945, 198, 9945, 796, 33476, 9945, 13, 8443, 7, 4774, 2625, 36750, 1600, 220, 220, 220, 1303, 534, 2583, 11, 3221, 1957, 4774, 198, 220, 220, 220, 220, 220, 220, ...
ratio_char_token: 2.399103
token_count: 892
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun  9 23:28:21 2017

@author: samriddhi
"""

import re
import sangita.hindi.tokenizer as tok
import sangita.hindi.corpora.lemmata as lt

if __name__ == '__main__':
    input_str = ' - - - . '
    print(lookupLemmatizer(input_str))
    print(numericLemmatizer(input_str))
    print(defaultLemmatizer(input_str))
    print(Lemmatizer(input_str))
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 19480, 7653, 220, 860, 2242, 25, 2078, 25, 2481, 2177, 198, 198, 31, 9800, 25, 6072, 81, ...
ratio_char_token: 2.125
token_count: 208
content:
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stet_util.py."""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import os
import shutil

from gslib import storage_url
from gslib.tests import testcase
from gslib.tests import util
from gslib.tests.util import unittest
from gslib.utils import execution_util
from gslib.utils import stet_util

import mock
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 33448, 3012, 3457, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, ...
ratio_char_token: 3.547619
token_count: 294
content:
from markdown import markdown
from unittest import TestCase

from markdown_editing.extension import EditingExtension
[ 6738, 1317, 2902, 1330, 1317, 2902, 198, 6738, 555, 715, 395, 1330, 6208, 20448, 198, 198, 6738, 1317, 2902, 62, 276, 1780, 13, 2302, 3004, 1330, 39883, 11627, 3004, 628, 198 ]
ratio_char_token: 3.83871
token_count: 31
""" SIREN/DIANA basic functionality testing framework Requires env vars: - GMAIL_USER - GMAIL_APP_PASSWORD - GMAIL_BASE_NAME -- ie, abc -> abc+hobitduke@gmail.com These env vars are set to default: - ORTHANC_PASSWORD - SPLUNK_PASSWORD - SPLUNK_HEC_TOKEN TODO: Move stuff to archive after collected TODO: Write data into daily folder or something from mi-share ingress TODO: Suppress dicom-simplify missing (series) creation time """ import time import logging import shutil import io import tempfile from pathlib import Path from pprint import pformat from contextlib import redirect_stdout from multiprocessing import Process from datetime import datetime, timedelta from interruptingcow import timeout from crud.manager import EndpointManager from crud.abc import Watcher, Trigger from crud.endpoints import Splunk from wuphf.endpoints import SmtpMessenger from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir from diana.dixel import Dixel, ShamDixel from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv from wuphf.cli.string_descs import * from diana.utils import unpack_data from crud.utils import deserialize_dict from diana.utils.gateways import suppress_urllib_debug from diana.utils.endpoint.watcher import suppress_watcher_debug from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \ handle_file_arrived, start_watcher, tagged_studies from trial_dispatcher import TrialDispatcher as Dispatcher LOCAL_SERVICES = False # Set False to use UMich services USE_GMAIL = True # Set False to use UMich smtp DO_DIR_UPLOAD = False CHECK_SPLUNK = False # Set False to skip long wait for dixel to index CHECK_WATCH_STUDIES= False # Set False to skip long wait for orthanc watcher EMAIL_DRYRUN = False # Set False to send live emails # CONFIG _services = "@services.yaml" _subscriptions = "@subscriptions.yaml" os.environ["SPLUNK_INDEX"] = "testing" SMTP_MESSENGER_NAME = "smtp_server" if LOCAL_SERVICES: # Set everythin back to default os.environ["UMICH_HOST"] = "localhost" # For testing del os.environ["ORTHANC_USER"] del os.environ["ORTHANC_PASSWORD"] del os.environ["SPLUNK_USER"] del os.environ["SPLUNK_PASSWORD"] if USE_GMAIL: SMTP_MESSENGER_NAME = "gmail:" test_email_addr1 = "derek.merck@ufl.edu" #test_email_addr1 = "ejacob@med.umich.edu" #test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1") # os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0] anon_salt = "Test+Test+Test" fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss=' msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n""" notify_msg_t = "@./notify.txt.j2" # TESTING CONfIG test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip") test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263") test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately # TESTS if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) suppress_urllib_debug() suppress_watcher_debug() # Create service endpoints services = EndpointManager(serialized_ep_descs=_services) print(pformat(services.ep_descs)) orth: ObservableOrthanc = services.get("hobit") orth.polling_interval = 2.0 messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME) messenger.msg_t = msg_t splunk: Splunk = services.get("splunk") dcm_dir = DcmDir(path=test_sample_dir) # Load a dixel dixel = dcm_dir.get("HOBIT1172/IM0", file=True) # assert( dixel ) # assert( dixel.file ) # # # Verify that all endpoints are 
online # assert( orth.check() ) # assert( messenger.check() ) # assert( splunk.check() ) # # # Verify basic capabilities: # # - upload # # - anonymize # # - index # # - message # # - distribute # # assert( test_upload_one(orth, dixel) ) # assert( test_anonymize_one(orth, dixel) ) # assert( test_index_one(splunk, dixel) ) assert( test_email_messenger(messenger) ) # assert( test_distribute(_subscriptions, messenger) ) exit() # Verify observer daemons: # - watch dir # - watch orth assert( test_watch_dir(test_sample_file) ) assert( test_watch_orthanc(dixel, orth) ) # Verify handlers: # - directory # - zip # - file # - notify if DO_DIR_UPLOAD: assert( test_upload_dir_handler(dcm_dir, orth) ) assert( test_upload_zip_handler(test_sample_zip, orth) ) assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) ) assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) ) # Verify watcher pipeline # - run watcher assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
[ 37811, 198, 50, 4663, 1677, 14, 35, 16868, 32, 4096, 11244, 4856, 9355, 198, 198, 39618, 17365, 410, 945, 25, 198, 12, 402, 5673, 4146, 62, 29904, 198, 12, 402, 5673, 4146, 62, 24805, 62, 47924, 54, 12532, 198, 12, 402, 5673, 4146, ...
ratio_char_token: 2.537169
token_count: 1,964
content:
#
# Copyright (c) 2019 James E. King III
#
# Use, modification, and distribution are subject to the
# Boost Software License, Version 1.0. (See accompanying file
# LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
#

import json
import networkx
import re
from pathlib import Path

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.')
    parser.add_argument('root', type=str, help='Boost root directory.')
    parser.add_argument('out', type=str, help='Output filename.')
    require_one = parser.add_mutually_exclusive_group(required=True)
    require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.')
    require_one.add_argument('--from', help='Show dependencies from a given repository.')
    args = parser.parse_args()

    root = Path(args.root)
    assert root.is_dir(), "root is not a directory"
    out = Path(args.out)

    tree = BoostDependencyTree(root, out)
    tree.load()
    if args.cycles:
        tree.report_cycles()
    else:
        tree.report_dependencies_from(args.__dict__["from"])
[ 2, 198, 2, 15069, 357, 66, 8, 13130, 3700, 412, 13, 2677, 6711, 198, 2, 198, 2, 5765, 11, 17613, 11, 290, 6082, 389, 2426, 284, 262, 198, 2, 19835, 10442, 13789, 11, 10628, 352, 13, 15, 13, 357, 6214, 19249, 2393, 198, 2, 38559,...
ratio_char_token: 2.938931
token_count: 393
content:
from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort
from flask_cors import CORS
# from flask import status

from datetime import date, datetime, timedelta
from calendar import monthrange
from dateutil.parser import parse
import pytz
import os
import sys
import time
import uuid
import json
import random
import string
import pathlib
import io

from uuid import UUID
from bson.objectid import ObjectId

# straight mongo access
from pymongo import MongoClient

import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration

sentry_sdk.init(
    dsn="https://acea88276810494e96828c4fd0e1471f@o555579.ingest.sentry.io/5685529",
    integrations=[FlaskIntegration()],

    # Set traces_sample_rate to 1.0 to capture 100%
    # of transactions for performance monitoring.
    # We recommend adjusting this value in production.
    traces_sample_rate=1.0,

    # By default the SDK will try to use the SENTRY_RELEASE
    # environment variable, or infer a git commit
    # SHA as release, however you may want to set
    # something more human-readable.
    # release="myapp@1.0.0",
)

# mongo
# mongo_client = MongoClient('mongodb://localhost:27017/')
mongo_client = MongoClient(
    "mongodb+srv://Mahitha-Maddi:Mahitha%4042@cluster0.1z0g8.mongodb.net/test")

app = Flask(__name__)
# CORS(app)
CORS(app, resources={r"/*": {"origins": "*"}})

basedir = os.path.abspath(os.path.dirname(__file__))

# Here are my datasets
bookings = dict()

################
# Apply to mongo
################

# database access layer

# endpoint to check Availability

# endpoint to create new Booking

##################
# Apply from mongo
##################

def applyRecordLevelUpdates():
    return None

##################
# ADMINISTRATION #
##################

# This runs once before the first single request
# Used to bootstrap our collections

# This runs once before any request

############################
# INFO on containerization #
############################

# To containerize a flask app:
# https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
[ 6738, 42903, 1330, 46947, 11, 7644, 11, 2581, 11, 33918, 1958, 11, 8543, 62, 28243, 11, 18941, 11, 19016, 62, 1640, 11, 308, 11, 6246, 11, 3758, 62, 6738, 62, 34945, 11, 15614, 198, 6738, 42903, 62, 66, 669, 1330, 327, 20673, 198, ...
ratio_char_token: 3.121508
token_count: 716
""" .. --------------------------------------------------------------------- ___ __ __ __ ___ / | \ | \ | \ / the automatic \__ |__/ |__/ |___| \__ annotation and \ | | | | \ analysis ___/ | | | | ___/ of speech http://www.sppas.org/ Use of this software is governed by the GNU Public License, version 3. SPPAS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. SPPAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with SPPAS. If not, see <http://www.gnu.org/licenses/>. This banner notice must not be removed. --------------------------------------------------------------------- src.models.acm.htkscripts.py ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ import os import os.path import logging # ---------------------------------------------------------------------------
[ 37811, 198, 220, 220, 220, 11485, 198, 220, 220, 220, 220, 220, 220, 220, 16529, 30934, 198, 220, 220, 220, 220, 220, 220, 220, 220, 46444, 220, 220, 11593, 220, 220, 220, 11593, 220, 220, 220, 11593, 220, 220, 220, 46444, 198, 220,...
ratio_char_token: 2.732342
token_count: 538
content:
from mock import patch

from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.core import exceptions
from django_dynamic_fixture import G
from django_webtest import WebTest

from icekit.models import Layout
from icekit.page_types.layout_page.models import LayoutPage
from icekit.utils import fluent_contents

from . import models

User = get_user_model()
[ 6738, 15290, 1330, 8529, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 11299, 19199, 13, 27530, 1330, 14041, 6030, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 49315, 13, 27530, 1330, 14413, 198, 6738, 42625, 14208, 13, 3642, 822, 13,...
ratio_char_token: 3.522727
token_count: 132
content:
from imshowtools import imshow
import cv2

if __name__ == '__main__':
    image_lenna = cv2.imread("lenna.png")
    imshow(image_lenna, mode='BGR', window_title="LennaWindow", title="Lenna")

    image_lenna_bgr = cv2.imread("lenna_bgr.png")
    imshow(image_lenna, image_lenna_bgr, mode=['BGR', 'RGB'], title=['lenna_rgb', 'lenna_bgr'])

    imshow(*[image_lenna for _ in range(12)], title=["Lenna" for _ in range(12)], window_title="LennaWindow")
    imshow(*[image_lenna for _ in range(30)], title="Lenna", padding=(1, 1, 0, (0, 0, 0.8, 0.8)))
[ 6738, 545, 12860, 31391, 1330, 545, 12860, 198, 11748, 269, 85, 17, 628, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 628, 220, 220, 220, 2939, 62, 75, 13713, 796, 269, 85, 17, 13, 320, 961, 7203, 75, 13713, 13, ...
ratio_char_token: 2.302521
token_count: 238
content:
# -*- coding: utf-8 -*-
"""Provides concept object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 15946, 1460, 3721, 2134, 526, 15931, 198, 198, 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11485, 1330, 256, 16, 19199, 198, 6738, 11485, 26858, 1330...
ratio_char_token: 3.266667
token_count: 45
""" Mock up a video feed pipeline """ import asyncio import logging import sys import cv2 logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s") logger = logging.getLogger('async') logger.setLevel(logging.INFO) def main(): loop = asyncio.get_event_loop() loop.run_until_complete(process_video(sys.argv[1])) logger.info("Completed") if __name__ == '__main__': main()
[ 37811, 198, 44, 735, 510, 257, 2008, 3745, 11523, 198, 37811, 198, 11748, 30351, 952, 198, 11748, 18931, 198, 11748, 25064, 198, 198, 11748, 269, 85, 17, 198, 198, 6404, 2667, 13, 35487, 16934, 7, 18982, 2625, 58, 4, 7, 16663, 13219, ...
ratio_char_token: 2.556962
token_count: 158
content:
#!/usr/bin/python3

"""
Read "lspci -v" and "glxinfo" outputs
"""

import re
from dataclasses import dataclass
from InputFileNotFoundError import InputFileNotFoundError

if __name__ == "__main__":
    import argparse
    import json

    parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output")
    parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output")
    parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output")
    parser.add_argument(
        "-d",
        "--dedicated",
        action="store_true",
        default=False,
        help="computer has dedicated GPU",
    )
    args = parser.parse_args()

    try:
        print(
            json.dumps(
                read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]),
                indent=2,
            )
        )
    except InputFileNotFoundError as e:
        print(str(e))
        exit(1)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 198, 37811, 198, 5569, 366, 75, 2777, 979, 532, 85, 1, 290, 366, 4743, 87, 10951, 1, 23862, 198, 37811, 198, 198, 11748, 302, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 198, ...
ratio_char_token: 2.197674
token_count: 430
content:
#!/usr/bin/python

import sys
from loglib import SNYLogger
import ftplib
import argparse
import re
import os
import calendar
import time

# GET LOCAL FILELIST
#
# login to ftp server
#
#
# get remote files
#

parser = argparse.ArgumentParser()
parser.add_argument("-o", "--host", help="ftp hostname", required=True)
parser.add_argument("-u", "--user", help="username on ftp server", required=True)
parser.add_argument("-p", "--password", help="password", required=True)
parser.add_argument("-d", "--debug", help="print debug to terminal, default 0, use multiple times to increase verbosity, i.e. -d -d", action="count")
parser.add_argument("-b", "--basedir", help="Toplevel directory on ftp server, default www")
parser.add_argument("-t", "--path", help="Local toplevel directory, default ., i.e. current dir")
parser.add_argument("-s", "--skipfile", help="Do not upload files in <skipfile>, default name upload.skip")
parser.set_defaults(debug=0)
parser.set_defaults(skipfile="upload.skip")
parser.set_defaults(basedir="www")
parser.set_defaults(path=".")
args = parser.parse_args()

log = SNYLogger(basename="upload", size_limit=10, no_logfiles=2, stdout=args.debug)

skiplines = read_skipfile(args.skipfile, log)
ftp = ftp_login(args, log)
sync_files(ftp, args, skiplines, args.path, args.basedir, log)
ftp.quit()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 25064, 198, 6738, 2604, 8019, 1330, 11346, 56, 11187, 1362, 198, 11748, 10117, 489, 571, 198, 11748, 1822, 29572, 198, 11748, 302, 198, 11748, 28686, 198, 11748, 11845, 198, 11748, ...
ratio_char_token: 2.59415
token_count: 547
content:
#!/usr/bin/env python

"""
Classify oncodrive gene results and prepare for combination

* Configuration parameters:

- The ones required by intogen.data.entity.EntityManagerFactory

* Input:

- oncodrive_ids: The mrna.oncodrive_genes to process

* Output:

- combinations: The mrna.combination prepared to be calculated

* Entities:

- mrna.oncodrive_genes
- mrna.combination
"""

import uuid
import json

from wok.task import Task
from wok.element import DataElement

from intogen.data.entity.server import EntityServer
from intogen.data.entity import types

if __name__ == "__main__":
    Task(run).start()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 37811, 198, 9487, 1958, 319, 19815, 11590, 9779, 2482, 290, 8335, 329, 6087, 198, 198, 9, 28373, 10007, 25, 198, 198, 12, 383, 3392, 2672, 416, 493, 6644, 13, 7890, 13, 26858, 1...
ratio_char_token: 3.112821
token_count: 195
content:
def test_get_ip_placeholder():
    """placeholder so pytest does not fail"""
    pass
[ 4299, 1332, 62, 1136, 62, 541, 62, 5372, 13829, 33529, 198, 220, 220, 220, 37227, 5372, 13829, 523, 12972, 9288, 857, 407, 2038, 37811, 198, 220, 220, 220, 1208, 198 ]
ratio_char_token: 2.866667
token_count: 30
content:
#!/usr/bin/env python

from sklearn import svm
import csv_io

if __name__ == "__main__":
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 1341, 35720, 1330, 264, 14761, 198, 11748, 269, 21370, 62, 952, 198, 198, 361, 11593, 3672, 834, 855, 1, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
ratio_char_token: 2.390244
token_count: 41
content:
# -*- coding: utf-8 -*-

from matplotlib import colors

# max = 148
_COLOR_Genarator = iter(
    sorted(
        [
            color for name, color in colors.cnames.items()
            if name not in ["red", "white"] or not name.startswith("light") or "gray" in name
        ]
    )
)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 2603, 29487, 8019, 1330, 7577, 198, 198, 2, 3509, 796, 22613, 198, 62, 46786, 62, 13746, 283, 1352, 796, 11629, 7, 198, 220, 220, 220, 23243, 7, 198, 220, ...
ratio_char_token: 2.068966
token_count: 145
content:
from urllib.parse import urlencode
from decouple import config
import hashlib
import requests

BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
auth_key = config('AUTH_KEY')
url = 'http://sms.globehost.com/api/sendhttp.php?'
[ 6738, 2956, 297, 571, 13, 29572, 1330, 2956, 11925, 8189, 198, 6738, 875, 43846, 1330, 4566, 198, 11748, 12234, 8019, 198, 11748, 7007, 198, 198, 33, 11159, 796, 366, 486, 1954, 2231, 3134, 4531, 39305, 4299, 456, 2926, 41582, 10295, 40...
ratio_char_token: 2.49505
token_count: 101
content:
import sys
import os

sys.path.append(os.pardir)

import random
import time
import requests
from contextlib import closing
from help import utils
from threading import Thread

if __name__ == '__main__':
    t1 = Thread(target=main)
    t2 = Thread(target=main)
    t3 = Thread(target=main)
    t4 = Thread(target=main)
    t1.start()
    t2.start()
    t3.start()
    t4.start()
[ 11748, 25064, 198, 11748, 28686, 198, 198, 17597, 13, 6978, 13, 33295, 7, 418, 13, 26037, 343, 8, 198, 198, 11748, 4738, 198, 11748, 640, 198, 11748, 7007, 198, 6738, 4732, 8019, 1330, 9605, 198, 6738, 1037, 1330, 3384, 4487, 198, 673...
ratio_char_token: 2.573333
token_count: 150
content:
import GeneralStats as gs
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosistest
import pandas as pd

if __name__ == "__main__":
    gen = gs.GeneralStats()

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    print("data = ", data)
    print("data1 = ", data1)
    res = gen.average(data, rowvar=True)
    res1 = gen.average(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.median(data, rowvar=True)
    res1 = gen.median(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.mode(data, rowvar=True)
    res1 = gen.mode(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.quantile(data, 0.5, rowvar=True, interpolation='lower')    #'midpoint' 0.5
    res1 = gen.quantile(data1, 0.5, rowvar=True, interpolation='lower')  #'lower' 0.5
    print("data 0.5 = ", res)
    print("data1 0.5 = ", res1)
    res = gen.quantile(data, 0.25, rowvar=True, interpolation='lower')
    res1 = gen.quantile(data1, 0.25, rowvar=True, interpolation='lower')
    print("data 0.25s = ", res)
    print("data1 0.25 = ", res1)
    res = gen.quantile(data, 0.75, rowvar=True, interpolation='lower')
    res1 = gen.quantile(data1, 0.75, rowvar=True, interpolation='lower')
    print("data 0.75 = ", res)
    print("data1 0.75 = ", res1)
    res = gen.quantile(data, 1.0, rowvar=True, interpolation='lower')
    res1 = gen.quantile(data1, 1.0, rowvar=True, interpolation='lower')
    print("data 1.0 = ", res)
    print("data1 1.0 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.range(data, rowvar=True)
    res1 = gen.range(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.variance(data, rowvar=True)
    res1 = gen.variance(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.standard_dev(data, rowvar=True)
    res1 = gen.standard_dev(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([1, 2, 3, 4, 5])
    res = gen.skewness(data, rowvar=True)
    res1 = gen.skewness(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)
    res = np.array([skew(data[0]), skew(data[1]), skew(data[2]), skew(data[3])])
    print("scipy skewdata = ", res)
    res1 = np.array(skew(data1))
    print("scipy skewdata1 = ", res1)

    data = np.array([[1, 1, 2, 2, 3], [2, 2, 3, 3, 5], [1, 4, 3, 3, 3], [2, 4, 5, 5, 3]])
    data1 = np.array([53, 61, 49, 66, 78, 47])
    res = gen.kurtosis(data, rowvar=True)
    res1 = gen.kurtosis(data1, rowvar=True)
    print("data = ", res)
    print("data1 = ", res1)
    data_0 = pd.Series(data[0])
    data_1 = pd.Series(data[1])
    data_2 = pd.Series(data[2])
    data_3 = pd.Series(data[3])
    print("pandas kurtdata = ", [data_0.kurt(), data_1.kurt(), data_2.kurt(), data_3.kurt()])
    data1 = pd.Series(data1)
    print("pandas kurtdata1 = ", data1.kurt())
[ 11748, 3611, 29668, 355, 308, 82, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 6738, 629, 541, 88, 13, 34242, 1330, 43370, 201, 198, 6738, 629, 541, 88, 13, 34242, 1330, 479, 3325, 418, 396, 395, 201, 198, 11748, 19798, 292, 3...
ratio_char_token: 1.888386
token_count: 1,989
content:
#!/usr/bin/env python
# encoding: utf-8
"""
script to install all the necessary things
for working on a linux machine with nothing

Installing minimum dependencies
"""
import sys
import os
import logging
import subprocess
import xml.etree.ElementTree as ElementTree
import xml.dom.minidom as minidom
import socket
import time
import pwd

###---------------------------------------------------##
#  Configuration Section, will be modified by script   #
###---------------------------------------------------##
node_apt_packages = [
    'emacs',
    'git',
    'g++',
    'make',
    'python-numpy',
    'libprotobuf-dev',
    'libcurl4-openssl-dev']

# master only packages
master_apt_packages = [
    'protobuf-compiler']

# List of r packages to be installed in master
master_r_packages = [
    'r-base-dev',
    'r-base',
    'r-cran-statmod',
    'r-cran-RCurl',
    'r-cran-rjson'
]

# download link of hadoop.
hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz'
hadoop_dir = 'hadoop-2.8.0'

# customized installation script.
# See optional installation scripts for options.
# customized installation script for all nodes.

###---------------------------------------------------##
#  Automatically set by script                         #
###---------------------------------------------------##
USER_NAME = 'ubuntu'
# setup variables
MASTER = os.getenv('MY_MASTER_DNS', '')
# node type the type of current node
NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge')
NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15)))
NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4'))
AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined')
AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined')
JAVA_HOME = os.getenv('JAVA_HOME')
HADOOP_HOME = os.getenv('HADOOP_HOME')
DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)]
ENVIRON = os.environ.copy()

###--------------------------------##
#  Optional installation scripts.   #
###--------------------------------##

### Script section ###

### Installation helpers ###
# install g++4.9, needed for regex match.
def install_java():
    """
    install java and setup environment variables
    Returns environment variables that needs to be exported
    """
    if not os.path.exists('jdk1.8.0_131'):
        run('wget --no-check-certificate --no-cookies'\
            ' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\
            ' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz')
        run('tar xf jdk-8u131-linux-x64.tar.gz')
        run('rm -f jdk-8u131-linux-x64.tar.gz')
    global JAVA_HOME
    if JAVA_HOME is None:
        JAVA_HOME = os.path.abspath('jdk1.8.0_131')
    return [('JAVA_HOME', JAVA_HOME)]

# main script to install all dependencies
# Make startup script for bulding
if __name__ == '__main__':
    pw_record = pwd.getpwnam(USER_NAME)
    user_name = pw_record.pw_name
    user_home_dir = pw_record.pw_dir
    user_uid = pw_record.pw_uid
    user_gid = pw_record.pw_gid
    env = os.environ.copy()
    cwd = user_home_dir
    ENVIRON['HOME'] = user_home_dir
    os.setgid(user_gid)
    os.setuid(user_uid)
    os.chdir(user_home_dir)
    main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 21004, 25, 3384, 69, 12, 23, 198, 37811, 198, 12048, 284, 2721, 477, 262, 3306, 1243, 198, 1640, 1762, 319, 257, 32639, 4572, 351, 2147, 198, 198, 6310, 9221, 5288, 20086, 198, 37...
ratio_char_token: 2.510062
token_count: 1,292
""" A simple, good-looking plot =========================== Demoing some simple features of matplotlib """ import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt fig = plt.figure(figsize=(5, 4), dpi=72) axes = fig.add_axes([0.01, 0.01, .98, 0.98]) X = np.linspace(0, 2, 200) Y = np.sin(2*np.pi*X) plt.plot(X, Y, lw=2) plt.ylim(-1.1, 1.1) plt.grid() plt.show()
[ 37811, 198, 32, 2829, 11, 922, 12, 11534, 7110, 198, 4770, 2559, 18604, 198, 198, 11522, 40519, 617, 2829, 3033, 286, 2603, 29487, 8019, 198, 37811, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 198, 6759, 2948...
ratio_char_token: 2.247191
token_count: 178
content:
import os
import re
from typing import Tuple

from pfio._typing import Union
from pfio.container import Container
from pfio.io import IO, create_fs_handler
[ 11748, 28686, 198, 11748, 302, 198, 6738, 19720, 1330, 309, 29291, 198, 198, 6738, 279, 69, 952, 13557, 774, 13886, 1330, 4479, 198, 6738, 279, 69, 952, 13, 34924, 1330, 43101, 198, 6738, 279, 69, 952, 13, 952, 1330, 24418, 11, 2251, ...
ratio_char_token: 3.291667
token_count: 48
content:
import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion
from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion
from Analisis_Ascendente.Instrucciones.instruccion import Instruccion
from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable
from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace
from Analisis_Ascendente.Instrucciones.Select.select import Select
from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use
from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime
import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import
from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3
from Analisis_Ascendente.Instrucciones.Select import selectInst
from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion
from Analisis_Ascendente.Instrucciones.Drop.drop import Drop
from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase
from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable
from Analisis_Ascendente.Instrucciones.Update.Update import Update
from Analisis_Ascendente.Instrucciones.Delete.delete import Delete
from Analisis_Ascendente.Instrucciones.Select import SelectDist
from Analisis_Ascendente.Instrucciones.Type.type import CreateType
#----------------------------------Imports FASE2--------------------------
from Analisis_Ascendente.Instrucciones.Index.Index import Index
from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction
from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex
from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex
from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure
from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL
from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall
from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction
import C3D.GeneradorEtiquetas as GeneradorEtiquetas
import C3D.GeneradorTemporales as GeneradorTemporales
import Analisis_Ascendente.reportes.Reportes as Reportes
[ 11748, 1052, 27315, 271, 62, 32, 1416, 437, 21872, 13, 6310, 622, 535, 295, 274, 13, 6489, 6968, 17861, 13, 36, 73, 721, 315, 283, 37, 19524, 295, 355, 412, 73, 721, 315, 283, 37, 19524, 295, 201, 198, 6738, 1052, 27315, 271, 62, ...
ratio_char_token: 2.847666
token_count: 814
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 628 ]
ratio_char_token: 2.6
token_count: 25
content:
from threading import current_thread

from jsbeautifier.javascript.beautifier import remove_redundant_indentation

from pyparser.oleparser import OleParser
from pyparser.hwp_parser import HwpParser
from scan.init_scan import init_hwp5_scan
from scan.bindata_scanner import BinData_Scanner
from scan.jscript_scanner import JS_Scanner
from scan.paratext_scanner import ParaText_Scanner

import zipfile
import os
import sys
import platform

from common.errors import *
from utils.dumphex import print_hexdump

js_scanner = None
bindata_scanner = None
paratext_scanner = None

_platform = None

binary_info = {
    "type": "",
    "p": None
}
[ 6738, 4704, 278, 1330, 1459, 62, 16663, 198, 198, 6738, 44804, 40544, 7483, 13, 37495, 13, 40544, 7483, 1330, 4781, 62, 445, 917, 415, 62, 521, 298, 341, 198, 6738, 279, 4464, 28198, 13, 2305, 48610, 1330, 30093, 46677, 198, 6738, 279...
ratio_char_token: 3.133663
token_count: 202
""" Tests for plugins in core module. Only unit tests for now. """ from unittest.mock import patch import click from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit
[ 37811, 198, 51, 3558, 329, 20652, 287, 4755, 8265, 13, 198, 198, 10049, 4326, 5254, 329, 783, 13, 198, 37811, 198, 198, 6738, 555, 715, 395, 13, 76, 735, 1330, 8529, 198, 198, 11748, 3904, 198, 198, 6738, 299, 576, 13, 7295, 13, 3...
ratio_char_token: 3.258065
token_count: 62
content:
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.test.client import Client

from model_mommy import mommy

from devices.models import Device
from users.models import Lageruser
[ 6738, 42625, 14208, 13, 3642, 822, 13, 11299, 19199, 13, 27530, 1330, 14041, 6030, 198, 6738, 42625, 14208, 13, 9288, 1330, 6208, 20448, 198, 6738, 42625, 14208, 13, 9288, 13, 16366, 1330, 20985, 198, 198, 6738, 2746, 62, 32542, 1820, 1...
ratio_char_token: 3.803279
token_count: 61
content:
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError

from django_git_info import get_git_info
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 6738, 42625, 14208, 13, 7295, 13, 27604, 13, 8692, 1330, 7308, 21575, 11, 9455, 12331, 198, 198, 6738, 42625, 14208, 62, 18300, 62, 10951, 1330, 651, 62, 18300, 62, 1095...
ratio_char_token: 2.933333
token_count: 45
from collections.abc import Callable as _Callable import networkx as _nx from opencog.type_constructors import AtomSpace as _AtomSpace from .args import check_arg as _check_arg def convert(data, graph_annotated=True, graph_directed=True, node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None, node_border_color=None, node_border_size=None, node_label_color=None, node_label_size=None, node_hover=None, node_click=None, node_image=None, node_properties=None, edge_label=None, edge_color=None, edge_opacity=None, edge_size=None, edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None): """Convert an Atomspace or list of Atoms to a NetworkX graph with annotations. Several arguments accept a Callable. - In case of node annotations, the Callable gets an Atom as input, which the node represents in the graph. The Callable needs to return one of the other types accepted by the argument, e.g. ``str`` or ``int``/``float``. - In case of edge annotations, the Callable gets two Atoms as input, which the edge connects in the graph. The Callable needs to return one of the other types accepted by the argument, e.g. ``str`` or ``int``/``float``. Several arguments accept a color, which can be in following formats: - Name: ``"black"``, ``"red"``, ``"green"``, ... - Color code - 6 digit hex RGB code: ``"#05ac05"`` - 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``) Parameters ---------- data : Atomspace, list of Atoms Input that gets converted to a graph. graph_annotated : bool If ``False``, no annotations are added to the graph. This could be used for converting large AtomSpaces quickly to graphs that use less RAM and can be exported to smaller files (e.g. also compressed as gml.gz) for inspection with other tools. graph_directed : bool If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created. node_label : str, Callable Set a label for each node, which is shown as text below it. node_color : str, Callable Set a color for each node, which becomes the fill color of its shape. node_opacity : float between 0.0 and 1.0 Set an opacity for each node, which becomes the opacity of its shape. Caution: This is only supported by d3. node_size : int, float, Callable Set a size for each node, which becomes the height and width of its shape. node_shape : str, Callable Set a shape for each node, which is some geometrical form that has the node coordinates in its center. Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"`` node_border_color : str, Callable Set a border color for each node, which influences the border drawn around its shape. node_border_size : int, float, Callable Set a border size for each node, which influences the border drawn around its shape. node_label_color : str, Callable Set a label color for each node, which determines the font color of the text below the node. node_label_size : int, float, Callable Set a label size for each node, which determines the font size of the text below the node. node_hover : str, Callable Set a hover text for each node, which shows up besides the mouse cursor when hovering over a node. node_click : str, Callable Set a click text for each node, which shows up in a div element below the plot when clicking on a node and can easily be copied and pasted. node_image : str, Callable Set an image for each node, which appears within its shape. 
Possible values: - URL pointing to an image - Data URL encoding the image node_properties : str, dict, Callable Set additional properties for each node, which may not immediately be translated into a visual element, but can be chosen in the data selection menu in the interactive HTML visualizations to map them on some plot element. These properties also appear when exporting a graph to a file in a format such as GML and may be recognized by external visualization tools. Note that a Callable needs to return a dict in this case, and each key becomes a property, which is equivalent to the other properties such as node_size and node_color. Special cases: - ``node_properties="tv"`` is a shortcut for using a function that returns ``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}`` - Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates. Examples: - ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the JavaScript layout algorithm does not influence it, but the nodes remain free to move in the y and z directions. - ``lambda atom: dict(x=2.0) if atom.is_node() else None``: This fixes the x coordinate of each Atom of type Node to 2.0 but allows each Atom of type Link to move freely. - ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)`` This fixes the y coordinates of Atoms at different heights. Atoms of type Node are put at the bottom and Atoms of type Link are ordered by the number of their outgoing edges. The results is a hierarchical visualization that has some similarity with the "dot" layout. - ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``: This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100. The results is a visualization with two lines of nodes that has some similarity with the "bipartite" layout. edge_label : str, Callable Set a label for each edge, which becomes the text plotted in the middle of the edge. edge_color : str, Callable Set a color for each edge, which becomes the color of the line representing the edge. edge_opacity : int, float, Callable Set an opacity for each edge, which allows to make it transparent to some degree. edge_size : int, float, Callable Set a size for each edge, which becomes the width of the line representing the edge. edge_label_color : str, Callable Set a color for each edge label, which becomes the color of the text in the midpoint of the edge. edge_label_size : int, float, Callable Set a size for each edge label, which becomes the size of the text in the midpoint of the edge. edge_hover : str, Callable edge_click : str, Callable Returns ------- graph : NetworkX Graph or DiGraph Whether an undirected or directed graph is created depends on the argument "directed". 
""" # Argument processing _check_arg(data, 'data', (list, _AtomSpace)) _check_arg(graph_annotated, 'graph_annotated', bool) _check_arg(graph_directed, 'graph_directed', bool) _check_arg(node_label, 'node_label', (str, _Callable), allow_none=True) _check_arg(node_color, 'node_color', (str, _Callable), allow_none=True) _check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True) _check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True) _check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True) _check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True) _check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True) _check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True) _check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True) _check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True) _check_arg(node_click, 'node_click', (str, _Callable), allow_none=True) _check_arg(node_image, 'node_image', (str, _Callable), allow_none=True) _check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True) _check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True) _check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True) _check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True) _check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True) _check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True) _check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True) _check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True) _check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True) # Prepare annoation functions if graph_annotated: node_ann = prepare_node_func( node_label, node_color, node_opacity, node_size, node_shape, node_border_color, node_border_size, node_label_color, node_label_size, node_hover, node_click, node_image, node_properties) edge_ann = prepare_edge_func( edge_label, edge_color, edge_opacity, edge_size, edge_label_color, edge_label_size, edge_hover, edge_click) else: empty = dict() # Create the NetworkX graph graph = _nx.DiGraph() if graph_directed else _nx.Graph() # 0) Set graph annotations graph.graph['node_click'] = '$hover' # node_click will by default show content of node_hover # 1) Add vertices and their annotations for atom in data: graph.add_node(to_uid(atom), **node_ann(atom)) # 2) Add edges and their annotations (separate step to exclude edges to filtered vertices) for atom in data: uid = to_uid(atom) if atom.is_link(): # for all that is incoming to the Atom for atom2 in atom.incoming: uid2 = to_uid(atom2) if uid2 in graph.nodes: graph.add_edge(uid2, uid, **edge_ann(atom2, atom)) # for all that is outgoing of the Atom for atom2 in atom.out: uid2 = to_uid(atom2) if uid2 in graph.nodes: graph.add_edge(uid, uid2, **edge_ann(atom, atom2)) return graph def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape, node_border_color, node_border_size, node_label_color, node_label_size, node_hover, node_click, node_image, node_properties): """Prepare a function that calculates all annoations for a node representing an Atom.""" # individual node annotation functions node_label = use_node_def_or_str(node_label, node_label_default) node_color = use_node_def_or_str(node_color, node_color_default) node_opacity = 
    node_opacity = use_node_def_or_num(node_opacity, node_opacity_default)
    node_size = use_node_def_or_num(node_size, node_size_default)
    node_shape = use_node_def_or_str(node_shape, node_shape_default)
    node_border_color = use_node_def_or_str(node_border_color, node_border_color_default)
    node_border_size = use_node_def_or_num(node_border_size, node_border_size_default)
    node_label_color = use_node_def_or_str(node_label_color, node_label_color_default)
    node_label_size = use_node_def_or_num(node_label_size, node_label_size_default)
    node_hover = use_node_def_or_str(node_hover, node_hover_default)
    node_click = use_node_def_or_str(node_click, node_click_default)
    node_image = use_node_def_or_str(node_image, node_image_default)

    # special case: additional user-defined node properties by a function that returns a dict
    if node_properties is None:
        node_properties = node_properties_default
    elif isinstance(node_properties, dict):
        # wrap a constant dict in a function
        val = dict(node_properties)
        node_properties = lambda atom: val
    elif node_properties == 'tv':
        node_properties = node_properties_tv

    # combined node annotation function: calls each of the individual ones
    name_func = (
        ('label', node_label),
        ('color', node_color),
        ('opacity', node_opacity),
        ('size', node_size),
        ('shape', node_shape),
        ('border_color', node_border_color),
        ('border_size', node_border_size),
        ('label_color', node_label_color),
        ('label_size', node_label_size),
        ('hover', node_hover),
        ('click', node_click),
        ('image', node_image),
    )

    def func(atom):
        # Collect the individual annotations; drop None values so JS-library
        # defaults apply and files stay small (see the comments further below).
        data = {name: f(atom) for name, f in name_func}
        data = {k: v for k, v in data.items() if v is not None}
        properties = node_properties(atom)
        if properties is not None:
            data.update(properties)
        return data

    return func


def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size,
                      edge_label_color, edge_label_size, edge_hover, edge_click):
    """Prepare a function that calculates all annotations for an edge between Atoms."""
    # individual edge annotation functions
    edge_label = use_edge_def_or_str(edge_label, edge_label_default)
    edge_color = use_edge_def_or_str(edge_color, edge_color_default)
    edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default)
    edge_size = use_edge_def_or_num(edge_size, edge_size_default)
    edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default)
    edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default)
    edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default)
    edge_click = use_edge_def_or_str(edge_click, edge_click_default)

    # combined edge annotation function: calls each of the individual ones
    name_func = (
        ('label', edge_label),
        ('color', edge_color),
        ('opacity', edge_opacity),
        ('size', edge_size),
        ('label_color', edge_label_color),
        ('label_size', edge_label_size),
        ('hover', edge_hover),
        ('click', edge_click),
    )

    def func(atom1, atom2):
        data = {name: f(atom1, atom2) for name, f in name_func}
        return {k: v for k, v in data.items() if v is not None}

    return func


def use_node_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to a node annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        given_value = str(given_value)

        def func(atom):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func


def use_node_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to a node annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        given_value = float(given_value)

        def func(atom):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
function.""" # Default: use pre-defined function from this module if given_value is None: func = default_func # Transform: value to function that returns the value elif isinstance(given_value, str): given_value = str(given_value) # Passthrough: value itself is a function else: func = given_value return func def use_edge_def_or_num(given_value, default_func): """Transform a value of type (None, int, float, Callable) to an edge annotation function.""" # Default: use pre-defined function from this module if given_value is None: func = default_func # Transform: value to function that returns the value elif isinstance(given_value, (int, float)): given_value = float(given_value) # Passthrough: value itself is a function else: func = given_value return func def to_uid(atom): """Return a unique identifier for an Atom.""" return atom.id_string() # Default functions for node annotations # - "return None" means that the attribute and value won't be included # to the output data, so that defaults of the JS library are used and files get smaller # - A return of a value in some cases and None in other cases means that the # default value of the JS library is used in None cases and again files get smaller # Default functions for edge annotations
[ 6738, 17268, 13, 39305, 1330, 4889, 540, 355, 4808, 14134, 540, 198, 198, 11748, 3127, 87, 355, 4808, 77, 87, 198, 6738, 1280, 66, 519, 13, 4906, 62, 41571, 669, 1330, 33102, 14106, 355, 4808, 2953, 296, 14106, 198, 198, 6738, 764, ...
2.706939
6,067
from builtins import *

from pydantic import BaseModel
[ 6738, 3170, 1040, 1330, 1635, 198, 198, 6738, 279, 5173, 5109, 1330, 7308, 17633, 628, 628 ]
3.625
16
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import unicode_literals

from collections import OrderedDict
import os
import types

from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
import msgpack
import operator
from oslo_config import cfg
from oslotest.base import BaseTestCase
from stevedore.driver import DriverManager
from stevedore.extension import Extension

from watcher_metering.agent.agent import Agent
from watcher_metering.agent.measurement import Measurement
from watcher_metering.tests.agent.agent_fixtures import ConfFixture
from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller
from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller
[ 2, 532, 9, 12, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 1853, 275, 27, 29, 785, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743...
3.645946
370
from __future__ import absolute_import, division, print_function from cctbx.array_family import flex from scitbx import matrix import math from libtbx import adopt_init_args import scitbx.lbfgs from mmtbx.bulk_solvent import kbu_refinery from cctbx import maptbx import mmtbx.masks import boost_adaptbx.boost.python as bp asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext") from libtbx import group_args from mmtbx import bulk_solvent from mmtbx.ncs import tncs from collections import OrderedDict import mmtbx.f_model import sys from libtbx.test_utils import approx_equal from mmtbx import masks from cctbx.masks import vdw_radii_from_xray_structure ext = bp.import_ext("mmtbx_masks_ext") mosaic_ext = bp.import_ext("mmtbx_mosaic_ext") APPLY_SCALE_K1_TO_FOBS = False # Utilities used by algorithm 2 ------------------------------------------------ #------------------------------------------------------------------------------- def write_map_file(crystal_symmetry, map_data, file_name): from iotbx import mrcfile mrcfile.write_ccp4_map( file_name = file_name, unit_cell = crystal_symmetry.unit_cell(), space_group = crystal_symmetry.space_group(), map_data = map_data, labels = flex.std_string([""])) def algorithm_0(f_obs, F, kt): """ Grid search """ fc, f_masks = F[0], F[1:] k_mask_trial_range=[] s = -1 while s<1: k_mask_trial_range.append(s) s+=0.0001 r = [] fc_data = fc.data() for i, f_mask in enumerate(f_masks): #print("mask ",i) assert f_obs.data().size() == fc.data().size() assert f_mask.data().size() == fc.data().size() #print (bulk_solvent.r_factor(f_obs.data(),fc_data)) kmask_, k_ = \ bulk_solvent.k_mask_and_k_overall_grid_search( f_obs.data()*kt, fc_data*kt, f_mask.data()*kt, flex.double(k_mask_trial_range), flex.bool(fc.data().size(),True)) r.append(kmask_) fc_data += fc_data*k_ + kmask_*f_mask.data() #print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_)) r = [1,]+r return r def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10): """ Unphased one-step search """ calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures) for it in range(macro_cycles): if(use_curvatures): m = minimizer(max_iterations=100, calculator=calculator) else: #upper = flex.double([1.1] + [1]*(x.size()-1)) #lower = flex.double([0.9] + [-1]*(x.size()-1)) upper = flex.double([1.1] + [5]*(x.size()-1)) lower = flex.double([0.9] + [-5]*(x.size()-1)) #upper = flex.double([10] + [5]*(x.size()-1)) #lower = flex.double([0.1] + [-5]*(x.size()-1)) #upper = flex.double([10] + [0.65]*(x.size()-1)) #lower = flex.double([0.1] + [0]*(x.size()-1)) #upper = flex.double([1] + [0.65]*(x.size()-1)) #lower = flex.double([1] + [0]*(x.size()-1)) #upper = flex.double([1] + [5.65]*(x.size()-1)) #lower = flex.double([1] + [-5]*(x.size()-1)) m = tncs.minimizer( potential = calculator, use_bounds = 2, lower_bound = lower, upper_bound = upper, initial_values = x).run() calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures) if(use_curvatures): for it in range(10): m = minimizer(max_iterations=100, calculator=calculator) calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures) m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True) calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures) return m.x def algorithm_3(i_obs, fc, f_masks): """ Unphased two-step search """ F = [fc]+f_masks Gnm = [] cs = {} cntr=0 nm=[] # Compute and store Gnm for n, Fn in enumerate(F): for m, Fm in enumerate(F): if m < n: continue 
Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) ) cs[(n,m)] = cntr cntr+=1 nm.append((n,m)) # Keep track of indices for "upper triangular matrix vs full" for k,v in zip(list(cs.keys()), list(cs.values())): i,j=k if i==j: continue else: cs[(j,i)]=v # Generate and solve system Ax=b, x = A_1*b A = [] b = [] for u, Gnm_u in enumerate(Gnm): for v, Gnm_v in enumerate(Gnm): scale = 2 n,m=nm[v] if n==m: scale=1 A.append( flex.sum(Gnm_u*Gnm_v)*scale ) b.append( flex.sum(Gnm_u * i_obs.data()) ) A = matrix.sqr(A) A_1 = A.inverse() b = matrix.col(b) x = A_1 * b # Expand Xmn from solution x Xmn = [] for n, Fn in enumerate(F): rows = [] for m, Fm in enumerate(F): x_ = x[cs[(n,m)]] rows.append(x_) Xmn.append(rows) # Do formula (19) lnK = [] for j, Fj in enumerate(F): t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) ) t2 = 0 for n, Fn in enumerate(F): for m, Fm in enumerate(F): t2 += math.log(Xmn[n][m]) t2 = t2 / (2*len(F)) lnK.append( 1/len(F)*(t1-t2) ) return [math.exp(x) for x in lnK] def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7, use_cpp=True): """ Phased simultaneous search (alg4) """ fc, f_masks = F[0], F[1:] fc = fc.deep_copy() F = [fc]+F[1:] # C++ version if(use_cpp): return mosaic_ext.alg4( [f.data() for f in F], f_obs.data(), phase_source.data(), max_cycles, auto_converge_eps) # Python version (1.2-3 times slower, but much more readable!) cntr = 0 x_prev = None while True: f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source) A = [] b = [] for j, Fj in enumerate(F): A_rows = [] for n, Fn in enumerate(F): Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) ) A_rows.append( flex.sum(Gjn) ) Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) ) b.append(flex.sum(Hj)) A.extend(A_rows) A = matrix.sqr(A) A_1 = A.inverse() b = matrix.col(b) x = A_1 * b # fc_d = flex.complex_double(phase_source.indices().size(), 0) for i, f in enumerate(F): fc_d += f.data()*x[i] phase_source = phase_source.customized_copy(data = fc_d) x_ = x[:] # cntr+=1 if(cntr>max_cycles): break if(x_prev is None): x_prev = x_[:] else: max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_))) if(max_diff<=auto_converge_eps): break x_prev = x_[:] return x_
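algorithm_3 and algorithm_4 both reduce to solving the normal equations A x = b, where A[u][v] = sum(G_u * G_v) and b[u] = sum(G_u * i_obs) over all reflections. A toy numpy analogue of that step (assumed; numpy stands in for flex and matrix):

import numpy as np

rng = np.random.default_rng(0)
G = rng.normal(size=(4, 100))            # 4 basis terms, e.g. Re(Fn * conj(Fm))
x_true = np.array([1.0, 0.5, 0.2, 0.1])
i_obs = x_true @ G                       # noiseless synthetic observations

A = G @ G.T                              # A[u, v] = sum(G_u * G_v)
b = G @ i_obs                            # b[u]    = sum(G_u * i_obs)
x = np.linalg.solve(A, b)                # the code above uses A.inverse() * b
assert np.allclose(x, x_true)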
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 7297, 11, 3601, 62, 8818, 198, 6738, 269, 310, 65, 87, 13, 18747, 62, 17989, 1330, 7059, 198, 6738, 629, 270, 65, 87, 1330, 17593, 198, 11748, 10688, 198, 6738, 9195, 83, 65, 87, ...
2.082721
3,131
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import scipy.sparse as sps

from mars.tensor.execution.core import Executor
from mars import tensor as mt
from mars.tensor.expressions.datasource import tensor, ones, zeros, arange
from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
    expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
    hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \
    flip, flipud, fliplr, repeat, tile, isin
from mars.tensor.expressions.merge import stack
from mars.tensor.expressions.reduction import all as tall
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 7358, 12, 7908, 41992, 4912, 31703, 12052, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 36...
3.181159
414
import sys

import matplotlib
matplotlib.use('Agg')

sys.path.insert(0, 'lib')
[ 11748, 25064, 198, 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 10786, 46384, 11537, 198, 17597, 13, 6978, 13, 28463, 7, 15, 11, 705, 8019, 11537, 198 ]
2.655172
29
import matplotlib matplotlib.use('Agg') import numpy as np import matplotlib.pyplot as plt from glob import glob from astropy.table import Table, join from os import chdir, system from scipy.stats import norm as gauss_norm from sys import argv from getopt import getopt # turn off polyfit ranking warnings import warnings warnings.filterwarnings('ignore') simulation_dir = '/shared/data-camelot/cotar/' data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/' data_dir = '/shared/ebla/cotar/' USE_DR3 = True Q_FLAGS = True P_INDIVIDUAL = False suffix = '' if len(argv) > 1: # parse input options opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual=']) # set parameters, depending on user inputs print(opts) for o, a in opts: if o == '--dr3': USE_DR3 = int(a) > 0 if o == '--suffix': suffix += str(a) if o == '--flags': Q_FLAGS = int(a) > 0 if o == '--individual': P_INDIVIDUAL = int(a) > 0 CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits') tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits') # remove cluster members from tails data print('Cluster members all:', len(CG_data), len(tails_data)) idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True) tails_data = tails_data[idx_not_in_cluster] print('Cluster members all:', len(CG_data), len(tails_data)) if USE_DR3: # cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits') cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits') fe_col = 'fe_h' teff_col = 'teff' q_flag = 'flag_sp' suffix += '_DR3' else: pass if Q_FLAGS: suffix += '_flag0' # determine all possible simulation subdirs chdir(data_dir_clusters) for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'): chdir(cluster_dir) print('Working on clusters in ' + cluster_dir) for sub_dir in glob('*'): current_cluster = '_'.join(sub_dir.split('_')[0:2]) source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id'] source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id'] idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg)) idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail)) if '.png' in sub_dir or 'individual-abund' in sub_dir: continue print(' ') print(sub_dir) chdir(sub_dir) try: g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t') idx_init = np.in1d(cannon_data['source_id'], g_init['source_id']) except: idx_init = np.full(len(cannon_data), False) try: g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t') g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t') # further refinement of results to be plotted here g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1., g_in['in_cluster_prob'] >= 68.)] idx_in = np.in1d(cannon_data['source_id'], g_in['source_id']) idx_in_no_CG = np.logical_and(idx_in, np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id']))) except: idx_in = np.full(len(cannon_data), False) idx_in_no_CG = np.full(len(cannon_data), False) try: g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t') # further refinement of results to be plotted here g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0, g_out['in_cluster_prob'] <= 0)] 
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id']) except: idx_out = np.full(len(cannon_data), False) chdir('..') if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0: print(' Some Galah lists are missing') if USE_DR3: abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)] else: abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3] # abund_cols = ['e_' + cc for cc in abund_cols] # rg = (0., 0.35) # yt = [0., 0.1, 0.2, 0.3] # medfix = '-snr-sigma_' abund_cols = ['diff_' + cc for cc in abund_cols] rg = (-0.45, 0.45) yt = [-0.3, -0.15, 0.0, 0.15, 0.3] medfix = '-detrended-snr_' # ------------------------------------------------------------------------------ # NEW: plot with parameter dependency trends # ------------------------------------------------------------------------------ bs = 40 x_cols_fig = 7 y_cols_fig = 5 param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]} for param in ['snr_c2_iraf']: #list(param_lims.keys()): cannon_data['abund_det'] = 0 cannon_data['abund_det_elems'] = 0 print('Estimating membership using parameter', param) fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10)) for i_c, col in enumerate(abund_cols): # print(col) x_p = i_c % x_cols_fig y_p = int(1. * i_c / x_cols_fig) fit_x_param = 'teff' cur_abund_col = '_'.join(col.split('_')[1:]) cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] idx_val = np.isfinite(cannon_data[col]) if Q_FLAGS: idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0) idx_u1 = np.logical_and(idx_out, idx_val) idx_u2 = np.logical_and(idx_init, idx_val) idx_u3 = np.logical_and(idx_in, idx_val) idx_u4 = np.logical_and(idx_cg_memb, idx_val) idx_u5 = np.logical_and(idx_tail, idx_val) fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2], cannon_data[cur_abund_col][idx_u2], order=3, steps=2, func='poly', sigma_low=2.5, sigma_high=2.5, n_min_perc=10.) 
if fit_model is not None: cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly') else: cannon_data['diff_' + cur_abund_col] = np.nan ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1], lw=0, s=3, color='C2', label='Field') ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2], lw=0, s=3, color='C0', label='Initial') ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3], lw=0, s=3, color='C1', label='Ejected') if np.sum(idx_u5) > 0: print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5))) ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5], lw=0, s=3, color='C4', label='Tail') label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3)) ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add, ylim=rg, yticks=yt,) ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black') rg = (-0.6, 0.6) idx_val = np.isfinite(cannon_data[teff_col]) if Q_FLAGS: idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0) x_p = -1 y_p = -1 idx_u1 = np.logical_and(idx_out, idx_val) idx_u2 = np.logical_and(idx_init, idx_val) idx_u3 = np.logical_and(idx_in, idx_val) idx_u5 = np.logical_and(idx_tail, idx_val) sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1], lw=0, s=3, color='C2', label='Field') sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2], lw=0, s=3, color='C0', label='Initial') sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3], lw=0, s=3, color='C1', label='Ejected') fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2], order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10., func='poly') if np.sum(idx_u5) > 0: sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5], lw=0, s=3, color='C4', label='Tail') ax[-1, -3].legend(handles=[sl1, sl1, sl3, sl5]) else: ax[-1, -3].legend(handles=[sl1, sl1, sl3]) label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3)) ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param]) ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black') x_p = -2 y_p = -1 ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1], lw=0, s=3, color='C2', label='Field') ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2], lw=0, s=3, color='C0', label='Initial') ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3], lw=0, s=3, color='C1', label='Ejected') if np.sum(idx_u5) > 0: ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5], lw=0, s=3, color='C4', label='Tail') label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3)) ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.]) ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black') plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3) # plt.show() plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png', dpi=250) plt.close(fig) chdir('..')
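fit_abund_trend and eval_abund_trend are project helpers not shown in this file; the following numpy sketch is only an assumed stand-in for what the detrending step does — an iterative sigma-clipped polynomial fit of abundance against a parameter, whose evaluation is then subtracted to form the 'diff_' columns:

import numpy as np

def fit_abund_trend_sketch(x, y, order=3, steps=2, sigma_low=2.5, sigma_high=2.5):
    keep = np.isfinite(x) & np.isfinite(y)
    for _ in range(steps):
        coef = np.polyfit(x[keep], y[keep], order)   # 'poly' fit of kept points
        resid = y - np.polyval(coef, x)
        std = np.nanstd(resid[keep])
        keep &= (resid > -sigma_low * std) & (resid < sigma_high * std)
    return coef, std

x = np.linspace(4000, 6500, 500)                     # e.g. teff
y = 1e-8 * (x - 5000)**2 + np.random.default_rng(1).normal(0, 0.05, x.size)
coef, _ = fit_abund_trend_sketch(x, y)
detrended = y - np.polyval(coef, x)                  # analogous to the 'diff_' columns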
[ 11748, 2603, 29487, 8019, 198, 6759, 29487, 8019, 13, 1904, 10786, 46384, 11537, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 198, 6738, 15095, 1330, 15095, 198, 6738, 6468, 28338, 13, ...
1.780906
6,536
import ast

from python_minifier.transforms.suite_transformer import SuiteTransformer
[ 11748, 6468, 198, 198, 6738, 21015, 62, 1084, 7483, 13, 7645, 23914, 13, 2385, 578, 62, 7645, 16354, 1330, 26264, 8291, 16354, 628 ]
3.782609
23
import unittest
from pkg import Linear_Algebra
import numpy as np

if __name__ == '__main__':
    unittest.main()
[ 11748, 555, 715, 395, 198, 6738, 279, 10025, 1330, 44800, 62, 2348, 29230, 198, 11748, 299, 32152, 355, 45941, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 555, 715, 395, 13, 12417, 3419 ]
2.756098
41
from backend.common.models.mytba import MyTBAModel
[ 6738, 30203, 13, 11321, 13, 27530, 13, 1820, 83, 7012, 1330, 2011, 22737, 2390, 375, 417, 628 ]
3.058824
17
#----------------------------------------------------------------------
# Name:        wxPython.lib.filebrowsebutton
# Purpose:     Composite controls that provide a Browse button next to
#              either a wxTextCtrl or a wxComboBox.  The Browse button
#              launches a wxFileDialog and loads the result into the
#              other control.
#
# Author:      Mike Fletcher
#
# RCS-ID:      $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $
# Copyright:   (c) 2000 by Total Control Software
# Licence:     wxWindows license
#----------------------------------------------------------------------
# 12/02/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 Compatability changes
#

import os
import types

import wx

#----------------------------------------------------------------------
#----------------------------------------------------------------------

if __name__ == "__main__":
    #from skeletonbuilder import rulesfile
    def test( ):
        app = DemoApp(0)
        app.MainLoop()
    print 'Creating dialog'
    test( )
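For a sense of how the module is used today: a minimal modern-wxPython (Phoenix) sketch that drops a FileBrowseButton into a frame and reacts to selections via changeCallback. The frame class, label text, and callback body are illustrative; layout details are omitted.

import wx
from wx.lib.filebrowsebutton import FileBrowseButton

class DemoFrame(wx.Frame):
    def __init__(self):
        super().__init__(None, title='FileBrowseButton demo')
        # The Browse button launches a wx.FileDialog and writes the chosen
        # path back into the text control; changeCallback fires on updates.
        FileBrowseButton(self, labelText='Input file:',
                         changeCallback=lambda evt: print(evt.GetString()))

if __name__ == '__main__':
    app = wx.App(False)
    DemoFrame().Show()
    app.MainLoop()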
[ 2, 10097, 23031, 198, 2, 6530, 25, 220, 220, 220, 220, 220, 220, 220, 266, 87, 37906, 13, 8019, 13, 7753, 25367, 325, 16539, 198, 2, 32039, 25, 220, 220, 220, 220, 49355, 6973, 326, 2148, 257, 44775, 4936, 1306, 284, 198, 2, 220, ...
3.139942
343
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8

# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re

import errors
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 43907, 25, 257, 72, 40379, 28, 19, 39747, 28, 19, 2123, 1509, 28, 19, 21004, 28, 40477, 12, 23, 198, 198, 2, 3934, 25, 23503, 805, 532, 50, 21015, 12, 79, 893, 48499, 198, 2, ...
2.846154
65
# while True:
#     # execute this
#     print("Hola")

real = 7

print("Entre un numero entre el 1 y el 10")
guess = int(input())

# =/=
while guess != real:
    print("Ese no es el numero")
    print("Entre un numero entre el 1 y el 10")
    guess = int(input())

# the rest
print("Yay! Lo sacastes!")
[ 2, 981, 6407, 25, 198, 2, 220, 220, 220, 220, 1303, 304, 73, 721, 29822, 1556, 78, 198, 2, 220, 220, 220, 220, 3601, 7203, 39, 5708, 4943, 198, 198, 5305, 796, 767, 198, 198, 4798, 7203, 14539, 260, 555, 997, 3529, 920, 260, 128...
2.283582
134
def rotCode(data):
    """
    The rotCode function encodes/decodes data using string indexing
    :param data: A string
    :return: The rot-13 encoded/decoded string
    """
    rot_chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

    substitutions = []

    # Walk through each individual character
    for c in data:
        if c.isupper():
            try:
                # Find the position of the character in rot_chars list
                index = rot_chars.index(c.lower())
            except ValueError:
                substitutions.append(c)
                continue
            # Calculate the relative index that is 13 characters away from the index
            substitutions.append((rot_chars[(index-13)]).upper())
        else:
            try:
                # Find the position of the character in rot_chars list
                index = rot_chars.index(c)
            except ValueError:
                substitutions.append(c)
                continue
            substitutions.append(rot_chars[((index-13))])

    return ''.join(substitutions)


if __name__ == '__main__':
    print(rotCode('Jul, EBG-13?'))
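As an aside (not part of the original file), the standard library ships the same transformation as a codec, which makes a handy cross-check:

import codecs
print(codecs.encode('Jul, EBG-13?', 'rot_13'))   # -> Why, ROT-13?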
[ 4299, 5724, 10669, 7, 7890, 2599, 198, 220, 220, 220, 220, 220, 37227, 198, 220, 220, 220, 220, 220, 383, 5724, 10669, 2163, 2207, 4147, 14, 12501, 4147, 1366, 1262, 4731, 6376, 278, 198, 220, 220, 220, 220, 220, 1058, 17143, 1366, ...
1.888742
755
import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params


## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
#     '../data/glove.840B.300d.txt' # need to download it first
# ]
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
    '../data/glove.6B.50d.txt' # need to download it first
]
rmpcs = [0, 1]

comment4para = [ # need to align with the following loop
    ['word vector files', wordfiles], # comments and values,
    ['remove principal component or not', rmpcs]
]

params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
    (words, We) = data_io.getWordmap(wordfile)
    weight4ind = data_io.getIDFWeight(wordfile)
    for rmpc in rmpcs:
        print('word vectors loaded from %s' % wordfile)
        print('word weights computed from idf')
        params.rmpc = rmpc
        print('remove the first %d principal components' % rmpc)
        # eval just one example dataset
        parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
        ## eval all datasets; need to obtain datasets from John Wieting (https://github.com/jwieting/iclr2016)
        # parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
        paras = (wordfile, rmpc)
        parr4para[paras] = parr
        sarr4para[paras] = sarr

## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
    with open(result_file, 'w') as f:
        pickle.dump([parr4para, sarr4para, comment4para], f)
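The rmpc step removes the first principal component(s) from the sentence embeddings (the SIF trick). This numpy sketch is an assumed equivalent of what sim_algo.weighted_average_sim_rmpc does internally for rmpc=1:

import numpy as np

def remove_pc(X, npc=1):
    """Project out the top npc principal directions of the embedding matrix X."""
    _, _, Vt = np.linalg.svd(X, full_matrices=False)
    pc = Vt[:npc]
    return X - (X @ pc.T) @ pc

emb = np.random.default_rng(0).normal(size=(100, 50))  # 100 sentences, dim 50
emb_rm = remove_pc(emb, npc=1)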
[ 11748, 2298, 293, 11, 25064, 198, 17597, 13, 6978, 13, 33295, 10786, 40720, 10677, 11537, 198, 11748, 1366, 62, 952, 11, 985, 62, 282, 2188, 11, 5418, 11, 42287, 628, 198, 2235, 1057, 198, 2, 1573, 16624, 796, 685, 2, 6, 40720, 7890...
2.471248
713
if __name__ == "__main__":
    f = Foo()
    b = f.bar()
    print(b)
[ 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 277, 796, 36080, 3419, 198, 220, 220, 220, 275, 796, 277, 13, 5657, 3419, 198, 220, 220, 220, 3601, 7, 65, 8 ]
1.868421
38
import numpy as np
import os
from random import shuffle

datasets_dir = './../data/'
[ 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 198, 6738, 4738, 1330, 36273, 198, 19608, 292, 1039, 62, 15908, 796, 705, 19571, 40720, 7890, 14, 6, 198 ]
3.111111
27
import math
import os
import random
import re
import sys

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')

    print("Escribe las notas de a")
    a = list(map(int, input().rstrip().split()))
    print("Escribe las notas de b")
    b = list(map(int, input().rstrip().split()))

    result = compareTriplets(a, b)

    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
[ 11748, 10688, 198, 11748, 28686, 198, 11748, 4738, 198, 11748, 302, 198, 11748, 25064, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 220, 220, 277, 20692, 796, 1280, 7, 418, 13, 268, 2268, 17816, 2606, ...
2.405405
185
#===============================================================
# @author:  nityanarayan44@live.com
# @written: 08 December 2021
# @desc:    Routes for the Backend server
#===============================================================

# Import section with reference to entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect

# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data

# ==============================================================
# App Routes/Gateways
# ==============================================================

# ==============================================================
# Error Handlers Starts
# ==============================================================

# 404 Handler; We can also pass the specific request error codes to the decorator;

# Exception/Error handler; We can also pass the specific errors to the decorator;

# Exception/Error handler; We can also pass the specific errors to the decorator;

# ==============================================================
# Error Handlers Ends
# ==============================================================

# Route For Sample data

# ==============================================================
# Extra routes starts
# ==============================================================
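A sketch of one of the elided handlers, using the imports above; the template name '404.html' and handler body are assumed, not taken from the project:

@application.errorhandler(404)
def handle_not_found(error):
    # 404 Handler; specific request error codes can be passed to the decorator
    return render_template('404.html', config=app_ui_config), 404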
[ 2, 10052, 4770, 25609, 18604, 201, 198, 2, 2488, 9800, 25, 220, 299, 414, 272, 283, 22931, 2598, 31, 12583, 13, 785, 201, 198, 2, 2488, 15266, 25, 8487, 3426, 33448, 201, 198, 2, 2488, 20147, 25, 220, 220, 220, 39602, 274, 329, 26...
4.473846
325
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# """ Air separation phase equilibrium package using Peng-Robinson EoS. Example property package using the Generic Property Package Framework. This example shows how to set up a property package to do air separation phase equilibrium in the generic framework using Peng-Robinson equation along with methods drawn from the pre-built IDAES property libraries. The example includes two dictionaries. 1. The dictionary named configuration contains parameters obtained from The Properties of Gases and Liquids (1987) 4th edition and NIST. 2. The dictionary named configuration_Dowling_2015 contains parameters used in A framework for efficient large scale equation-oriented flowsheet optimization (2015) Dowling. The parameters are extracted from Properties of Gases and Liquids (1977) 3rd edition for Antoine's vapor equation and acentric factors and converted values from the Properties of Gases and Liquids (1977) 3rd edition to j. """ # Import Python libraries import logging # Import Pyomo units from pyomo.environ import units as pyunits # Import IDAES cores from idaes.core import LiquidPhase, VaporPhase, Component from idaes.generic_models.properties.core.state_definitions import FTPx from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType from idaes.generic_models.properties.core.phase_equil import SmoothVLE from idaes.generic_models.properties.core.phase_equil.bubble_dew import \ LogBubbleDew from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity from idaes.generic_models.properties.core.pure import RPP4 from idaes.generic_models.properties.core.pure import NIST from idaes.generic_models.properties.core.pure import RPP3 # Set up logger _log = logging.getLogger(__name__) # --------------------------------------------------------------------- # Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system # Data Sources: # [1] The Properties of Gases and Liquids (1987) # 4th edition, Chemical Engineering Series - Robert C. Reid # [2] NIST, https://webbook.nist.gov/ # Retrieved 16th August, 2020 # [3] The Properties of Gases and Liquids (1987) # 3rd edition, Chemical Engineering Series - Robert C. Reid # Cp parameters where converted to j in Dowling 2015 # [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015) # Computers and Chemical Engineering - Alexander W. 
Dowling configuration = { # Specifying components "components": { "nitrogen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": NIST, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [1] "pressure_crit": (34e5, pyunits.Pa), # [1] "temperature_crit": (126.2, pyunits.K), # [1] "omega": 0.037, # [1] "cp_mol_ig_comp_coeff": { "A": (3.115E1, pyunits.J/pyunits.mol/pyunits.K), # [1] "B": (-1.357E-2, pyunits.J/pyunits.mol/pyunits.K**2), "C": (2.680E-5, pyunits.J/pyunits.mol/pyunits.K**3), "D": (-1.168E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 191.61, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { "A": (3.7362, None), # [2] "B": (264.651, pyunits.K), "C": (-6.788, pyunits.K)}}}, "argon": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": NIST, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (39.948E-3, pyunits.kg/pyunits.mol), # [1] "pressure_crit": (48.98e5, pyunits.Pa), # [1] "temperature_crit": (150.86, pyunits.K), # [1] "omega": 0.001, # [1] "cp_mol_ig_comp_coeff": { "A": (2.050E1, pyunits.J/pyunits.mol/pyunits.K), # [1] "B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2), "C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3), "D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 154.8, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": {"A": (3.29555, None), # [2] "B": (215.24, pyunits.K), "C": (-22.233, pyunits.K)}}}, "oxygen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": NIST, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (31.999E-3, pyunits.kg/pyunits.mol), # [1] "pressure_crit": (50.43e5, pyunits.Pa), # [1] "temperature_crit": (154.58, pyunits.K), # [1] "omega": 0.025, # [1] "cp_mol_ig_comp_coeff": { "A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K), "B": (-3.680E-6, pyunits.J/pyunits.mol/pyunits.K**2), "C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3), "D": (-1.065E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 205.152, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { "A": (3.85845, None), # [2] "B": (325.675, pyunits.K), "C": (-5.667, pyunits.K)}}}}, # Specifying phases "phases": {"Liq": {"type": LiquidPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}, "Vap": {"type": VaporPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}}, # Set base units of measurement "base_units": {"time": pyunits.s, "length": pyunits.m, "mass": pyunits.kg, "amount": pyunits.mol, "temperature": pyunits.K}, # Specifying state definition "state_definition": FTPx, "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s), "temperature": (10, 300, 350, pyunits.K), "pressure": (5e4, 1e5, 1e7, pyunits.Pa)}, "pressure_ref": (101325, pyunits.Pa), "temperature_ref": (298.15, pyunits.K), # Defining phase equilibria "phases_in_equilibrium": [("Vap", "Liq")], "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE}, "bubble_dew_method": LogBubbleDew, "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000, ("nitrogen", "argon"): 
-0.26e-2, ("nitrogen", "oxygen"): -0.119e-1, ("argon", "nitrogen"): -0.26e-2, ("argon", "argon"): 0.000, ("argon", "oxygen"): 0.104e-1, ("oxygen", "nitrogen"): -0.119e-1, ("oxygen", "argon"): 0.104e-1, ("oxygen", "oxygen"): 0.000}}} configuration_Dowling_2015 = { # Specifying components "components": { "nitrogen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": RPP3, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [3] "pressure_crit": (33.943875e5, pyunits.Pa), # [4] "temperature_crit": (126.2, pyunits.K), # [4] "omega": 0.04, # [3] "cp_mol_ig_comp_coeff": { 'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K), # [3] 'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2), 'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3), 'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 191.61, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { 'A': (14.9342, None), # [3] 'B': (588.72, pyunits.K), 'C': (-6.60, pyunits.K)}}}, "argon": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": RPP3, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (39.948E-3, pyunits.kg/pyunits.mol), # [3] "pressure_crit": (48.737325e5, pyunits.Pa), # [4] "temperature_crit": (150.86, pyunits.K), # [4] "omega": -0.004, # [1] "cp_mol_ig_comp_coeff": { 'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K), # [3] 'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2), 'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3), 'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [3] "entr_mol_form_vap_comp_ref": ( 154.8, pyunits.J/pyunits.mol/pyunits.K), # [3] "pressure_sat_comp_coeff": { 'A': (15.2330, None), # [3] 'B': (700.51, pyunits.K), 'C': (-5.84, pyunits.K)}}}, "oxygen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": RPP3, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (31.999E-3, pyunits.kg/pyunits.mol), # [3] "pressure_crit": (50.45985e5, pyunits.Pa), # [4] "temperature_crit": (154.58, pyunits.K), # [4] "omega": 0.021, # [1] "cp_mol_ig_comp_coeff": { 'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K), # [3] 'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2), 'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3), 'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 205.152, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { 'A': (15.4075, None), # [3] 'B': (734.55, pyunits.K), 'C': (-6.45, pyunits.K)}}}}, # Specifying phases "phases": {"Liq": {"type": LiquidPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}, "Vap": {"type": VaporPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}}, # Set base units of measurement "base_units": {"time": pyunits.s, "length": pyunits.m, "mass": pyunits.kg, "amount": pyunits.mol, "temperature": pyunits.K}, # Specifying state definition "state_definition": FTPx, "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s), "temperature": (10, 300, 350, pyunits.K), "pressure": (5e4, 1e5, 1e7, pyunits.Pa)}, "pressure_ref": (101325, pyunits.Pa), 
"temperature_ref": (298.15, pyunits.K), # Defining phase equilibria "phases_in_equilibrium": [("Vap", "Liq")], "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE}, "bubble_dew_method": LogBubbleDew, "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000, ("nitrogen", "argon"): -0.26e-2, ("nitrogen", "oxygen"): -0.119e-1, ("argon", "nitrogen"): -0.26e-2, ("argon", "argon"): 0.000, ("argon", "oxygen"): 0.104e-1, ("oxygen", "nitrogen"): -0.119e-1, ("oxygen", "argon"): 0.104e-1, ("oxygen", "oxygen"): 0.000}}}
[ 29113, 29113, 14468, 2, 198, 2, 383, 5136, 329, 262, 8495, 286, 13435, 6682, 11998, 35432, 19193, 198, 2, 25161, 357, 41957, 1546, 6101, 8, 373, 4635, 739, 262, 46984, 5136, 329, 262, 198, 2, 8495, 286, 13435, 6682, 11998, 357, 41957,...
1.731191
9,025
"""TODO: Move the Threads Here"""
[ 37811, 51, 3727, 46, 25, 10028, 262, 14122, 82, 3423, 37811, 198 ]
2.833333
12
import functools
import pickle

import kerastuner
import tensorflow as tf
from tensorflow.python.util import nest

from autokeras.hypermodel import base
from autokeras.hypermodel import compiler


def copy(old_instance):
    instance = old_instance.__class__()
    instance.set_state(old_instance.get_state())
    return instance
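A usage sketch of copy(): any object exposing get_state()/set_state() can be duplicated without sharing mutable state. The Dummy class is illustrative, not from autokeras:

class Dummy:
    def __init__(self):
        self.state = {}
    def get_state(self):
        return dict(self.state)
    def set_state(self, state):
        self.state = state

a = Dummy()
a.state['k'] = 1
b = copy(a)                                # fresh instance, same state values
assert b.state == {'k': 1} and b.state is not a.state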
[ 11748, 1257, 310, 10141, 198, 11748, 2298, 293, 198, 198, 11748, 41927, 459, 38886, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 6738, 11192, 273, 11125, 13, 29412, 13, 22602, 1330, 16343, 198, 198, 6738, 1960, 11020, 292, 13, 49229, ...
3.252427
103
'''
Speed: 95.97%
Memory: 24.96%

Time complexity: O(n)
Space complexity: O(n)
'''
[ 7061, 6, 198, 22785, 25, 6957, 13, 5607, 4, 198, 30871, 25, 1987, 13, 4846, 4, 198, 7575, 13357, 25, 440, 7, 77, 8, 198, 14106, 13357, 25, 440, 7, 77, 8, 198, 7061, 6 ]
2.314286
35
from distutils.core import setup, Extension
import os.path

kw = {
    'name': "PyOpenAES",
    'version': "0.10.0",
    'description': "OpenAES cryptographic library for Python.",
    'ext_modules': [
        Extension(
            'openaes',
            include_dirs = ['inc', 'src/isaac'],
            # define_macros=[('ENABLE_PYTHON', '1')],
            sources = [
                os.path.join('src/oaes_lib.c'),
                os.path.join('src/oaes_py.c'),
                os.path.join('src/isaac/rand.c')
            ]
        )
    ]
}

setup(**kw)
[ 6738, 1233, 26791, 13, 7295, 1330, 9058, 11, 27995, 198, 11748, 28686, 13, 6978, 198, 198, 46265, 796, 1391, 198, 197, 6, 3672, 10354, 1, 20519, 11505, 32, 1546, 1600, 198, 197, 1053, 3808, 295, 10354, 1, 15, 13, 940, 13, 15, 1600, ...
2.118483
211
#!/usr/bin/env python

# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html

from scitools.easyviz import *
from time import sleep
from scipy import io

setp(interactive=False)

# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D, 5, indexing='xy')
#hiso = isosurface(Ds,5),
#    'FaceColor',[1,.75,.65],...
#    'EdgeColor','none');
shading('interp')

# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
#    'FaceColor','interp',...
#    'EdgeColor','none');
#colormap(map)

# Define the View:
view(45, 30)
axis('tight')
daspect([1, 1, .4])

# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)

show()
raw_input('Press Return key to quit: ')

#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 2, 17934, 2077, 422, 25, 198, 2, 2638, 1378, 2503, 13, 11018, 5225, 13, 785, 14, 15526, 14, 16794, 8906, 74, 14, 16794, 14, 13670, 15390, 14, 41464, 1096, 14, 69, 20, 12, 2091...
2.347087
412
import os
import numpy as np
import json

import util_amira
[ 11748, 28686, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 33918, 198, 198, 11748, 7736, 62, 321, 8704, 628, 628, 628, 220, 220, 220, 220, 628 ]
2.730769
26
"""Blueprint definitions for maDMP integration.""" from flask import Blueprint, jsonify, request from invenio_db import db from .convert import convert_dmp from .models import DataManagementPlan def _summarize_dmp(dmp: DataManagementPlan) -> dict: """Create a summary dictionary for the given DMP.""" res = {"dmp_id": dmp.dmp_id, "datasets": []} for ds in dmp.datasets: dataset = {"dataset_id": ds.dataset_id, "record": None} if ds.record: dataset["record"] = ds.record.model.json res["datasets"].append(dataset) return res def create_rest_blueprint(app) -> Blueprint: """Create the blueprint for the REST endpoints using the current app extensions.""" # note: using flask.current_app isn't directly possible, because Invenio-MaDMP is # registered as an extension in the API app, not the "normal" app # (which is the one usually returned by current_app) rest_blueprint = Blueprint("invenio_madmp", __name__) auth = app.extensions["invenio-madmp"].auth return rest_blueprint
[ 37811, 14573, 4798, 17336, 329, 17266, 35, 7378, 11812, 526, 15931, 198, 198, 6738, 42903, 1330, 39932, 11, 33918, 1958, 11, 2581, 198, 6738, 287, 574, 952, 62, 9945, 1330, 20613, 198, 198, 6738, 764, 1102, 1851, 1330, 10385, 62, 67, ...
2.768638
389
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('retrieval_insert', views.retrieval_insert, name='retrieval_insert'),
    path('retrieval_get', views.retrieval_get, name='retrieval_get')
]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 198, 6738, 764, 1330, 5009, 198, 198, 6371, 33279, 82, 796, 685, 198, 220, 220, 220, 3108, 10786, 3256, 5009, 13, 9630, 11, 1438, 11639, 9630, 33809, 198, 220, 220, 220, 3108, 10786, ...
2.734043
94
import RPi.GPIO as GPIO
import threading

# r_en_a = 27
# r_en_b = 10

# l_en_a = 5
# l_en_b = 6

# enc_obj = Encoder(27, 10, 5, 6)

# def update_encoders():
#     threading.Timer(1, update_encoders).start()
#     print(" looping ")

# update_encoders()
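Uncommented, the polling pattern in those comments runs on its own; each call re-arms a one-second threading.Timer before doing its work (the Encoder wiring above stays hypothetical):

def update_encoders():
    threading.Timer(1, update_encoders).start()   # re-arm before doing work
    print(" looping ")

# update_encoders()  # starts an endless 1 Hz poll; requires a running main thread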
[ 11748, 25812, 72, 13, 16960, 9399, 355, 50143, 198, 11748, 4704, 278, 198, 198, 2, 374, 62, 268, 62, 64, 796, 2681, 220, 198, 2, 374, 62, 268, 62, 65, 796, 838, 198, 198, 2, 300, 62, 268, 62, 64, 796, 642, 198, 2, 300, 62, 2...
2.066116
121
from django.conf.urls import url from django.urls import path, include from systori.apps.user.authorization import office_auth from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate urlpatterns = [ # two url rules to make the active_filter keyword optional url( r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list" ), url( r"^equipment/(?P<active_filter>[\w-]+)$", office_auth(EquipmentListView.as_view()), name="equipment.list", ), url( r"^equipment-(?P<pk>\d+)$", office_auth(EquipmentView.as_view()), name="equipment.view", ), url( r"^create-equipment$", office_auth(EquipmentCreate.as_view()), name="equipment.create", ), url( r"^equipment-(?P<pk>\d+)/edit$", office_auth(EquipmentUpdate.as_view()), name="equipment.edit", ), url( r"^equipment-(?P<pk>\d+)/delete$", office_auth(EquipmentDelete.as_view()), name="equipment.delete", ), url( r"^equipment-(?P<pk>\d+)/create-refueling-stop$", office_auth(RefuelingStopCreate.as_view()), name="refueling_stop.create", ), url( r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$", office_auth(RefuelingStopUpdate.as_view()), name="refueling_stop.update", ), url( r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete", office_auth(RefuelingStopDelete.as_view()), name="refueling_stop.delete", ), url( r"^equipment-(?P<pk>\d+)/create-maintenance", office_auth(MaintenanceCreate.as_view()), name="maintenance.create", ), url( r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$", office_auth(MaintenanceUpdate.as_view()), name="maintenance.update", ), url( r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete", office_auth(MaintenanceDelete.as_view()), name="maintenance.delete", ), ]
[ 6738, 42625, 14208, 13, 10414, 13, 6371, 82, 1330, 19016, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 11, 2291, 198, 198, 6738, 827, 301, 10145, 13, 18211, 13, 7220, 13, 9800, 1634, 1330, 2607, 62, 18439, 198, 6738, 827, 301, ...
2.154571
1,061
#coding:utf-8 # Generated by the protocol buffer compiler. DO NOT EDIT! # source: check_info.proto import sys _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='check_info.proto', package='paddlehub.module.checkinfo', syntax='proto3', serialized_pb=_b( '\n\x10\x63heck_info.proto\x12\x1apaddlehub.module.checkinfo\"\x85\x01\n\x08\x46ileInfo\x12\x11\n\tfile_name\x18\x01 \x01(\t\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32%.paddlehub.module.checkinfo.FILE_TYPE\x12\x0f\n\x07is_need\x18\x03 \x01(\x08\x12\x0b\n\x03md5\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\x84\x01\n\x08Requires\x12>\n\x0crequire_type\x18\x01 \x01(\x0e\x32(.paddlehub.module.checkinfo.REQUIRE_TYPE\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\ngreat_than\x18\x03 \x01(\x08\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\"\xc8\x01\n\tCheckInfo\x12\x16\n\x0epaddle_version\x18\x01 \x01(\t\x12\x13\n\x0bhub_version\x18\x02 \x01(\t\x12\x1c\n\x14module_proto_version\x18\x03 \x01(\t\x12\x38\n\nfile_infos\x18\x04 \x03(\x0b\x32$.paddlehub.module.checkinfo.FileInfo\x12\x36\n\x08requires\x18\x05 \x03(\x0b\x32$.paddlehub.module.checkinfo.Requires*\x1e\n\tFILE_TYPE\x12\x08\n\x04\x46ILE\x10\x00\x12\x07\n\x03\x44IR\x10\x01*[\n\x0cREQUIRE_TYPE\x12\x12\n\x0ePYTHON_PACKAGE\x10\x00\x12\x0e\n\nHUB_MODULE\x10\x01\x12\n\n\x06SYSTEM\x10\x02\x12\x0b\n\x07\x43OMMAND\x10\x03\x12\x0e\n\nPY_VERSION\x10\x04\x42\x02H\x03\x62\x06proto3' )) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _FILE_TYPE = _descriptor.EnumDescriptor( name='FILE_TYPE', full_name='paddlehub.module.checkinfo.FILE_TYPE', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='FILE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='DIR', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, serialized_start=522, serialized_end=552, ) _sym_db.RegisterEnumDescriptor(_FILE_TYPE) FILE_TYPE = enum_type_wrapper.EnumTypeWrapper(_FILE_TYPE) _REQUIRE_TYPE = _descriptor.EnumDescriptor( name='REQUIRE_TYPE', full_name='paddlehub.module.checkinfo.REQUIRE_TYPE', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='PYTHON_PACKAGE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='HUB_MODULE', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='SYSTEM', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='COMMAND', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='PY_VERSION', index=4, number=4, options=None, type=None), ], containing_type=None, options=None, serialized_start=554, serialized_end=645, ) _sym_db.RegisterEnumDescriptor(_REQUIRE_TYPE) REQUIRE_TYPE = enum_type_wrapper.EnumTypeWrapper(_REQUIRE_TYPE) FILE = 0 DIR = 1 PYTHON_PACKAGE = 0 HUB_MODULE = 1 SYSTEM = 2 COMMAND = 3 PY_VERSION = 4 _FILEINFO = _descriptor.Descriptor( name='FileInfo', full_name='paddlehub.module.checkinfo.FileInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='file_name', full_name='paddlehub.module.checkinfo.FileInfo.file_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='paddlehub.module.checkinfo.FileInfo.type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='is_need', full_name='paddlehub.module.checkinfo.FileInfo.is_need', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='md5', full_name='paddlehub.module.checkinfo.FileInfo.md5', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='description', full_name='paddlehub.module.checkinfo.FileInfo.description', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=49, serialized_end=182, ) _REQUIRES = _descriptor.Descriptor( name='Requires', full_name='paddlehub.module.checkinfo.Requires', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='require_type', full_name='paddlehub.module.checkinfo.Requires.require_type', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='version', full_name='paddlehub.module.checkinfo.Requires.version', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='great_than', full_name='paddlehub.module.checkinfo.Requires.great_than', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='description', full_name='paddlehub.module.checkinfo.Requires.description', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=185, serialized_end=317, ) _CHECKINFO = _descriptor.Descriptor( name='CheckInfo', full_name='paddlehub.module.checkinfo.CheckInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ 
_descriptor.FieldDescriptor( name='paddle_version', full_name='paddlehub.module.checkinfo.CheckInfo.paddle_version', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='hub_version', full_name='paddlehub.module.checkinfo.CheckInfo.hub_version', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='module_proto_version', full_name= 'paddlehub.module.checkinfo.CheckInfo.module_proto_version', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='file_infos', full_name='paddlehub.module.checkinfo.CheckInfo.file_infos', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='requires', full_name='paddlehub.module.checkinfo.CheckInfo.requires', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=320, serialized_end=520, ) _FILEINFO.fields_by_name['type'].enum_type = _FILE_TYPE _REQUIRES.fields_by_name['require_type'].enum_type = _REQUIRE_TYPE _CHECKINFO.fields_by_name['file_infos'].message_type = _FILEINFO _CHECKINFO.fields_by_name['requires'].message_type = _REQUIRES DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO DESCRIPTOR.message_types_by_name['Requires'] = _REQUIRES DESCRIPTOR.message_types_by_name['CheckInfo'] = _CHECKINFO DESCRIPTOR.enum_types_by_name['FILE_TYPE'] = _FILE_TYPE DESCRIPTOR.enum_types_by_name['REQUIRE_TYPE'] = _REQUIRE_TYPE FileInfo = _reflection.GeneratedProtocolMessageType( 'FileInfo', (_message.Message, ), dict( DESCRIPTOR=_FILEINFO, __module__='check_info_pb2' # @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.FileInfo) )) _sym_db.RegisterMessage(FileInfo) Requires = _reflection.GeneratedProtocolMessageType( 'Requires', (_message.Message, ), dict( DESCRIPTOR=_REQUIRES, __module__='check_info_pb2' # @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.Requires) )) _sym_db.RegisterMessage(Requires) CheckInfo = _reflection.GeneratedProtocolMessageType( 'CheckInfo', (_message.Message, ), dict( DESCRIPTOR=_CHECKINFO, __module__='check_info_pb2' # @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.CheckInfo) )) _sym_db.RegisterMessage(CheckInfo) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) # @@protoc_insertion_point(module_scope)
from numpy import zeros

# Define ab2 function (left as a placeholder in the original; a sketch follows this snippet)

# Define functions

# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5

# Execute AB2
t, yab2 = ab2(f, t0, tf, y0, n)

# Print results
print("%5s %8s" % ('t', 'y'))
for i in range(n + 1):
    print("%8.4f %8.4f" % (t[i], yab2[i]))
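# The snippet above calls ab2 and f without defining them. A minimal sketch of
# both, assuming a forward-Euler bootstrap and the test problem y' = -y; in a
# runnable script these definitions would precede the driver code above.
# Two-step Adams-Bashforth update:
#   y[i+1] = y[i] + h/2 * (3*f(t[i], y[i]) - f(t[i-1], y[i-1]))
def ab2(f, t0, tf, y0, n):
    h = (tf - t0) / n
    t = zeros(n + 1)
    y = zeros(n + 1)
    t[0], y[0] = t0, y0
    t[1] = t0 + h
    y[1] = y[0] + h * f(t[0], y[0])  # Euler step supplies the second start value
    for i in range(1, n):
        t[i + 1] = t[i] + h
        y[i + 1] = y[i] + h / 2 * (3 * f(t[i], y[i]) - f(t[i - 1], y[i - 1]))
    return t, y

def f(t, y):
    return -y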
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from datetime import datetime, timedelta

from lingua_franca import get_default_lang, set_default_lang, \
    load_language, unload_language
from lingua_franca.parse import extract_datetime
from lingua_franca.parse import extract_duration
from lingua_franca.parse import extract_number, extract_numbers
from lingua_franca.parse import fuzzy_match
from lingua_franca.parse import get_gender
from lingua_franca.parse import match_one
from lingua_franca.parse import normalize


if __name__ == "__main__":
    unittest.main()
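# No test classes were captured in this excerpt; a minimal example of the kind
# of case such a suite contains (assumed, not from the original file, and it
# would sit above the __main__ guard):
class TestNumberExtraction(unittest.TestCase):
    def setUp(self):
        load_language("en")

    def tearDown(self):
        unload_language("en")

    def test_extract_number(self):
        self.assertEqual(extract_number("this is 2 of the tests"), 2)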
#!/usr/bin/python
# -*- coding: utf-8 -*-

# @Time   : 2020/9/18 12:02
# @Author : WardenAllen
# @File   : pluto_ftp.py
# @Brief  :

import paramiko
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
import sys
sys.path = ['', '..'] + sys.path[1:]

import daemon
from assistance_bot import core
from functionality.voice_processing import speaking, listening
from functionality.commands import *


if __name__ == '__main__':
    speaking.setup_assistant_voice(core.ttsEngine, core.assistant)

    while True:
        # start speech recording and speech recognition
        recognized_speech = listening.get_listening_and_recognition_result(
            core.recognizer, core.microphone)
        # executing the given command
        execute_command(recognized_speech)
values = [1, 2, 3, 4]  # assumed sample input; the original left `values` undefined
total = 0
partial_sums = [total := total + v for v in values]
print("Total:", total)
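# The same running sums without assignment expressions, via the standard library:
from itertools import accumulate
partial_sums = list(accumulate(values))
print("Total:", partial_sums[-1] if partial_sums else 0)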
# -*- coding: utf8 -*- ######################################################################################## # This file is part of exhale. Copyright (c) 2017-2022, Stephen McDowell. # # Full BSD 3-Clause license available here: # # # # https://github.com/svenevs/exhale/blob/master/LICENSE # ######################################################################################## ''' The deploy module is responsible for two primary actions: 1. Executing Doxygen (if requested in ``exhale_args``). 2. Launching the full API generation via the :func:`~exhale.deploy.explode` function. ''' from __future__ import unicode_literals from . import configs from . import utils from .graph import ExhaleRoot import os import sys import six import re import codecs import tempfile import textwrap from subprocess import PIPE, Popen, STDOUT def _generate_doxygen(doxygen_input): ''' This method executes doxygen based off of the specified input. By the time this method is executed, it is assumed that Doxygen is intended to be run in the **current working directory**. Search for ``returnPath`` in the implementation of :func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect. This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`, which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`. Two versions of the doxygen command can be executed: 1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a ``Doxyfile`` exists in the **current working directory**. Meaning the command being executed is simply ``doxygen``. 2. For all other values, ``doxygen_input`` represents the arguments as to be specified on ``stdin`` to the process. **Parameters** ``doxygen_input`` (str) Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``) that will be ``communicate``d to the ``doxygen`` process on ``stdin``. .. note:: If using Python **3**, the input **must** still be a ``str``. This method will convert the input to ``bytes`` as follows: .. code-block:: py if sys.version[0] == "3": doxygen_input = bytes(doxygen_input, "utf-8") **Return** ``str`` or ``None`` If an error occurs, a string describing the error is returned with the intention of the caller raising the exception. If ``None`` is returned, then the process executed without error. Example usage: .. code-block:: py status = _generate_doxygen("Doxygen") if status: raise RuntimeError(status) Though a little awkward, this is done to enable the intended caller of this method to restore some state before exiting the program (namely, the working directory before propagating an exception to ``sphinx-build``). ''' if not isinstance(doxygen_input, six.string_types): return "Error: the `doxygen_input` variable must be of type `str`." doxyfile = doxygen_input == "Doxyfile" try: # Setup the arguments to launch doxygen if doxyfile: args = ["doxygen"] kwargs = {} else: args = ["doxygen", "-"] kwargs = {"stdin": PIPE} if configs._on_rtd: # On RTD, any capturing of Doxygen output can cause buffer overflows for # even medium sized projects. So it is disregarded entirely to ensure the # build will complete (otherwise, it silently fails after `cat conf.py`) devnull_file = open(os.devnull, "w") kwargs["stdout"] = devnull_file kwargs["stderr"] = STDOUT else: # TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the # `communicate` to hang due to buffer overflows. 
# # See excellent synopsis: # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/ if six.PY2: tempfile_kwargs = {} else: # encoding argument introduced in python 3 tempfile_kwargs = {"encoding": "utf-8"} tempfile_kwargs["mode"] = "r+" tmp_out_file = tempfile.TemporaryFile( prefix="doxygen_stdout_buff", **tempfile_kwargs ) tmp_err_file = tempfile.TemporaryFile( prefix="doxygen_stderr_buff", **tempfile_kwargs ) # Write to the tempfiles over PIPE to avoid buffer overflowing kwargs["stdout"] = tmp_out_file kwargs["stderr"] = tmp_err_file # Note: overload of args / kwargs, Popen is expecting a list as the first # parameter (aka no *args, just args)! doxygen_proc = Popen(args, **kwargs) # Communicate can only be called once, arrange whether or not stdin has value if not doxyfile: # In Py3, make sure we are communicating a bytes-like object which is no # longer interchangeable with strings (as was the case in Py2). if sys.version[0] == "3": doxygen_input = bytes(doxygen_input, "utf-8") comm_kwargs = {"input": doxygen_input} else: comm_kwargs = {} # Waits until doxygen has completed doxygen_proc.communicate(**comm_kwargs) # Print out what was written to the tmpfiles by doxygen if not configs._on_rtd and not configs.exhaleSilentDoxygen: # Doxygen output (some useful information, mostly just enumeration of the # configurations you gave it {useful for debugging...}) if tmp_out_file.tell() > 0: tmp_out_file.seek(0) print(tmp_out_file.read()) # Doxygen error (e.g. any warnings, or invalid input) if tmp_err_file.tell() > 0: # Making them stick out, ideally users would reduce this output to 0 ;) # This will print a yellow [~] before every line, but not make the # entire line yellow because it's definitively not helpful prefix = utils._use_color( utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr ) tmp_err_file.seek(0) sys.stderr.write(utils.prefix(prefix, tmp_err_file.read())) # Close the file handles opened for communication with subprocess if configs._on_rtd: devnull_file.close() else: # Delete the tmpfiles tmp_out_file.close() tmp_err_file.close() # Make sure we had a valid execution of doxygen exit_code = doxygen_proc.returncode if exit_code != 0: raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code)) except Exception as e: return "Unable to execute 'doxygen': {0}".format(e) # returning None signals _success_ return None def _valid_config(config, required): ''' .. todo:: add documentation of this method ``config``: doxygen input we're looking for ``required``: if ``True``, must be present. if ``False``, NOT ALLOWED to be present ''' re_template = r"\s*{config}\s*=.*".format(config=config) found = re.search(re_template, configs.exhaleDoxygenStdin) if required: return found is not None else: return found is None ######################################################################################## # ## ### #### ##### Primary entry point. #### ### ## # ######################################################################################## def explode(): ''' This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has already been applied. It performs minimal sanity checking, and then performs in order 1. Creates a :class:`~exhale.graph.ExhaleRoot` object. 2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object. 3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object. 4. 
Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will only produce output when :data:`~exhale.configs.verboseBuild` is ``True``). This results in the full API being generated, and control is subsequently passed back to Sphinx to now read in the source documents (many of which were just generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the final output. ''' # Quick sanity check to make sure the bare minimum have been set in the configs err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly." if configs.containmentFolder is None: raise RuntimeError(err_msg.format(config="containmentFolder")) if configs.rootFileName is None: raise RuntimeError(err_msg.format(config="rootFileName")) if configs.doxygenStripFromPath is None: raise RuntimeError(err_msg.format(config="doxygenStripFromPath")) # From here on, we assume that everything else has been checked / configured. try: textRoot = ExhaleRoot() except: utils.fancyError("Unable to create an `ExhaleRoot` object:") try: sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML."))) start = utils.get_time() textRoot.parse() end = utils.get_time() sys.stdout.write("{0}\n".format( utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format( utils.time_string(start, end) )) )) except: utils.fancyError("Exception caught while parsing:") try: sys.stdout.write("{0}\n".format( utils.info("Exhale: generating reStructuredText documents.") )) start = utils.get_time() textRoot.generateFullAPI() end = utils.get_time() sys.stdout.write("{0}\n".format( utils.progress("Exhale: generated reStructuredText documents in {0}.".format( utils.time_string(start, end) )) )) except: utils.fancyError("Exception caught while generating:") # << verboseBuild # toConsole only prints if verbose mode is enabled textRoot.toConsole() # allow access to the result after-the-fact configs._the_app.exhale_root = textRoot
import argparse
import multiprocessing
import os
import random

import numpy as np

from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce

random.seed(2020)
num_cores = multiprocessing.cpu_count()

NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]

OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
    parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
    parser.add_argument('-ground_truth_type', type=str, default='bayesian',
                        help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
    parser.add_argument('-weight_type', type=str, default='pool',
                        help='weigh each bin with all data or only data seen so far, online or pool')
    parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
    parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')
    args, _ = parser.parse_known_args()

    if args.dataset not in DATASET_LIST:
        raise ValueError("%s is not in DATASET_LIST." % args.dataset)

    main(args)
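# `main` is not defined in this excerpt; a minimal assumed stub so the script's
# flow is visible (in the real file it would be defined above the __main__ guard):
def main(args):
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    print("dataset=%s, runs=%d, bins=%d" % (args.dataset, args.num_runs, args.num_bins))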
#
# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.  You may obtain a copy of the License at:
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#
#
# *****************************************  STATEFUL FUNCTION  *********************************************************
"""

* `StatefulFunction`
* `IntegratorFunctions`
* `MemoryFunctions`

"""

import abc
import typecheck as tc
import warnings
import numbers

import numpy as np

from psyneulink.core import llvm as pnlvm
from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter
from psyneulink.core.components.functions.function import Function_Base, FunctionError
from psyneulink.core.components.functions.distributionfunctions import DistributionFunction
from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE
from psyneulink.core.globals.parameters import Parameter
from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array
from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set
from psyneulink.core.globals.context import ContextFlags, handle_external_context

__all__ = ['StatefulFunction']
from ee.clickhouse.sql.clickhouse import KAFKA_COLUMNS, STORAGE_POLICY, kafka_engine from ee.clickhouse.sql.table_engines import CollapsingMergeTree, ReplacingMergeTree from ee.kafka_client.topics import KAFKA_PERSON, KAFKA_PERSON_DISTINCT_ID, KAFKA_PERSON_UNIQUE_ID from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE TRUNCATE_PERSON_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'" DROP_PERSON_TABLE_SQL = f"DROP TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'" TRUNCATE_PERSON_DISTINCT_ID_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}'" TRUNCATE_PERSON_DISTINCT_ID2_TABLE_SQL = ( f"TRUNCATE TABLE IF EXISTS person_distinct_id2 ON CLUSTER '{CLICKHOUSE_CLUSTER}'" ) PERSONS_TABLE = "person" PERSONS_TABLE_BASE_SQL = """ CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( id UUID, created_at DateTime64, team_id Int64, properties VARCHAR, is_identified Int8, is_deleted Int8 DEFAULT 0 {extra_fields} ) ENGINE = {engine} """ PERSONS_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSONS_TABLE, ver="_timestamp") PERSONS_TABLE_SQL = lambda: ( PERSONS_TABLE_BASE_SQL + """Order By (team_id, id) {storage_policy} """ ).format( table_name=PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=PERSONS_TABLE_ENGINE(), extra_fields=KAFKA_COLUMNS, storage_policy=STORAGE_POLICY(), ) KAFKA_PERSONS_TABLE_SQL = lambda: PERSONS_TABLE_BASE_SQL.format( table_name="kafka_" + PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=kafka_engine(KAFKA_PERSON), extra_fields="", ) # You must include the database here because of a bug in clickhouse # related to https://github.com/ClickHouse/ClickHouse/issues/10471 PERSONS_TABLE_MV_SQL = """ CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}' TO {database}.{table_name} AS SELECT id, created_at, team_id, properties, is_identified, is_deleted, _timestamp, _offset FROM {database}.kafka_{table_name} """.format( table_name=PERSONS_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE, ) GET_LATEST_PERSON_SQL = """ SELECT * FROM person JOIN ( SELECT id, max(_timestamp) as _timestamp, max(is_deleted) as is_deleted FROM person WHERE team_id = %(team_id)s GROUP BY id ) as person_max ON person.id = person_max.id AND person._timestamp = person_max._timestamp WHERE team_id = %(team_id)s AND person_max.is_deleted = 0 {query} """ GET_LATEST_PERSON_ID_SQL = """ (select id from ( {latest_person_sql} )) """.format( latest_person_sql=GET_LATEST_PERSON_SQL ) # # person_distinct_id table - use this still in queries, but this will eventually get removed. # PERSONS_DISTINCT_ID_TABLE = "person_distinct_id" PERSONS_DISTINCT_ID_TABLE_BASE_SQL = """ CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( distinct_id VARCHAR, person_id UUID, team_id Int64, _sign Int8 DEFAULT 1, is_deleted Int8 ALIAS if(_sign==-1, 1, 0) {extra_fields} ) ENGINE = {engine} """ PERSONS_DISTINCT_ID_TABLE_SQL = lambda: ( PERSONS_DISTINCT_ID_TABLE_BASE_SQL + """Order By (team_id, distinct_id, person_id) {storage_policy} """ ).format( table_name=PERSONS_DISTINCT_ID_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=CollapsingMergeTree(PERSONS_DISTINCT_ID_TABLE, ver="_sign"), extra_fields=KAFKA_COLUMNS, storage_policy=STORAGE_POLICY(), ) # :KLUDGE: We default is_deleted to 0 for backwards compatibility for when we drop `is_deleted` from message schema. # Can't make DEFAULT if(_sign==-1, 1, 0) because Cyclic aliases error. 
KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL = lambda: """ CREATE TABLE {table_name} ON CLUSTER '{cluster}' ( distinct_id VARCHAR, person_id UUID, team_id Int64, _sign Nullable(Int8), is_deleted Nullable(Int8) ) ENGINE = {engine} """.format( table_name="kafka_" + PERSONS_DISTINCT_ID_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=kafka_engine(KAFKA_PERSON_UNIQUE_ID), ) # You must include the database here because of a bug in clickhouse # related to https://github.com/ClickHouse/ClickHouse/issues/10471 PERSONS_DISTINCT_ID_TABLE_MV_SQL = """ CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}' TO {database}.{table_name} AS SELECT distinct_id, person_id, team_id, coalesce(_sign, if(is_deleted==0, 1, -1)) AS _sign, _timestamp, _offset FROM {database}.kafka_{table_name} """.format( table_name=PERSONS_DISTINCT_ID_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE, ) # # person_distinct_ids2 - new table! # PERSON_DISTINCT_ID2_TABLE = "person_distinct_id2" PERSON_DISTINCT_ID2_TABLE_BASE_SQL = """ CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( team_id Int64, distinct_id VARCHAR, person_id UUID, is_deleted Int8, version Int64 DEFAULT 1 {extra_fields} ) ENGINE = {engine} """ PERSON_DISTINCT_ID2_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_DISTINCT_ID2_TABLE, ver="version") PERSON_DISTINCT_ID2_TABLE_SQL = lambda: ( PERSON_DISTINCT_ID2_TABLE_BASE_SQL + """ ORDER BY (team_id, distinct_id) SETTINGS index_granularity = 512 """ ).format( table_name=PERSON_DISTINCT_ID2_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=PERSON_DISTINCT_ID2_TABLE_ENGINE(), extra_fields=KAFKA_COLUMNS + "\n, _partition UInt64", ) KAFKA_PERSON_DISTINCT_ID2_TABLE_SQL = lambda: PERSON_DISTINCT_ID2_TABLE_BASE_SQL.format( table_name="kafka_" + PERSON_DISTINCT_ID2_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=kafka_engine(KAFKA_PERSON_DISTINCT_ID), extra_fields="", ) # You must include the database here because of a bug in clickhouse # related to https://github.com/ClickHouse/ClickHouse/issues/10471 PERSON_DISTINCT_ID2_MV_SQL = """ CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}' TO {database}.{table_name} AS SELECT team_id, distinct_id, person_id, is_deleted, version, _timestamp, _offset, _partition FROM {database}.kafka_{table_name} """.format( table_name=PERSON_DISTINCT_ID2_TABLE, cluster=CLICKHOUSE_CLUSTER, database=CLICKHOUSE_DATABASE, ) # # Static Cohort # PERSON_STATIC_COHORT_TABLE = "person_static_cohort" PERSON_STATIC_COHORT_BASE_SQL = """ CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}' ( id UUID, person_id UUID, cohort_id Int64, team_id Int64 {extra_fields} ) ENGINE = {engine} """ PERSON_STATIC_COHORT_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_STATIC_COHORT_TABLE, ver="_timestamp") PERSON_STATIC_COHORT_TABLE_SQL = lambda: ( PERSON_STATIC_COHORT_BASE_SQL + """Order By (team_id, cohort_id, person_id, id) {storage_policy} """ ).format( table_name=PERSON_STATIC_COHORT_TABLE, cluster=CLICKHOUSE_CLUSTER, engine=PERSON_STATIC_COHORT_TABLE_ENGINE(), storage_policy=STORAGE_POLICY(), extra_fields=KAFKA_COLUMNS, ) TRUNCATE_PERSON_STATIC_COHORT_TABLE_SQL = ( f"TRUNCATE TABLE IF EXISTS {PERSON_STATIC_COHORT_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'" ) INSERT_PERSON_STATIC_COHORT = ( f"INSERT INTO {PERSON_STATIC_COHORT_TABLE} (id, person_id, cohort_id, team_id, _timestamp) VALUES" ) # # Other queries # GET_TEAM_PERSON_DISTINCT_IDS = """ SELECT distinct_id, argMax(person_id, _timestamp) as person_id FROM ( SELECT distinct_id, person_id, max(_timestamp) as _timestamp FROM 
person_distinct_id WHERE team_id = %(team_id)s %(extra_where)s GROUP BY person_id, distinct_id, team_id HAVING max(is_deleted) = 0 ) GROUP BY distinct_id """ # Query to query distinct ids using the new table, will be used if 0003_fill_person_distinct_id2 migration is complete GET_TEAM_PERSON_DISTINCT_IDS_NEW_TABLE = """ SELECT distinct_id, argMax(person_id, version) as person_id FROM person_distinct_id2 WHERE team_id = %(team_id)s %(extra_where)s GROUP BY distinct_id HAVING argMax(is_deleted, version) = 0 """ GET_PERSON_IDS_BY_FILTER = """ SELECT DISTINCT p.id FROM ({latest_person_sql}) AS p INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS pdi ON p.id = pdi.person_id WHERE team_id = %(team_id)s {distinct_query} {limit} {offset} """.format( latest_person_sql=GET_LATEST_PERSON_SQL, distinct_query="{distinct_query}", limit="{limit}", offset="{offset}", GET_TEAM_PERSON_DISTINCT_IDS="{GET_TEAM_PERSON_DISTINCT_IDS}", ) INSERT_PERSON_SQL = """ INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 0 """ INSERT_PERSON_DISTINCT_ID = """ INSERT INTO person_distinct_id SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, %(_sign)s, now(), 0 VALUES """ INSERT_PERSON_DISTINCT_ID2 = """ INSERT INTO person_distinct_id2 (distinct_id, person_id, team_id, is_deleted, version, _timestamp, _offset, _partition) SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, 0, %(version)s, now(), 0, 0 VALUES """ DELETE_PERSON_BY_ID = """ INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted) SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 1 """ DELETE_PERSON_EVENTS_BY_ID = """ ALTER TABLE events DELETE WHERE distinct_id IN ( SELECT distinct_id FROM person_distinct_id WHERE person_id=%(id)s AND team_id = %(team_id)s ) AND team_id = %(team_id)s """ INSERT_COHORT_ALL_PEOPLE_THROUGH_PERSON_ID = """ INSERT INTO {cohort_table} SELECT generateUUIDv4(), actor_id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM ( SELECT actor_id FROM ({query}) ) """ INSERT_COHORT_ALL_PEOPLE_SQL = """ INSERT INTO {cohort_table} SELECT generateUUIDv4(), id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0 FROM ( SELECT id FROM ( {latest_person_sql} ) as person INNER JOIN ( SELECT person_id, distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE person_id IN ({content_sql}) ) as pdi ON person.id = pdi.person_id WHERE team_id = %(team_id)s GROUP BY id ) """ GET_DISTINCT_IDS_BY_PROPERTY_SQL = """ SELECT distinct_id FROM ( {GET_TEAM_PERSON_DISTINCT_IDS} ) WHERE person_id IN ( SELECT id FROM ( SELECT id, argMax(properties, person._timestamp) as properties, max(is_deleted) as is_deleted FROM person WHERE team_id = %(team_id)s GROUP BY id HAVING is_deleted = 0 ) WHERE {filters} ) """ GET_DISTINCT_IDS_BY_PERSON_ID_FILTER = """ SELECT distinct_id FROM ({GET_TEAM_PERSON_DISTINCT_IDS}) WHERE {filters} """ GET_PERSON_PROPERTIES_COUNT = """ SELECT tupleElement(keysAndValues, 1) as key, count(*) as count FROM person ARRAY JOIN JSONExtractKeysAndValuesRaw(properties) as keysAndValues WHERE team_id = %(team_id)s GROUP BY tupleElement(keysAndValues, 1) ORDER BY count DESC, key ASC """ GET_ACTORS_FROM_EVENT_QUERY = """ SELECT {id_field} AS actor_id {matching_events_select_statement} FROM ({events_query}) GROUP BY actor_id {limit} {offset} """ COMMENT_DISTINCT_ID_COLUMN_SQL = ( lambda: f"ALTER TABLE person_distinct_id ON 
CLUSTER '{CLICKHOUSE_CLUSTER}' COMMENT COLUMN distinct_id 'skip_0003_fill_person_distinct_id2'" ) SELECT_PERSON_PROP_VALUES_SQL = """ SELECT value, count(value) FROM ( SELECT {property_field} as value FROM person WHERE team_id = %(team_id)s AND is_deleted = 0 AND {property_field} IS NOT NULL AND {property_field} != '' ORDER BY id DESC LIMIT 100000 ) GROUP BY value ORDER BY count(value) DESC LIMIT 20 """ SELECT_PERSON_PROP_VALUES_SQL_WITH_FILTER = """ SELECT value, count(value) FROM ( SELECT {property_field} as value FROM person WHERE team_id = %(team_id)s AND is_deleted = 0 AND {property_field} ILIKE %(value)s ORDER BY id DESC LIMIT 100000 ) GROUP BY value ORDER BY count(value) DESC LIMIT 20 """
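# Usage sketch (an assumption, not part of this file): the DDL constants above
# are lambdas so migrations can render them at run time, e.g.
#   sync_execute(PERSONS_TABLE_SQL())              # sync_execute is hypothetical here
#   sync_execute(PERSON_DISTINCT_ID2_TABLE_SQL())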
from sigvisa.learn.train_coda_models import get_shape_training_data
import numpy as np

X, y, evids = get_shape_training_data(runid=4, site="AS12", chan="SHZ",
                                      band="freq_2.0_3.0", phases=["P", ],
                                      target="amp_transfer",
                                      max_acost=np.float("inf"), min_amp=-2)

np.savetxt("X.txt", X)
np.savetxt("y.txt", y)
np.savetxt("evids.txt", evids)
import math

import numpy as np


# plt.style.use('seaborn')
# plt.rcParams['figure.figsize'] = (12, 8)
# Assumed minimal definitions for the calls below; the original excerpt
# exercises these helpers without defining them.
def greet():
    print("Hello!")

def greet_again(message):
    print(message)

def greet_again_with_type(message: str):
    # type hints are not enforced at run time, so an int argument also works
    print(message)

def multiple_types(x):
    # deliberately returns different types depending on the input
    return "negative" if x < 0 else x

def var_arguments(*args):
    print(args)

def key_arg(mapping):
    for key, value in mapping.items():
        print(key, "->", value)


greet()
greet_again("Hello Again")
greet_again_with_type("One Last Time")
greet_again_with_type(1234)

# multiple types
print(multiple_types(-2))
print(multiple_types(10))

# variable arguments
var_arguments(1, 2, 3)
a = [1, 2, 3]
var_arguments(a)
var_arguments(*a)  # expanding with *

b = {"first": "python", "second": "python again"}
key_arg(b)
from ad9833 import AD9833

# DUMMY classes for testing without board
# (assumed minimal stand-ins; the original excerpt elided their bodies)
class SBI:
    def send(self, data):
        pass

class Pin:
    def value(self, *args):
        pass

# Code
SBI1 = SBI()
PIN3 = Pin()

wave = AD9833(SBI1, PIN3)

wave.set_freq(14500)
wave.set_type(2)
wave.send()

print(wave.shape_type)
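# Background sketch (an assumption about the driver, not taken from this file):
# an AD9833 encodes the output frequency as a 28-bit word relative to its
# master clock, commonly 25 MHz:
F_MCLK = 25_000_000
freq_word = round(14500 * 2 ** 28 / F_MCLK)  # ~155693 for 14.5 kHz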
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

from six.moves import range
import logging
import abstracthandler
import os
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple implementation of federated evaluation."""

import collections
from typing import Callable, Optional

import tensorflow as tf

from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.framework import optimizer_utils

# Convenience aliases.
SequenceType = computation_types.SequenceType


def build_federated_evaluation(
    model_fn: Callable[[], model_lib.Model],
    broadcast_process: Optional[measured_process.MeasuredProcess] = None,
    use_experimental_simulation_loop: bool = False,
) -> computation_base.Computation:
  """Builds the TFF computation for federated evaluation of the given model.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This
      method must *not* capture TensorFlow tensors or variables and use them.
      The model must be constructed entirely from scratch on each invocation,
      returning the same pre-constructed model each call will result in an
      error.
    broadcast_process: A `tff.templates.MeasuredProcess` that broadcasts the
      model weights on the server to the clients. It must support the
      signature `(input_values@SERVER -> output_values@CLIENTS)` and have
      empty state. If set to default None, the server model is broadcast to
      the clients using the default tff.federated_broadcast.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A federated computation (an instance of `tff.Computation`) that accepts
    model parameters and federated data, and returns the evaluation metrics
    as aggregated by `tff.learning.Model.federated_output_computation`.
  """
  if broadcast_process is not None:
    if not isinstance(broadcast_process, measured_process.MeasuredProcess):
      raise ValueError('`broadcast_process` must be a `MeasuredProcess`, got '
                       f'{type(broadcast_process)}.')
    if optimizer_utils.is_stateful_process(broadcast_process):
      raise ValueError(
          'Cannot create a federated evaluation with a stateful '
          'broadcast process, must be stateless, has state: '
          f'{broadcast_process.initialize.type_signature.result!r}')
  # Construct the model first just to obtain the metadata and define all the
  # types needed to define the computations that follow.
  # TODO(b/124477628): Ideally replace the need for stamping throwaway models
  # with some other mechanism.
  with tf.Graph().as_default():
    model = model_fn()
    model_weights_type = model_utils.weights_type_from_model(model)
    batch_type = computation_types.to_type(model.input_spec)

  return server_eval
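# Hypothetical usage sketch (the names below are illustrative, not from this
# module):
#
#   evaluation = build_federated_evaluation(model_fn)
#   metrics = evaluation(server_state.model, federated_test_data)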
from __future__ import print_function
import os
import shutil
import hashlib
import requests
import click
from tempfile import NamedTemporaryFile
from hashlib import sha256
from os.path import expanduser, join, exists, basename
from .utils import HumanSize
from .tar import extract_layer
from . import trust
from . import container
from .colorhelper import print_info, print_error, print_warn, print_success
from .colorhelper import success
from .image_index import get_url
from clint.textui import progress
from dateutil.parser import parse as parsedate
from datetime import datetime

CACHE_PATH = join(expanduser("~"), ".pylibcontainer", "images_cache")


def download(image_url):
    """ Download image (if not found in cache) and return its filename """
    response = requests.head(image_url)
    file_size = remote_file_size = int(response.headers.get("Content-Length"))
    remote_last_modified = parsedate(response.headers.get("Last-Modified")).replace(
        tzinfo=None
    )
    remote_is_valid = response.status_code == 200 and file_size and remote_last_modified

    # Check if image is on cache
    cache = Cache()
    cached_image = cache.get(image_url)
    if cached_image:
        if remote_is_valid:
            cache_fn, cache_hash, last_modified, file_size = cached_image
            if remote_file_size == file_size and remote_last_modified < last_modified:
                print_info("Using file from cache", CACHE_PATH)
                return cache_hash, cache_fn
            print_info("Downloading new remote file because an update was found")
        else:
            print_warn("Unable to check the status for " + image_url)
            print_warn("Assuming local cache is valid")

    # Not cached, and no valid remote information was found
    if not remote_is_valid:
        print_error(
            "Unable to get file, http_code=%s, size=%s, last_modified=%s"
            % (response.status_code, remote_file_size, remote_last_modified)
        )
        exit(2)

    # Download image
    print_info(
        "Downloading image... ",
        "{0} [{1:.2S}]".format(basename(image_url), HumanSize(file_size)),
    )
    remote_sha256 = hashlib.sha256()
    response = requests.get(image_url, stream=True)
    with NamedTemporaryFile(delete=False) as tmp_file:
        for chunk in progress.bar(
            response.iter_content(chunk_size=1024), expected_size=(file_size / 1024) + 1
        ):
            if chunk:
                remote_sha256.update(chunk)
                tmp_file.write(chunk)
        tmp_file.flush()

    # Verify image integrity
    trust_verify = trust.verify(image_url, tmp_file.name, remote_sha256.hexdigest())
    if not trust_verify or not trust_verify.valid or not trust_verify.username:
        print_error("Integrity/authenticity error - GPG signature mismatch!")
        exit(3)
    print("{0:>10}: {1}".format("GPG Signer", success(trust_verify.username)))
    print("{0:>10}: {1}".format("GPG ID", success(trust_verify.pubkey_fingerprint)))
    print("{0:>10}: {1}".format("Creation", success(trust_verify.creation_date)))
    return cache.put(tmp_file.name, image_url)
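# Usage sketch (hypothetical URL; `Cache` is defined elsewhere in the package):
#
#   image_hash, image_fn = download("https://example.com/images/rootfs.tar.xz")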
from DocTest.CompareImage import CompareImage
import pytest
from pathlib import Path
import numpy
from .labels_tableview import LabelsTableView
import functools
import numpy as np
import math
import argparse
import ags_solver
import go_problems
import nlopt
import sys
from Simple import SimpleTuner
import itertools

from scipy.spatial import Delaunay
from scipy.optimize import differential_evolution
from scipy.optimize import basinhopping
from sdaopt import sda
from stochopy import Evolutionary
from pyOpt import Optimization
from pyOpt import MIDACO
import pyOpt
from shgo import shgo

from benchmark_tools.core import Solver, solve_class, GrishClass, GKLSClass
from benchmark_tools.plot import plot_cmcs
from benchmark_tools.stats import save_stats, compute_stats
from bayes_opt import BayesianOptimization

# The wrapper classes referenced below (SCDEWrapper, AGSWrapper, NLOptWrapper,
# and friends) are defined elsewhere in the original file.
algos = {'scd': SCDEWrapper, 'ags': AGSWrapper,
         'agsd': functools.partial(AGSWrapper, mixedFast=True),
         'direct': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT),
         'directl': functools.partial(NLOptWrapper, method=nlopt.GN_ORIG_DIRECT_L),
         'stogo': functools.partial(NLOptWrapper, method=nlopt.GD_STOGO),
         'mlsl': functools.partial(NLOptWrapper, method=nlopt.G_MLSL_LDS),
         'crs': functools.partial(NLOptWrapper, method=nlopt.GN_CRS2_LM),
         'simple': SimpleWrapper, 'scb': SCBasinhoppingWrapper,
         'sda': SDAWrapper, 'stochopy': StochOpyWrapper,
         'shgo': SHGOWrapper, 'pyopt': PyOptWrapper}

algo2cature = {'scd': 'Scipy DE', 'ags': 'AGS', 'direct': 'DIRECT',
               'agsd': 'AGSd', 'directl': 'DIRECTl', 'simple': 'Simple',
               'stogo': 'StoGO', 'mlsl': 'MLSL', 'crs': 'CRS',
               'scb': 'Scipy B-H', 'sda': 'SDA', 'stochopy': 'Stochopy',
               'pysot': 'PySOT', 'pyopt': 'PyOpt', 'shgo': 'SHGO'}

serg_eps = {2: 0.01, 3: 0.01, 4: math.pow(1e-6, 1. / 4), 5: math.pow(1e-7, 1. / 5)}

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Sample for AGS solver')
    parser.add_argument('--max_iters', type=int, default=10000,
                        help='limit of iterations for the method')
    parser.add_argument('--problems_class', type=str,
                        choices=['grish', 'gklss', 'gklsh'], default='grish')
    parser.add_argument('--algo', type=str, choices=algos.keys(), default='scd')
    parser.add_argument('--problems_dim', type=int, default=2)
    parser.add_argument('--verbose', action='store_true',
                        help='Print additional info to console')
    parser.add_argument('--dist_stop', action='store_true',
                        help='Stop algorithm when the next point is close enough to the optimum')
    parser.add_argument('--serg_eps', action='store_true')
    parser.add_argument('--stats_fname', type=str, default='')

    main(parser.parse_args())
import arcpy
import logging
import pathlib
import subprocess

import gdb
import cx_sde
n = input('Digite algo: ')
print('O tipo primitivo da variável é: ', type(n))
print('O que foi digitado é alfa numérico? ', n.isalnum())
print('O que foi digitado é alfabético? ', n.isalpha())
print('O que foi digitado é um decimal? ', n.isdecimal())
print('O que foi digitado é minúsculo? ', n.islower())
print('O que foi digitado é numérico? ', n.isnumeric())
print('O que foi digitado pode ser impresso? ', n.isprintable())
print('O que foi digitado é apenas espaço? ', n.isspace())
print('O que foi digitado está capitalizada? ', n.istitle())
print('O que foi digitado é maiúsculo? ', n.isupper())
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
from mlp import mlp

x = ones((1, 40)) * linspace(0, 1, 40)
t = sin(2 * pi * x) + cos(2 * pi * x) + np.random.randn(40) * 0.2
x = transpose(x)
t = transpose(t)

n_hidden = 3
eta = 0.25
n_iterations = 101

plt.plot(x, t, '.')
plt.show()

train = x[0::2, :]
test = x[1::4, :]
valid = x[3::4, :]
train_targets = t[0::2, :]
test_targets = t[1::4, :]
valid_targets = t[3::4, :]

net = mlp(train, train_targets, n_hidden, out_type='linear')
net.mlptrain(train, train_targets, eta, n_iterations)
best_err = net.earlystopping(train, train_targets, valid, valid_targets, eta)
# 2020 . all rights reserved.
# <llllllllll@kakao.com>
# Apache License 2.0

from .small import *
from .medium import *
from .large import *
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
# Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# coding: utf-8

"""
    Tekton

    Tekton Pipeline  # noqa: E501

    The version of the OpenAPI document: v0.17.2
    Generated by: https://openapi-generator.tech
"""

import pprint
import re  # noqa: F401

import six

from tekton_pipeline.configuration import Configuration


def to_str(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()

def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, V1beta1EmbeddedTask):
        return False

    return self.to_dict() == other.to_dict()

def __ne__(self, other):
    """Returns true if both objects are not equal"""
    if not isinstance(other, V1beta1EmbeddedTask):
        return True

    return self.to_dict() != other.to_dict()
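# In the full generated module these functions are methods of the
# V1beta1EmbeddedTask model class (elided from this excerpt); roughly:
#
#   class V1beta1EmbeddedTask(object):
#       openapi_types = {...}   # attribute name -> type string
#       def __init__(self, ...): ...
#       def to_dict(self): ...  # consumed by to_str above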
import zmq
import curses

import argparse
import configparser
import threading
import time

from curses import wrapper

from client import Client
from ui import UI


if '__main__' == __name__:
    try:
        args = parse_args()
        wrapper(main)
    except KeyboardInterrupt as e:
        pass
    except:
        raise
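# `parse_args` and `main` are not part of this excerpt; assumed minimal shapes,
# defined here purely for illustration (they would precede the guard above):
def parse_args():
    parser = argparse.ArgumentParser(description='terminal chat client')
    parser.add_argument('--config', default='config.ini')
    return parser.parse_args()

def main(stdscr):
    # curses.wrapper passes the initialized screen object here
    pass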
import anonlink
from anonlink.candidate_generation import _merge_similarities

from entityservice.object_store import connect_to_object_store
from entityservice.async_worker import celery, logger
from entityservice.settings import Config as config
from entityservice.tasks.base_task import TracedTask
from entityservice.tasks.permutation import save_and_permute
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-24 13:41
from __future__ import unicode_literals

from django.db import migrations, models
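# The generated migration would continue with a Migration class; a minimal
# assumed skeleton (the actual operations were not captured in this excerpt):
class Migration(migrations.Migration):

    dependencies = []

    operations = []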