content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from collections import defaultdict, deque
from functools import partial, wraps
import logging
import os
import socket
from threading import Event, Lock, Thread
import traceback
from Queue import Queue
from cassandra.connection import (Connection, ResponseWaiter, ConnectionShutdown,
ConnectionBusy, NONBLOCKING)
from cassandra.decoder import RegisterMessage
from cassandra.marshal import int32_unpack
import cassandra.io.libevwrapper as libev
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # ignore flake8 warning: # NOQA
log = logging.getLogger(__name__)
_loop = libev.Loop()
_loop_notifier = libev.Async(_loop)
_loop_notifier.start()
# prevent _loop_notifier from keeping the loop from returning
_loop.unref()
_loop_started = None
_loop_lock = Lock()
class LibevConnection(Connection):
"""
An implementation of :class:`.Connection` that utilizes libev.
"""
_total_reqd_bytes = 0
_read_watcher = None
_write_watcher = None
_socket = None
@classmethod
| [
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
198,
6738,
1257,
310,
10141,
1330,
13027,
11,
27521,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
17802,
198,
6738,
4704,
278,
1330,
8558,
11,
13656,
11,
14122,
198,
11748,
12854,
1891,
... | 2.977901 | 362 |
# Generated by Django 3.0.7 on 2020-11-13 06:46
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
22,
319,
12131,
12,
1157,
12,
1485,
9130,
25,
3510,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python
import os.path
import argparse
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
plt.rcParams['font.sans-serif'] = 'Noto Sans'
plt.rcParams['mathtext.fontset'] = 'stixsans'
import pandas as pd
import util
from util import IMAGE_HEIGHT_PX, IMAGE_WIDTH_PX
from util import length_name, time_name, speed_name
from util import displacement_component_name, velocity_component_name
from util import convert_length
LENGTH_UNIT = 'mm'
TIME_UNIT = 'min'
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
13,
6978,
198,
11748,
1822,
29572,
198,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
48,
83,
20,
46384,
11537,
198,
11748,
2603,
29487,
8019... | 2.652381 | 210 |
import unittest
import time
from m2m_token.errors import ConfigError
from m2m_token.token import generate
| [
11748,
555,
715,
395,
201,
198,
11748,
640,
201,
198,
201,
198,
6738,
285,
17,
76,
62,
30001,
13,
48277,
1330,
17056,
12331,
201,
198,
6738,
285,
17,
76,
62,
30001,
13,
30001,
1330,
7716,
201,
198,
201,
198
] | 2.923077 | 39 |
import logging
| [
11748,
18931,
628,
628,
628,
628,
628,
628,
628
] | 3.111111 | 9 |
from django.urls import path, include
from .views import AllProductsView, OrderView
from rest_framework.routers import DefaultRouter, SimpleRouter
app_name = 'shop'
router = SimpleRouter()
router.register('products', AllProductsView)
router.register('order', OrderView)
urlpatterns = [
]
urlpatterns += router.urls
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
11,
2291,
198,
6738,
764,
33571,
1330,
1439,
48650,
7680,
11,
8284,
7680,
198,
6738,
1334,
62,
30604,
13,
472,
1010,
1330,
15161,
49,
39605,
11,
17427,
49,
39605,
198,
198,
1324,
62,
3672,... | 3.278351 | 97 |
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.setup.install import create_default_cash_flow_mapper_templates
| [
2,
15069,
357,
66,
8,
2177,
11,
39313,
27768,
290,
25767,
669,
198,
2,
13789,
25,
22961,
3611,
5094,
13789,
410,
18,
13,
4091,
5964,
13,
14116,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
5... | 3.485294 | 68 |
import os
from fabric.api import *
try:
from config.fabric import github_username, github_password
github_username = github_username
github_access_token = github_password
except ImportError:
print("Set up config/fabric.py file")
env.hosts = ['localhost']
env.origin = "https://github.com/MaxASchwarzer/RedditClassifier"
env.branch = "master"
git_repository = "/home/max/RedditClassifier"
| [
11748,
28686,
198,
6738,
9664,
13,
15042,
1330,
1635,
198,
198,
28311,
25,
198,
220,
220,
220,
422,
4566,
13,
36434,
1173,
1330,
33084,
62,
29460,
11,
33084,
62,
28712,
198,
220,
220,
220,
33084,
62,
29460,
796,
33084,
62,
29460,
198,... | 3.037313 | 134 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import threading
import portpicker
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler.integration_test import mnist_testing_utils
def _model_setup():
"""Set up a MNIST Keras model for testing purposes.
Builds a MNIST Keras model and returns model information.
Returns:
A tuple of (batch_size, steps, train_dataset, mode)
"""
context.set_log_device_placement(True)
batch_size = 64
steps = 2
with collective_strategy.CollectiveAllReduceStrategy().scope():
# TODO(b/142509827): In rare cases this errors out at C++ level with the
# "Connect failed" error message.
train_ds, _ = mnist_testing_utils.mnist_synthetic_dataset(batch_size, steps)
model = mnist_testing_utils.get_mnist_model((28, 28, 1))
return batch_size, steps, train_ds, model
if __name__ == '__main__':
multi_process_runner.test_main()
| [
2,
15069,
12131,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 3.469622 | 609 |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from featnames import ACCEPT, REJECT, NORM, CON
from plot.const import BIN_TICKS, TRICOLOR, COLORS, SLRBO_TICKS
from plot.util import save_fig, get_name
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
6738,
2218,
14933,
1330,
15859,
8905,
51,
11,
4526,
23680,
11,
25273,
44,
11,
7102,
198,
6738,
7... | 2.829268 | 82 |
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
27741,
12982,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
113... | 3.541667 | 48 |
from functools import wraps
from itertools import count
from typing import Callable
from logzero import logger
from chaoslib.types import Journal
counter = None
@keepcount
@initcounter
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
340,
861,
10141,
1330,
954,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
6738,
2604,
22570,
1330,
49706,
198,
198,
6738,
11918,
8019,
13,
19199,
1330,
4913,
198,
198,
24588,
796,
6045,
628... | 3.75 | 52 |
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
ACC_PIXEL = 3
RELATE_ERR = 0.05
# image's width
IMAGE_WIDTH = 416
# image's height
IMAGE_HEIGHT = 128
# image's width
IMAGE_ORG_WIDTH = 1248
# image's height
IMAGE_ORG_HEIGHT = 384
DEPTH_DIVIDING = 256.0
if __name__ == "__main__":
total = 0
num = 0
errTotal = 0
for i in xrange(200):
imgPath = './ResultImg/%06d_10.png' % (i)
groundPath = '/home1/Documents/Database/Kitti/testing/disp_occ_0/%06d_10.png' % (i)
# if i % 5 != 0:
# continue
img = Image.open(imgPath)
imgGround = Image.open(groundPath)
acc, mas = Acc(img, imgGround)
total = total + acc
errTotal = mas + errTotal
num = num + 1
print str(i) + ':' + str(acc) + ',' + str(mas)
print 'total :%f,%f' % (total/num, errTotal/num)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
26861,
62,
47,
10426,
3698,
796,
513,
198,
16448,
6158,
62,
1137,
49,
796,
657,
13,
2713,
19... | 2.094203 | 414 |
import os
import sys
import numpy as np
import random
from metrics_ddie import ddie_compute_metrics
from statsmodels.stats import contingency_tables
_, output_dir1, output_dir2 = sys.argv
preds1 = np.load(os.path.join(output_dir1, 'preds.npy'))
preds2 = np.load(os.path.join(output_dir2, 'preds.npy'))
labels = np.load(os.path.join(output_dir1, 'labels.npy'))
preds1 = np.argmax(preds1, axis=1)
preds2 = np.argmax(preds2, axis=1)
result1 = ddie_compute_metrics('ddie', preds1, labels, every_type=True)
result2 = ddie_compute_metrics('ddie', preds2, labels, every_type=True)
print(result1, result2)
apbp, apbn, anbp, anbn = 0, 0, 0, 0
for p1, p2, label in zip(preds1, preds2, labels):
if p1 == label:
if p2 == label:
apbp += 1
else:
apbn += 1
else:
if p2 == label:
anbp += 1
else:
anbn += 1
ar = np.array([[apbp, anbp], [apbn, anbn]])
print(ar)
p = contingency_tables.mcnemar(ar).pvalue
print(p)
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
20731,
62,
1860,
494,
1330,
288,
11979,
62,
5589,
1133,
62,
4164,
10466,
198,
6738,
9756,
27530,
13,
34242,
1330,
38820,
62,
83,
2977,
198... | 2.115385 | 468 |
# Denma the Owner (1052004) | Henesys Plastic Surgery (100000103)
# Male: 20000 - 20099 (Motivated to Tess)
# Female: 21000 - 21099 (Defiant to Futuroid)
from net.swordie.ms.loaders import StringData
options = []
al = chr.getAvatarData().getAvatarLook()
faceColour = al.getFace() % 1000 - al.getFace() % 100
if al.getGender() == 0:
baseID = 20000
else:
baseID = 21000
for i in range(100):
face = baseID + faceColour + i
if not StringData.getItemStringById(face) is None:
options.append(face)
answer = sm.sendAskAvatar("Choose your new face!", False, False, options)
if answer < len(options):
sm.changeCharacterLook(options[answer])
| [
2,
5601,
2611,
262,
23853,
357,
13348,
15724,
8,
930,
6752,
274,
893,
33731,
39037,
357,
49388,
486,
3070,
8,
198,
2,
12674,
25,
939,
405,
532,
939,
2079,
357,
47733,
30829,
284,
39412,
8,
198,
2,
15396,
25,
2310,
830,
532,
20064,
... | 2.744856 | 243 |
import sys
sys.path.append('./Controller')
from TicTacToe import TicTacToe
from HumanPlayer import HumanPlayer
from MonteCarloTreeSearchPlayer import MonteCarloTreeSearchPlayer
from MonteCarloTreeSearchPlayerV2 import MonteCarloTreeSearchPlayerV2
if __name__ == '__main__':
p1 = MonteCarloTreeSearchPlayer("MCTS1")
p2 = MonteCarloTreeSearchPlayerV2("MCTS2")
hp = HumanPlayer("Nugusha")
A_TicTacToe = TicTacToe(3,3,p1,p2)
A_TicTacToe.run() | [
11748,
25064,
198,
17597,
13,
6978,
13,
33295,
7,
4458,
14,
22130,
11537,
198,
6738,
309,
291,
51,
330,
2514,
68,
1330,
309,
291,
51,
330,
2514,
68,
198,
6738,
5524,
14140,
1330,
5524,
14140,
198,
6738,
22489,
9914,
5439,
27660,
18243... | 2.564246 | 179 |
from setuptools import setup
setup(
name='dictlisttools',
version='1.0.0',
packages=['dictlisttools', 'dictlisttools.tests'],
test_suite='dictlisttools.tests',
url='https://github.com/anthonyblanchflower/dictlisttools',
author='Anthony Blanchflower',
author_email='anthony.blanchflower@btinternet.com',
description='Functions for manipulating a list of dictionaries'
)
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
11600,
4868,
31391,
3256,
198,
220,
220,
220,
2196,
11639,
16,
13,
15,
13,
15,
3256,
198,
220,
220,
220,
10392,
28,
17816,
11600,
4868,
31391,
... | 2.913043 | 138 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 08:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
24,
13,
17,
319,
1584,
12,
2999,
12,
1983,
8487,
25,
1731,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
1... | 2.724638 | 69 |
from lark import Lark
import turtle
| [
6738,
300,
668,
1330,
406,
668,
198,
11748,
28699,
198,
197,
197,
197,
198
] | 2.857143 | 14 |
from enum import Enum
from common.direction import Direction
from robot.robot import Robot
from algorithm.node import *
from math import trunc, pi
import dearpygui.dearpygui as dpg | [
6738,
33829,
1330,
2039,
388,
198,
6738,
2219,
13,
37295,
1330,
41837,
198,
6738,
9379,
13,
305,
13645,
1330,
16071,
198,
6738,
11862,
13,
17440,
1330,
1635,
198,
6738,
10688,
1330,
40122,
11,
31028,
198,
198,
11748,
390,
5117,
88,
4831... | 3.693878 | 49 |
from sentry.processors import Processor
| [
6738,
1908,
563,
13,
14681,
669,
1330,
32893,
628
] | 4.555556 | 9 |
from rttapi.factory import LocationList, ServiceList
AVAIL_RESOURCES = ["location", "service"]
def resource(rtype, credenvelope):
"""Resource factory method."""
if rtype == "location":
resource = LocationList(credenvelope)
elif rtype == "service":
resource = ServiceList(credenvelope)
else:
raise UnknownResource('{0} not found. Please select from {1}'.format(rtype,
AVAIL_RESOURCES))
return resource
| [
6738,
374,
926,
15042,
13,
69,
9548,
1330,
13397,
8053,
11,
4809,
8053,
220,
628,
198,
10116,
32,
4146,
62,
19535,
2606,
7397,
1546,
796,
14631,
24886,
1600,
366,
15271,
8973,
628,
198,
198,
4299,
8271,
7,
81,
4906,
11,
2600,
268,
1... | 2.429293 | 198 |
from pyChemometrics import ChemometricsScaler, ChemometricsPLS
import numpy as np
np.random.seed(0)
import pandas as pds
# Use the standard datasets
t_dset = pds.read_csv('./tests/test_data/regression.csv')
xmat = t_dset.iloc[:, 1:4].values
y = t_dset.iloc[:, 0].values
y = y[np.newaxis].T
mc_scaler = ChemometricsScaler(0)
uv_scaler = ChemometricsScaler(1)
par_scaler = ChemometricsScaler(1/2)
xmat_mc = mc_scaler.fit_transform(xmat)
y_mc = mc_scaler.fit_transform(y)
xmat_uv = uv_scaler.fit_transform(xmat)
y_uv = uv_scaler.fit_transform(y)
xmat_par = par_scaler.fit_transform(xmat)
y_par = par_scaler.fit_transform(y)
np.savetxt('./tests/test_data/scaler_xmat_mc.csv', xmat_mc, fmt='%.18e', delimiter=',', newline='\n',
header='', footer='', comments='#')
np.savetxt('./tests/test_data/scaler_xmat_uv.csv', xmat_uv, fmt='%.18e', delimiter=',', newline='\n',
header='', footer='', comments='#')
np.savetxt('./tests/test_data/scaler_xmat_par.csv', xmat_par, fmt='%.18e', delimiter=',', newline='\n',
header='', footer='', comments='#')
np.savetxt('./tests/test_data/scaler_y_mc.csv', y_mc, fmt='%.18e', delimiter=',', newline='\n',
header='', footer='', comments='#')
np.savetxt('./tests/test_data/scaler_y_uv.csv', y_uv, fmt='%.18e', delimiter=',', newline='\n',
header='', footer='', comments='#')
np.savetxt('./tests/test_data/scaler_y_par.csv', y_par, fmt='%.18e', delimiter=',', newline='\n',
header='', footer='', comments='#')
| [
6738,
12972,
41829,
908,
10466,
1330,
12870,
908,
10466,
3351,
36213,
11,
12870,
908,
10466,
6489,
50,
198,
11748,
299,
32152,
355,
45941,
198,
198,
37659,
13,
25120,
13,
28826,
7,
15,
8,
198,
198,
11748,
19798,
292,
355,
279,
9310,
1... | 2.231222 | 679 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
os.chdir('/Users/pablobottero/github/master/python/data_analysis')
bikes2011 = pd.read_csv('rentals_weather_2011.csv')
del rentals_weather_2011['index2']
del rentals_weather_2011['dteday_y']
bikes2011 = bikes2011.rename(columns={'dteday_x':'dteday'})
bikes2012 = pd.read_csv('rentals_weather_2012.csv', sep=';', decimal=',')
bikes11_12 = bikes2011.append(bikes2012, ignore_index=True)
rentals_weather_11_12 = rentals_weather_2011.append(rentals_weather_2012, ignore_index=True)
wbr=rentals_weather_11_12
mytable = wbr.groupby(['weathersit']).size()
print(mytable)
mytable.sum()
mytable2 = (mytable/n)*100
print(mytable2)
mytable3 = round(mytable2,1)
#Barchart1
bar_list = ['Sunny', 'Cloudy', 'Rainy']
plt.bar(bar_list, mytable2)
#Barchart 2
bar_list = ['Sunny', 'Cloudy', 'Rainy']
plt.bar(bar_list, mytable2, edgecolor='black')
plt.ylabel('Percentage')
plt.title('Figure 1. Percentage of weather situations')
plt.text( 1.7 , 50, 'n: 731') | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
2948... | 2.478161 | 435 |
from uvicontainer.config import Config
from uvicontainer.main import BaseServer, TCPServer, UDPServer, main, run
__version__ = "0.1.1rc1"
__all__ = ["main", "run", "Config", "BaseServer", "TCPServer", "UDPServer"]
| [
6738,
334,
25531,
756,
10613,
13,
11250,
1330,
17056,
198,
6738,
334,
25531,
756,
10613,
13,
12417,
1330,
7308,
10697,
11,
17283,
3705,
18497,
11,
43700,
3705,
18497,
11,
1388,
11,
1057,
198,
198,
834,
9641,
834,
796,
366,
15,
13,
16,... | 2.828947 | 76 |
from .dataset import dataset
from ..image import aflw_image
| [
6738,
764,
19608,
292,
316,
1330,
27039,
198,
6738,
11485,
9060,
1330,
257,
2704,
86,
62,
9060,
198
] | 3.333333 | 18 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 19:55:46 2013
@author: edouard.duchesnay@cea.fr
@author: benoit.da_mota@inria.fr
@author: jinpeng.li@cea.fr
"""
from sklearn import datasets
from sklearn.svm import LinearSVC as SVM
from sklearn.lda import LDA
from sklearn.feature_selection import SelectKBest
X, y = datasets.make_classification(n_samples=12, n_features=10,
n_informative=2, random_state=1)
# Build sequential Pipeline
# -------------------------
# 2 SelectKBest (Estimator)
# |
# SVM Classifier (Estimator)
from epac import Pipe
pipe = Pipe(SelectKBest(k=2), SVM())
pipe.run(X=X, y=y)
# The downstream data-flow is a keyword arguments (dict) containing X and y.
# It will pass through each processing node, SelectKBest(k=2) and SVM.
# Each node calls the "transform" method, that take a dictionary as input
# and produces a dictionary as output. The output is passed to the next node.
# The return value of the run is simply agregation of the outputs (dict) of
# the leaf nodes
for leaf in pipe.walk_leaves():
print leaf.load_results()
# The result of each branch of the tree is stored in the corresponding leaf.
# An iteration on all the leaves of a tree can return all the results
# of the previous top-down operation on the tree
# To save the results of the top-down operation (run) on the disk, it is
# possible to convert it to CSV format
from epac import export_leaves_csv
export_leaves_csv(pipe, 'my_result_run.csv')
## Parallelization
## ===============
# Multi-classifiers
# -----------------
# Methods Methods (Splitter)
# / \
# SVM(C=1) SVM(C=10) Classifiers (Estimator)
from epac import Methods
multi = Methods(SVM(C=1), SVM(C=10))
multi.run(X=X, y=y)
print multi.reduce()
# Reduce format outputs into "ResultSet" which is a dict-like structure
# which contains the "keys" of the methods that have beeen used.
# You can also export the results of the bottom-up operation (reduce) to CSV
from epac import export_resultset_csv
export_resultset_csv(multi.reduce(), 'my_result_reduce.csv')
# Methods Methods (Splitter)
# / \
# SVM(l1, C=1) SVM(l1, C=10) ..... SVM(l2, C=10) Classifiers (Estimator)
svms = Methods(*[SVM(loss=loss, C=C)
for loss in ("l1", "l2") for C in [1, 10]])
svms.run(X=X, y=y)
print svms.reduce()
# Parallelize sequential Pipeline: Anova(k best selection) + SVM.
# Methods Methods (Splitter)
# / | \
# 1 5 10 SelectKBest (Estimator)
# | | |
# SVM SVM SVM Classifiers (Estimator)
anovas_svm = Methods(*[Pipe(SelectKBest(k=k), SVM()) for k in [1, 5, 10]])
anovas_svm.run(X=X, y=y)
print anovas_svm.reduce()
# Cross-validation
# ----------------
# CV of LDA
# CV (Splitter)
# / | \
# 0 1 2 Folds (Slicer)
# | |
# Methods (Splitter)
# / \
# LDA SVM Classifier (Estimator)
from epac import CV, Methods
cv = CV(Methods(LDA(), SVM()))
cv.run(X=X, y=y)
print cv.reduce()
# Model selection using CV
# ------------------------
# CVBestSearchRefit
# Methods (Splitter)
# / \
# SVM(C=1) SVM(C=10) Classifier (Estimator)
from epac import Pipe, CVBestSearchRefit, Methods
# CV + Grid search of a simple classifier
wf = CVBestSearchRefit(Methods(SVM(C=1), SVM(C=10)))
wf.run(X=X, y=y)
print wf.reduce()
# Feature selection combined with SVM and LDA
# CVBestSearchRefit
# Methods (Splitter)
# / \
# KBest(1) KBest(5) SelectKBest (Estimator)
# |
# Methods (Splitter)
# / \
# LDA() SVM() ... Classifiers (Estimator)
pipelines = Methods(*[Pipe(SelectKBest(k=k), Methods(LDA(), SVM()))
for k in [1, 5]])
print [n for n in pipelines.walk_leaves()]
best_cv = CVBestSearchRefit(pipelines)
best_cv.run(X=X, y=y)
best_cv.reduce()
# Put it in an outer CV
cv = CV(best_cv)
cv.run(X=X, y=y)
cv.reduce()
# Perms + Cross-validation of SVM(linear) and SVM(rbf)
# -------------------------------------
# Perms Perm (Splitter)
# / | \
# 0 1 2 Samples (Slicer)
# |
# CV CV (Splitter)
# / | \
# 0 1 2 Folds (Slicer)
# |
# Methods Methods (Splitter)
# / \
# SVM(linear) SVM(rbf) Classifiers (Estimator)
from sklearn.svm import SVC
from epac import Perms, CV, Methods
perms_cv_svm = Perms(CV(Methods(*[SVC(kernel="linear"), SVC(kernel="rbf")])))
perms_cv_svm.run(X=X, y=y)
perms_cv_svm.reduce()
# Run with soma-workflow for multi-processes
from epac import SomaWorkflowEngine
sfw_engine = SomaWorkflowEngine(tree_root=perms_cv_svm,
num_processes=2,
)
perms_cv_svm = sfw_engine.run(X=X, y=y)
perms_cv_svm.reduce()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
2365,
2310,
678,
25,
2816,
25,
3510,
2211,
198,
198,
31,
9800,
25,
1225,
280,
446,
13,
646,
2052,
77,
323,
31,
344,
64,
13,
8310,
198,
... | 2.180592 | 2,298 |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import cugraph
import pandas as pd
import networkx as nx
import numpy as np
import dask_cudf
import os
from cugraph.dask.common.mg_utils import (get_client)
#
# Datasets
#
DATASETS_UNDIRECTED = ['../datasets/karate.csv', '../datasets/dolphins.csv']
DATASETS_UNRENUMBERED = ['../datasets/karate-disjoint.csv']
DATASETS = ['../datasets/karate-disjoint.csv',
'../datasets/dolphins.csv',
'../datasets/netscience.csv']
# '../datasets/email-Eu-core.csv']
STRONGDATASETS = ['../datasets/dolphins.csv',
'../datasets/netscience.csv',
'../datasets/email-Eu-core.csv']
DATASETS_KTRUSS = [('../datasets/polbooks.csv',
'../datasets/ref/ktruss/polbooks.csv')]
DATASETS_SMALL = ['../datasets/karate.csv',
'../datasets/dolphins.csv',
'../datasets/polbooks.csv']
def random_edgelist(e=1024, ef=16,
dtypes={"src": np.int32, "dst": np.int32, "val": float},
drop_duplicates=True, seed=None):
""" Create a random edge list
Parameters
----------
e : int
Number of edges
ef : int
Edge factor (average number of edges per vertex)
dtypes : dict
Mapping of column names to types.
Supported type is {"src": int, "dst": int, "val": float}
drop_duplicates
Drop duplicates
seed : int (optional)
Randomstate seed
Examples
--------
>>> from cugraph.tests import utils
>>> # genrates 20 df with 100M edges each and write to disk
>>> for x in range(20):
>>> df = utils.random_edgelist(e=100000000, ef=64,
>>> dtypes={'src':np.int32, 'dst':np.int32},
>>> seed=x)
>>> df.to_csv('df'+str(x), header=False, index=False)
>>> #df.to_parquet('files_parquet/df'+str(x), index=False)
"""
state = np.random.RandomState(seed)
columns = dict((k, make[dt](e // ef, e, state))
for k, dt in dtypes.items())
df = pd.DataFrame(columns)
if drop_duplicates:
df = df.drop_duplicates()
print("Generated "+str(df.shape[0])+" edges")
return cudf.from_pandas(df)
make = {
float: make_float,
np.int32: make_int32,
np.int64: make_int64
}
| [
2,
15069,
357,
66,
8,
13130,
12,
42334,
11,
15127,
23929,
44680,
6234,
13,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.24407 | 1,307 |
#! /usr/bin/python3
# 797. All Paths From Source to Target
# Medium
#
# Given a directed, acyclic graph of N nodes. Find all possible paths from node 0 to node N-1, and return them in any order.
#
# The graph is given as follows: the nodes are 0, 1, ..., graph.length - 1. graph[i] is a list of all nodes j for which the edge (i, j) exists.
#
# Example:
# Input: [[1,2], [3], [3], []]
# Output: [[0,1,3],[0,2,3]]
# Explanation: The graph looks like this:
# 0--->1
# | |
# v v
# 2--->3
# There are two paths: 0 -> 1 -> 3 and 0 -> 2 -> 3.
#
# Note:
#
# The number of nodes in the graph will be in the range [2, 15].
# You can print different paths in any order, but you should keep the order of nodes inside one path.
from typing import List
check_solution()
| [
2,
0,
1220,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
767,
5607,
13,
1439,
10644,
82,
3574,
8090,
284,
12744,
198,
2,
13398,
198,
2,
198,
2,
11259,
257,
7924,
11,
936,
88,
565,
291,
4823,
286,
399,
13760,
13,
220,
9938,
477,
... | 2.765125 | 281 |
#!/usr/bin/python
import time
import Axon
import Axon.ThreadedComponent
Timer().run()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
640,
198,
11748,
12176,
261,
198,
11748,
12176,
261,
13,
16818,
276,
21950,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
198,
48801,
22446,
5143,
3419,
628
] | 2.404762 | 42 |
########################################################################
#
# File Name: ParsedExpr.py
#
#
"""
The implementation of all of the expression pared tokens.
WWW: http://4suite.org/XPATH e-mail: support@4suite.org
Copyright (c) 2000-2001 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.org/COPYRIGHT for license and copyright information
"""
import string, UserList, types
from xml.dom import EMPTY_NAMESPACE
from xml.dom.ext import SplitQName
from xml.xpath import CompiletimeException, RuntimeException
from xml.xpath import g_extFunctions
from xml.xpath import ParsedNodeTest
from xml.xpath import CoreFunctions, Conversions
from xml.xpath import Util
from xml.xpath import ParsedStep
from xml.xpath import ParsedAxisSpecifier
from xml.utils import boolean
import Set
#Node Set Expressions
#These must return a node set
#Boolean Expressions
#All will return a boolean value
NumberTypes = [types.IntType, types.FloatType, types.LongType]
#Number Expressions
from xml.xpath import Inf, NaN
| [
29113,
29113,
7804,
198,
2,
198,
2,
9220,
6530,
25,
220,
220,
23042,
276,
3109,
1050,
13,
9078,
198,
2,
198,
2,
198,
37811,
198,
464,
7822,
286,
477,
286,
262,
5408,
279,
1144,
16326,
13,
198,
17947,
54,
25,
2638,
1378,
19,
2385,
... | 3.268519 | 324 |
from abc import ABC, abstractmethod
from typing import Optional
from pythonit_toolkit.emails.templates import EmailTemplate
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
21015,
270,
62,
25981,
15813,
13,
368,
1768,
13,
11498,
17041,
1330,
9570,
30800,
628
] | 4.064516 | 31 |
## @file
# This file is used to define the identification of INF/DEC/DSC files
#
# Copyright (c) 2007, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
## Identification
#
# This class defined basic Identification information structure which is used by INF/DEC/DSC files
#
# @param object: Inherited from object class
#
# @var FileName: To store data for Filename
# @var FileFullPath: To store data for full path of the file
# @var FileRelativePath: To store data for relative path of the file
# @var RunStatus: Status of build system running
#
## GetFileName
#
# Reserved
#
## GetFileName
#
# Reserved
#
## GetFileName
#
# Reserved
#
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
id = Identification()
| [
2235,
2488,
7753,
198,
2,
770,
2393,
318,
973,
284,
8160,
262,
11795,
286,
45594,
14,
41374,
14,
5258,
34,
3696,
198,
2,
198,
2,
15069,
357,
66,
8,
4343,
11,
8180,
10501,
13,
1439,
2489,
10395,
29847,
11473,
29,
198,
2,
30628,
55,... | 2.893082 | 318 |
# Copyright 2019-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions for constructing/calculating the means, variances and covariances of
Gaussian states.
"""
from itertools import product
from scipy.special import factorial
import numpy as np
from .._hafnian import hafnian, reduction
from .._torontonian import threshold_detection_prob
from .conversions import reduced_gaussian, Qmat, Xmat, complex_to_real_displacements
def photon_number_mean(mu, cov, j, hbar=2):
r"""Calculate the mean photon number of mode j of a Gaussian state.
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
j (int): the j :sup:`th` mode
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
float: the mean photon number in mode :math:`j`.
"""
num_modes = len(mu) // 2
return (
mu[j] ** 2 + mu[j + num_modes] ** 2 + cov[j, j] + cov[j + num_modes, j + num_modes] - hbar
) / (2 * hbar)
def photon_number_mean_vector(mu, cov, hbar=2):
r"""Calculate the mean photon number of each of the modes in a Gaussian state
Args:
mu (array): vector of means of the Gaussian state using the ordering
:math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
cov (array): the covariance matrix of the Gaussian state
hbar (float): the ``hbar`` convention used in the commutation
relation :math:`[q, p]=i\hbar`
Returns:
array: the vector of means of the photon number distribution
"""
N = len(mu) // 2
return np.array([photon_number_mean(mu, cov, j, hbar=hbar) for j in range(N)])
def photon_number_covar(mu, cov, j, k, hbar=2):
    r""" Calculate the variance/covariance of the photon number distribution
    of a Gaussian state.

    Implements the covariance matrix of the photon number distribution of a
    Gaussian state according to the Last two eq. of Part II. in
    `'Multidimensional Hermite polynomials and photon distribution for polymode
    mixed light', Dodonov et al. <https://journals.aps.org/pra/abstract/10.1103/PhysRevA.50.813>`_

    .. math::
        \sigma_{n_j n_j} &= \frac{1}{2}\left(T_j^2 - 2d_j - \frac{1}{2}\right)
        + \left<\mathbf{Q}_j\right>\mathcal{M}_j\left<\mathbf{Q}_j\right>, \\
        \sigma_{n_j n_k} &= \frac{1}{2}\mathrm{Tr}\left(\Lambda_j \mathbf{M} \Lambda_k \mathbf{M}\right)
        + \left<\mathbf{Q}\right>\Lambda_j \mathbf{M} \Lambda_k\left<\mathbf{Q}\right>,

    where :math:`T_j` and :math:`d_j` are the trace and the determinant of
    :math:`2 \times 2` matrix :math:`\mathcal{M}_j` whose elements coincide
    with the nonzero elements of matrix :math:`\mathbf{M}_j = \Lambda_j \mathbf{M} \Lambda_k`
    while the two-vector :math:`\mathbf{Q}_j` has the components :math:`(q_j, p_j)`.

    :math:`2N \times 2N` projector matrix :math:`\Lambda_j` has only two nonzero
    elements: :math:`\left(\Lambda_j\right)_{jj} = \left(\Lambda_j\right)_{j+N,j+N} = 1`.
    Note that the convention for ``mu`` used here differs from the one used in Dodonov et al.,
    They both provide the same results in this particular case.
    Also note that the original reference of Dodonov et al. has an incorrect prefactor of 1/2
    in the last terms of the last equation above.

    Args:
        mu (array): vector of means of the Gaussian state using the ordering
            :math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
        cov (array): the covariance matrix of the Gaussian state
        j (int): the j :sup:`th` mode
        k (int): the k :sup:`th` mode
        hbar (float): the ``hbar`` convention used in the commutation
            relation :math:`[q, p]=i\hbar`

    Returns:
        float: the covariance for the photon numbers at modes :math:`j` and :math:`k`.
    """
    if j == k:
        # Diagonal case: variance of a single mode. Work in the 2x2 phase space
        # of mode j only.
        mu, cov = reduced_gaussian(mu, cov, [j])
        # T_j^2 / 2 - d_j : trace and determinant of the reduced covariance.
        term_1 = 0.5 * np.trace(cov) ** 2 - np.linalg.det(cov)
        # Quadratic form of the displacement with the reduced covariance.
        term_2 = mu @ cov @ mu
        return ((term_1 + term_2) / hbar ** 2) - 0.25
    # Off-diagonal case: reduce to the two modes j and k. After reduction the
    # vectors are xp-ordered over two modes, i.e. indices (0, 1, 2, 3)
    # correspond to (q_j, q_k, p_j, p_k) — assumes `reduced_gaussian` preserves
    # the xp-ordering of the selected modes (TODO confirm against its docs).
    mu, cov = reduced_gaussian(mu, cov, [j, k])
    # Sum of squared cross-covariances between mode j and mode k quadratures.
    term_1 = cov[0, 1] ** 2 + cov[0, 3] ** 2 + cov[2, 1] ** 2 + cov[2, 3] ** 2
    # Displacement-weighted cross-covariance contributions.
    term_2 = (
        cov[0, 1] * mu[0] * mu[1]
        + cov[2, 1] * mu[1] * mu[2]
        + cov[0, 3] * mu[0] * mu[3]
        + cov[2, 3] * mu[2] * mu[3]
    )
    return (term_1 + 2 * term_2) / (2 * hbar ** 2)
def photon_number_covmat(mu, cov, hbar=2):
    r"""Calculate the covariance matrix of the photon number distribution of a
    Gaussian state.

    The matrix is symmetric, so only the lower triangle is computed and the
    upper triangle is filled in by mirroring.

    Args:
        mu (array): vector of means of the Gaussian state using the ordering
            :math:`[q_1, q_2, \dots, q_n, p_1, p_2, \dots, p_n]`
        cov (array): the covariance matrix of the Gaussian state
        hbar (float): the ``hbar`` convention used in the commutation
            relation :math:`[q, p]=i\hbar`

    Returns:
        array: the covariance matrix of the photon number distribution
    """
    N = len(mu) // 2
    pnd_cov = np.zeros((N, N))
    for i in range(N):
        for j in range(i + 1):
            # Use tuple indexing (idiomatic and cheaper than chained
            # pnd_cov[i][j], which creates an intermediate row view).
            pnd_cov[i, j] = photon_number_covar(mu, cov, i, j, hbar=hbar)
            pnd_cov[j, i] = pnd_cov[i, j]
    return pnd_cov
def photon_number_expectation(mu, cov, modes, hbar=2):
    r"""Calculates the expectation value of the product of the number operator of the modes in a Gaussian state.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        modes (list): list of modes
        hbar (float): value of hbar in the uncertainty relation.

    Returns:
        (float): expectation value of the product of the number operators of the modes.
    """
    dim = cov.shape[0]
    n_modes = dim // 2
    # Mark one creation and one annihilation operator for every requested mode.
    rpt = np.zeros(dim, dtype=int)
    selected = np.array(list(modes), dtype=int)
    rpt[selected] = 1
    rpt[selected + n_modes] = 1
    return normal_ordered_expectation(mu, cov, rpt, hbar=hbar)
def photon_number_squared_expectation(mu, cov, modes, hbar=2):
    r"""Calculates the expectation value of the square of the product of the number operator of the modes in
    a Gaussian state.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        modes (list): list of modes
        hbar (float): value of hbar in the uncertainty relation.

    Returns:
        (float): expectation value of the square of the product of the number operator of the modes.
    """
    n_modes = len(modes)
    mu_red, cov_red = reduced_gaussian(mu, cov, modes)
    # Each mode contributes either a first or a second power; sum the
    # normal-ordered expectation over every such combination.
    return sum(
        normal_ordered_expectation(mu_red, cov_red, powers + powers, hbar=hbar)
        for powers in product([1, 2], repeat=n_modes)
    )
def normal_ordered_expectation(mu, cov, rpt, hbar=2):
    r"""Calculates the expectation value of the normal ordered product
    :math:`\prod_{i=0}^{N-1} a_i^{\dagger n_i} \prod_{j=0}^{N-1} a_j^{m_j}` with respect to an N-mode Gaussian state,
    where :math:`\text{rpt}=(n_0, n_1, \ldots, n_{N-1}, m_0, m_1, \ldots, m_{N-1})`.

    This is the :math:`s=1` special case of :func:`s_ordered_expectation`.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        rpt (list): integers specifying the terms to calculate.
        hbar (float): value of hbar in the uncertainty relation.

    Returns:
        (float): expectation value of the normal ordered product of operators
    """
    return s_ordered_expectation(mu, cov, rpt, hbar=hbar, s=1)
def s_ordered_expectation(mu, cov, rpt, hbar=2, s=0):
    r"""Calculates the expectation value of the s-ordered product
    obtained by taking derivatives of the characteristic function of a Gaussian state.

    Here, :math:`\text{rpt}=(n_0, n_1, \ldots, n_{N-1}, m_0, m_1, \ldots, m_{N-1})`.
    indicates how many derivatives are taken with respect to the complex argument and its
    conjugate.
    The values :math:`s=\{1,0,-1\}` correspond respectively to normal, symmetric and antinormal order.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        rpt (list): integers specifying the terms to calculate.
        hbar (float): value of hbar in the uncertainty relation.
        s (float): value setting the ordering it must be between -1 and 1.

    Returns:
        (float): expectation value of the normal ordered product of operators
    """
    # The following seven lines are written so that we remove from the calculation the
    # modes k that we don't care about. These modes have rpt[k] = rpt[k+M] = 0
    if np.allclose(rpt, 0):
        # Zeroth-order derivative: the expectation of the identity is 1.
        return 1.0
    M = len(cov) // 2
    # Keep only modes with a nonzero power in either half of rpt.
    modes = np.where(np.array(rpt[0:M]) + np.array(rpt[M : 2 * M]) != 0)[0]
    mu, cov = reduced_gaussian(mu, cov, list(modes))
    # Re-index rpt so it matches the reduced (smaller) state.
    ind = list(modes) + list(modes + M)
    rpt = list(np.array(rpt)[np.array(ind)])
    alpha = complex_to_real_displacements(mu, hbar=hbar)
    n = len(cov)
    # Build the s-ordered kernel matrix; the 0.5*(s+1) shift interpolates
    # between the orderings (s=1 normal, s=0 symmetric, s=-1 antinormal).
    V = (Qmat(cov, hbar=hbar) - 0.5 * (s + 1) * np.identity(n)) @ Xmat(n // 2)
    # Repeat rows/columns of V according to the requested powers.
    A = reduction(V, rpt)
    if np.allclose(mu, 0):
        # Zero displacement: plain hafnian suffices.
        return hafnian(A)
    # Nonzero displacement: place the (repeated) conjugate displacements on the
    # diagonal and evaluate the loop hafnian.
    np.fill_diagonal(A, reduction(np.conj(alpha), rpt))
    return hafnian(A, loop=True)
def mean_clicks(cov, hbar=2):
    r"""Calculates the total mean number of clicks when a zero-mean gaussian state
    is measured using threshold detectors.

    Args
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering
        hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`

    Returns
        float: mean number of clicks
    """
    n_modes = cov.shape[0] // 2
    Q = Qmat(cov, hbar=hbar)
    total = float(n_modes)
    for i in range(n_modes):
        # 2x2 sub-determinant of Q associated with mode i; each mode
        # contributes 1 - 1/sqrt(det) to the mean click number.
        det_i = np.real(
            Q[i, i] * Q[i + n_modes, i + n_modes] - Q[i + n_modes, i] * Q[i, i + n_modes]
        )
        total -= 1.0 / np.sqrt(det_i)
    return total
def variance_clicks(cov, hbar=2):
    r"""Calculates the variance of the total number of clicks when a zero-mean gaussian state
    is measured using threshold detectors.

    Args
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering
        hbar (float): the value of :math:`\hbar` in the commutation relation :math:`[\x,\p]=i\hbar`

    Returns
        float: variance in the total number of clicks
    """
    n, _ = cov.shape
    # Zero-mean state: the means vector is all zeros.
    means = np.zeros([n])
    nmodes = n // 2
    Q = Qmat(cov, hbar=hbar)
    # 2x2 sub-determinants of Q, one per mode.
    vac_probs = np.array(
        [
            np.real(Q[i, i] * Q[i + nmodes, i + nmodes] - Q[i + nmodes, i] * Q[i, i + nmodes])
            for i in range(nmodes)
        ]
    )
    # Single-mode vacuum probabilities: 1 / sqrt(det of the 2x2 block).
    vac_probs = np.sqrt(vac_probs)
    vac_probs = 1 / vac_probs
    # Sum of per-mode Bernoulli variances p_click * p_vac = (1 - p_vac) * p_vac.
    term1 = np.sum(vac_probs * (1 - vac_probs))
    term2 = 0
    for i in range(nmodes):
        for j in range(i):
            # Two-mode joint vacuum probability minus the product of the
            # single-mode ones: the pairwise covariance contribution.
            _, Qij = reduced_gaussian(means, Q, [i, j])
            prob_vac_ij = np.linalg.det(Qij).real
            prob_vac_ij = 1.0 / np.sqrt(prob_vac_ij)
            term2 += prob_vac_ij - vac_probs[i] * vac_probs[j]
    # Variances on the diagonal plus twice the off-diagonal covariances.
    return term1 + 2 * term2
def _coeff_normal_ordered(m, k):
r"""Returns the coefficients giving the expansion of a photon number power in terms of normal ordered power of creation
and annihilation operators. The coefficient is given by :math:`\sum_{\mu=0}^k \frac{(-1)^{k-\mu} \mu^m}{\mu!(k-\mu)!}`.
Args:
m (int): power of the photon number operator, :math:`(a^\dagger a)^m `.
k (int): power of the normal ordered term, :math:`a^{\dagger i} a^i`.
Returns:
(float): expansion coefficient
"""
return sum(
[
(1 / (factorial(mu) * factorial(k - mu))) * ((-1) ** (k - mu) * (mu ** m))
for mu in range(0, k + 1)
]
)
def photon_number_moment(mu, cov, indices, hbar=2):
    r"""Calculates the expectation value of product of powers of photon number operators of a Gaussian state.

    The powers are specified by a dictionary with modes as keys and powers as values.

    The calculation is performed by first writing any power of the photon number as

    :math:`(a^\dagger a)^m = \sum_{k=1}^m c_k a^{\dagger k} a^k`

    where the coefficients :math:`c_i` are provided by the function `_coeff_normal_ordered`.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        indices (dictionary): specification of the different modes and their power of their photon number
        hbar (float): value of hbar in the uncertainty relation.

    Returns:
        float: the expectation value of the photon number powers.
    """
    N = len(cov) // 2
    # Powers requested for each mode, in the dict's iteration order.
    list_indices = [indices[key] for key in indices]
    modes = list(indices)
    # Find the expansion coefficients of all the different powers
    expansion_coeff = [
        [_coeff_normal_ordered(indices[key], i) for i in range(1, 1 + indices[key])]
        for key in indices
    ]
    # For each mode, enumerate the possible normal-ordered powers 1..m via
    # zero-based choices 0..m-1.
    values = [list(range(i)) for i in list_indices]
    net_sum = 0.0
    # Construct the product of each possible term appearing in the normal ordered expansion
    for item in product(*values):
        # rpt holds creation powers in the first N slots and annihilation
        # powers in the last N; normal-ordered terms have them equal.
        rpt = [0] * N
        for i, key in enumerate(modes):
            rpt[key] = item[i] + 1
        rpt = rpt + rpt
        # Product of the expansion coefficients for this combination of powers.
        prod_coeff = np.prod([expansion_coeff[i][coeff] for i, coeff in enumerate(item)])
        net_sum += prod_coeff * s_ordered_expectation(mu, cov, rpt, s=1, hbar=hbar)
    return np.real_if_close(net_sum)
def partition(collection):
    """Generate all set partitions of a collection.

    Taken from: https://stackoverflow.com/a/30134039

    Args:
        collection (sequence): set to find partitions of

    Yields:
        list[list]: set partition of collection
    """
    if len(collection) == 1:
        yield [collection]
        return
    head, tail = collection[0], collection[1:]
    for sub_partition in partition(tail):
        # Insert `head` into each existing block in turn...
        for idx, block in enumerate(sub_partition):
            yield sub_partition[:idx] + [[head] + block] + sub_partition[idx + 1 :]
        # ...or give `head` a block of its own.
        yield [[head]] + sub_partition
def _list_to_freq_dict(words):
"""Convert between a list which of "words" and a dictionary
which shows how many times each word appears in word
Args:
words (list): list of words
Returns:
dict : how many times a word appears. key is word, value is multiplicity
"""
return {i: words.count(i) for i in set(words)}
def photon_number_cumulant(mu, cov, modes, hbar=2):
    r"""Calculates the photon-number cumulant of the modes in the Gaussian state.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        modes (list or array): list of modes. Note that it can have repetitions.
        hbar (float): value of hbar in the uncertainty relation.

    Returns:
        (float): the cumulant
    """
    modes = list(modes)  # turns modes from array to list if passed in as array
    cumulant = 0
    # Moments-to-cumulants formula: sum over all set partitions of the modes.
    for blocks in partition(modes):
        num_blocks = len(blocks)
        contribution = factorial(num_blocks - 1) * (-1) ** (num_blocks - 1)
        for block in blocks:
            contribution *= photon_number_moment(
                mu, cov, _list_to_freq_dict(block), hbar=hbar
            )
        cumulant += contribution
    return cumulant
def click_cumulant(mu, cov, modes, hbar=2):
    r"""Calculates the click cumulant of the modes in the Gaussian state.

    Args:
        mu (array): length-:math:`2N` means vector in xp-ordering.
        cov (array): :math:`2N\times 2N` covariance matrix in xp-ordering.
        modes (list or array): list of modes.
        hbar (float): value of hbar in the uncertainty relation.

    Returns:
        (float): the cumulant
    """
    modes = list(modes)  # turns modes from array to list if passed in as array
    cumulant = 0
    # Moments-to-cumulants formula: sum over all set partitions of the modes.
    for blocks in partition(modes):
        num_blocks = len(blocks)
        contribution = factorial(num_blocks - 1) * (-1) ** (num_blocks - 1)
        for block in blocks:
            unique_modes = list(set(block))  # remove repetitions
            mu_red, cov_red = reduced_gaussian(mu, cov, unique_modes)
            # Probability that every mode in the block registers a click.
            contribution *= threshold_detection_prob(
                mu_red, cov_red, np.ones_like(unique_modes), hbar=hbar
            )
        cumulant += contribution
    return cumulant
| [
2,
15069,
13130,
12,
42334,
47482,
324,
84,
29082,
21852,
3457,
13,
198,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,... | 2.397544 | 7,086 |
import getopt
import os
import sys
from OCC.StlAPI import StlAPI_Writer
from OCC.STEPControl import STEPControl_Reader
from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
if __name__ == '__main__':
main(sys.argv[1:])
| [
11748,
651,
8738,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
6738,
440,
4093,
13,
1273,
75,
17614,
1330,
520,
75,
17614,
62,
34379,
198,
6738,
440,
4093,
13,
42135,
15988,
1330,
49154,
15988,
62,
33634,
198,
6738,
440,
4093,
13,
... | 2.91358 | 81 |
from markdown import markdown
from BeautifulSoup import BeautifulSoup
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
@register.filter
@stringfilter
| [
6738,
1317,
2902,
1330,
1317,
2902,
198,
6738,
23762,
50,
10486,
1330,
23762,
50,
10486,
198,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
4731,
24455,
198,
6738,
42625,
14208,
13... | 3.636364 | 99 |
import pytest
import time
from test_helpers import create_socket, send_msg, assert_msg_ok, assert_msg_fail
from test_setup import CCXML_PATH, CONNECTION, DEVICETYPE, HOME, SESSION
#@pytest.mark.xfail(reason="No validating file path on JS side")
| [
11748,
12972,
9288,
198,
11748,
640,
198,
6738,
1332,
62,
16794,
364,
1330,
2251,
62,
44971,
11,
3758,
62,
19662,
11,
6818,
62,
19662,
62,
482,
11,
6818,
62,
19662,
62,
32165,
198,
6738,
1332,
62,
40406,
1330,
12624,
55,
5805,
62,
3... | 2.963855 | 83 |
from bs4 import BeautifulSoup
import requests
import csv
import sys
from urllib.error import HTTPError
sys.path.append("..")
import mytemp
url='https://s.taobao.com/search?q=%E8%8C%B6%E5%8F%B6&imgfile=&commend=all&ssid=s5-e&search_type=item&sourceId=tb.index&spm=a21bo.2017.201856-taobao-item.2&ie=utf8&initiative_id=tbindexz_20170306&cps=yes&ppath=122216450%3A30419'
| [
201,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
201,
198,
11748,
7007,
201,
198,
11748,
269,
21370,
201,
198,
11748,
25064,
201,
198,
6738,
2956,
297,
571,
13,
18224,
1330,
14626,
12331,
201,
198,
17597,
13,
6978,
13,
33295,
7203... | 2.135593 | 177 |
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# port monitor
from __future__ import absolute_import
_PORT_RANGE_START = 50000
_PORT_RANGE_END = 64 * 1024
# version
__id__ = "$Id$"
# End of file
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
198,
2,
220,
220,
27156,
27156,
27156,
27156,
27156,
198,
2,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.606635 | 211 |
from unittest import TestCase
from command.model.configuration._arg_group import *
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
3141,
13,
19849,
13,
11250,
3924,
13557,
853,
62,
8094,
1330,
1635,
628
] | 3.818182 | 22 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011-2013 by nils_2 <weechatter@arcor.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script deletes weechatlog-files by age or size
# YOU ARE USING THIS SCRIPT AT YOUR OWN RISK!
#
# Usage:
#
# It is recommended to use this script with cron.py script:
# following command will check each 1th day of a month for logfiles older than 100 days and delete them
# /cron add * * 1 * * * core.weechat command /purgelogs age 100 delete
#
# Options:
# do not delete #weechat, #weechat-fr and nils_2 (query) logfiles
# /set plugins.var.python.purgelogs.blacklist "#weechat,#weechat-fr,nils_2"
#
# History:
# 2013-01-25: nils_2, (freenode.#weechat)
# 0.4 : make script compatible with Python 3.x
# 2011-09-18: nils_2, (freenode.#weechat)
# 0.3.1 : code optimization
# 2011-09-17: nils_2, (freenode.#weechat)
# 0.3 : added: search for log-files smaller than age/size (new functions: age_ls and size_ls)
# 2011-03-11: nils_2, (freenode.#weechat)
# 0.2 : added blacklist option
# 2011-02-18: nils_2, (freenode.#weechat)
# 0.1 : initial release
#
# Development is currently hosted at
# https://github.com/weechatter/weechat-scripts
#
# TODO: waiting for "/logger disable all" and "/logger enable all"
try:
import weechat as w
import os, os.path, stat, time
from datetime import date, timedelta
except Exception:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
quit()
SCRIPT_NAME = "purgelogs"
SCRIPT_AUTHOR = "nils_2 <weechatter@arcor.de>"
SCRIPT_VERSION = "0.4"
SCRIPT_LICENSE = "GPL"
SCRIPT_DESC = "delete weechatlog-files by age or size (YOU ARE USING THIS SCRIPT AT YOUR OWN RISK!)"
purgelogs_commands = {
"delete" : "argument for security reasons",
"size" : "greater than <Kib> for log-files to purge",
"size_ls": "less than <Kib> for log-files to purge",
"age" : "older than <days> for log-files to purge (maximum value: 9999)",
"age_ls" : "younger than <days> for log-files to purge",
}
purgelogs_options = {
"blacklist": "" # comma separated list of buffers (short name)
}
blacklist = []
# ================================[ weechat functions ]===============================
def get_path():
    """Return the WeeChat logger base directory.

    Reads the ``logger.file.path`` option and expands the ``%h`` placeholder
    to the WeeChat home directory.
    """
    return w.config_string(w.config_get("logger.file.path")).replace("%h", w.info_get("weechat_dir", ""))
def is_number(s):
    """Return True when *s* can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
# ================================[ os functions ]===============================
# w.prnt("","delete: %s" % (fname))
# ================================[ main ]===============================
if __name__ == "__main__":
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "", ""):
# str_commands = ""
# for cmd in (purgelogs_commands.keys()):
# str_commands += " " + cmd + ": " + purgelogs_commands[cmd] + "\n";
w.hook_command("purgelogs",
"delete weechatlog-files by date or size",
"[age|age_ls] <days> || [size|size_ls] <in KiB> || [delete]",
" size : greater than <Kib> for log-files to purge\n"
" size_ls : less than <Kib> for log-files to purge\n"
" age : older than <days> for log-files to purge (maximum value: 9999)\n"
" age_ls : younger than <days> for log-files to purge\n"
" delete : argument for security reasons\n"
"\n"
# str_commands + "\n"
"Examples:\n"
" show log-files older than 100 days\n"
" /" + SCRIPT_NAME + " age 100\n"
" purge log-files older than 100 days\n"
" /" + SCRIPT_NAME + " age 100 delete\n"
" show log-files younger than 10 days\n"
" /" + SCRIPT_NAME + " age_ls 10\n"
" purge log-files younger than 10 days\n"
" /" + SCRIPT_NAME + " age_ls 10 delete\n"
" show log-files greater than 100 KiB\n"
" /" + SCRIPT_NAME + " size 100\n"
" purge log-files greater than 100 KiB\n"
" /" + SCRIPT_NAME + " size 100 delete\n",
"age|age_ls|size|size_ls %-",
"purgelogs_cb", "")
w.hook_config('plugins.var.python.%s.blacklist' %SCRIPT_NAME, 'update_blacklist', '')
for option, default_value in purgelogs_options.items():
# for option, default_value in purgelogs_options.iteritems():
if w.config_get_plugin(option) == "":
w.config_set_plugin(option, default_value)
else:
blacklist = w.config_get_plugin('blacklist').split(',')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
2813,
12,
6390,
416,
299,
4487,
62,
17,
1279,
732,
3055,
1436,
31,
5605,
273,
13,
2934,
29,
198,
2,
198,
2,
770,
1430,
318,
1479,
3... | 2.221371 | 2,611 |
"""
Ideally, Sugaroid should also support reading input from
a computer audio input device, such as a desktop microphone or
a laptop microphone. This is not yet implemented.
.. warning ::
This feature is not yet implemented and looking for
community contribution
"""
| [
37811,
198,
41452,
453,
11,
20874,
1868,
815,
635,
1104,
3555,
5128,
422,
220,
198,
64,
3644,
6597,
5128,
3335,
11,
884,
355,
257,
11364,
21822,
393,
198,
64,
13224,
21822,
13,
770,
318,
407,
1865,
9177,
13,
198,
198,
492,
6509,
790... | 4.212121 | 66 |
import os
import smooth1d
#(0) Simulate one dataset, multuple parameters:
nIterations = 3
JJ = [5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 35, 45, 50] #sample sizes
noise_amps = [0.01, 0.20]
sim = smooth1d.sim.Simulator()
sim.set_alpha(0.05)
sim.set_filter('None')
# sim.set_dataset_name('Vaughan1982')
# sim.set_dataset_name('Challis1999a')
# sim.set_dataset_name('Challis1999b')
# sim.set_dataset_name('Challis1999c')
# sim.set_dataset_name('Challis1999d')
sim.set_dataset_name('Challis1999e')
sim.set_metadata_labels(['sample_size','noise_amp'], types=[int,float])
sim.set_results_directory( os.path.dirname(__file__) )
sim.set_seed()
### simulate:
for J in JJ:
for amp in noise_amps:
print('Sample size = %d, Noise amplitude = %.2f' %(J,amp))
sim.set_sample_size( J )
sim.set_noise_amp( amp )
sim.simulate(nIterations, metadata=[J, amp])
# sim.save()
| [
198,
11748,
28686,
198,
11748,
7209,
16,
67,
628,
628,
198,
2,
7,
15,
8,
3184,
5039,
530,
27039,
11,
1963,
29291,
10007,
25,
198,
77,
29993,
602,
796,
513,
198,
32178,
220,
220,
220,
220,
220,
220,
220,
220,
220,
796,
685,
20,
1... | 2.305699 | 386 |
from __future__ import unicode_literals, print_function
import itertools
import json
import os
from conceptnet5.edges import make_edge
from conceptnet5.formats.msgpack_stream import MsgpackStreamWriter
from conceptnet5.languages import ALL_LANGUAGES
from conceptnet5.readers.wiktionary import valid_language
from conceptnet5.uri import conjunction_uri,get_uri_language, is_absolute_url, Licenses, \
split_uri, uri_prefix
N = 100
CURRENT_DIR = os.getcwd()
def weight_scale(weight):
"""
This scale starts out linear, then switches to a square-root scale at x=2.
>>> weight_scale(-1)
-1.0
>>> weight_scale(0)
0.0
>>> weight_scale(1)
1.0
>>> weight_scale(2)
2.0
>>> weight_scale(5)
4.0
>>> weight_scale(10)
6.0
"""
return 2 * max(weight - 1, 1) ** .5 + min(weight, 2) - 2
def combine_assertions(input_filename, output_filename):
"""
Take in a tab-separated, sorted "CSV" files, indicated by
`input_filename`, that should be grouped together into assertions.
Output a msgpack stream of assertions the file indicated by
`output_filename`.
The input file should be made from multiple sources of assertions by
concatenating and sorting them.
The combined assertions will all have the dataset of the first edge that
produces them, and the license of the strongest license being combined.
This process requires its input to be a sorted CSV so that all edges for
the same assertion will appear consecutively.
"""
def group_func(line):
"Group lines by their URI (their first column)."
return line.split('\t', 1)[0]
out = MsgpackStreamWriter(output_filename)
out_bad = MsgpackStreamWriter(output_filename + '.reject')
with open(input_filename, encoding='utf-8') as stream:
for key, line_group in itertools.groupby(stream, group_func):
assertion = make_assertion(line_group)
if assertion is None:
continue
if assertion['weight'] > 0:
destination = out
else:
destination = out_bad
destination.write(assertion)
out.close()
out_bad.close()
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
198,
198,
11748,
340,
861,
10141,
198,
11748,
33918,
198,
198,
11748,
28686,
198,
198,
6738,
3721,
3262,
20,
13,
276,
3212,
1330,
787,
62,
14907,
198,
673... | 2.727497 | 811 |
n = int(input('Digite N: '))
for l in range(1, n+1):
for c in range(1, n+1):
if c == l:
print('*', end='')
else:
print('.', end='')
if c % n == 0:
print('\n', end='') | [
77,
796,
493,
7,
15414,
10786,
19511,
578,
399,
25,
705,
4008,
198,
1640,
300,
287,
2837,
7,
16,
11,
299,
10,
16,
2599,
198,
220,
220,
220,
329,
269,
287,
2837,
7,
16,
11,
299,
10,
16,
2599,
198,
220,
220,
220,
220,
220,
220,
... | 1.666667 | 138 |
import pytest
import datetime
from shapely.wkt import loads
from unittest.mock import patch
from pyramid.testing import DummyRequest
from pyramid_oereb.core import b64
from pyramid_oereb.core.adapter import FileAdapter
from pyramid_oereb.core.records.image import ImageRecord
from pyramid_oereb.core.records.theme import ThemeRecord
from pyramid_oereb.core.records.view_service import LegendEntryRecord
from pyramid_oereb.core.records.real_estate import RealEstateRecord
from pyramid_oereb.core.hook_methods import get_symbol, get_symbol_ref, \
get_surveying_data_update_date
from pyramid_oereb.contrib.data_sources.standard.sources.plr import StandardThemeConfigParser
import pyramid_oereb.contrib.data_sources.standard.hook_methods
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
@pytest.fixture
@patch.object(pyramid_oereb.core.hook_methods, 'route_prefix', 'oereb')
| [
11748,
12972,
9288,
198,
11748,
4818,
8079,
198,
198,
6738,
5485,
306,
13,
86,
21841,
1330,
15989,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
27944,
13,
33407,
1330,
360,
13513,
18453,
198,
198,
6738,
27944,
62... | 3.04902 | 306 |
from gurobipy import *
import numpy as np
import sys
import time
# https://dspace.mit.edu/handle/1721.1/29599
# compute the optimal action, given the (possibly) lambda-adjusted q-values
| [
6738,
915,
22609,
541,
88,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
220,
198,
11748,
25064,
198,
11748,
640,
628,
198,
2,
3740,
1378,
67,
13200,
13,
2781,
13,
15532,
14,
28144,
14,
1558,
2481,
13,
16,
14,
25710,
2079,
628,
62... | 3.029851 | 67 |
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from default_endian_mod import _schema
| [
2,
5231,
519,
877,
515,
422,
509,
2257,
25,
3387,
4781,
428,
1627,
611,
1804,
597,
31671,
416,
1021,
0,
198,
198,
11748,
555,
715,
395,
198,
198,
6738,
4277,
62,
437,
666,
62,
4666,
1330,
4808,
15952,
2611,
198
] | 3.375 | 40 |
"""Rss scraping with Beautifulsoup."""
import subprocess
import isort # noqa: F401
import requests
import snoop
from bs4 import BeautifulSoup
from loguru import logger
fmt = "{time} - {name} - {level} - {message}"
logger.add("../logs/info.log", level="INFO", format=fmt, backtrace=True, diagnose=True) # noqa: E501
logger.add("../logs/error.log", level="ERROR", format=fmt, backtrace=True, diagnose=True) # noqa: E501
subprocess.run(["isort", __file__])
@logger.catch
# @snoop
def hn_rss():
    """Fetch the r/commandline RSS feed and parse it as XML.

    NOTE(review): the function name suggests Hacker News, but the URL points
    at reddit.com/r/commandline — confirm which feed is intended.
    """
    url = "https://www.reddit.com/r/commandline.rss"
    r = requests.get(url)
    print(r)  # prints the Response object, e.g. "<Response [200]>"
    soup = BeautifulSoup(r.content, features="xml")
    # NOTE(review): `soup` is built but never used; the parsed feed is discarded.
    # print(soup.prettify())
if __name__ == "__main__":
hn_rss()
| [
37811,
49,
824,
46743,
351,
23762,
82,
10486,
526,
15931,
198,
11748,
850,
14681,
198,
198,
11748,
318,
419,
220,
1303,
645,
20402,
25,
376,
21844,
198,
11748,
7007,
198,
11748,
3013,
11224,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10... | 2.556338 | 284 |
from .sort import Sort
from .mpt import MPT | [
6738,
764,
30619,
1330,
33947,
198,
6738,
764,
76,
457,
1330,
4904,
51
] | 3.307692 | 13 |
"""Entry point for the pretraining task."""
import collections
import logging
import math
import os
import warnings
from pathlib import Path
from typing import Dict, Union, Any, Optional, List
import numpy as np
import torch
import torch.nn as nn
from packaging import version
from torch.cuda.amp import autocast
from torch.optim import Optimizer
# noinspection PyProtectedMember
from torch.utils.data import DataLoader, DistributedSampler
from transformers import EvaluationStrategy, PreTrainedModel, set_seed, TrainerState, WEIGHTS_NAME, is_wandb_available
if is_wandb_available():
from transformers.integrations import WandbCallback
from src.visualization.wandb_callbacks import MyWandbCallback
from transformers.trainer import Trainer, is_torch_tpu_available
from transformers.trainer_pt_utils import reissue_pt_warnings
from transformers.trainer_utils import PredictionOutput, TrainOutput
from src.data.make_dataset_document_electra import make_datasets_document_electra
from src.data.utils import get_dataset_info
from src.features.features_document_electra import DataCollatorForDocumentElectra
from src.models.metrics import ComputeMetricsDocumentElectraForPretraining
from src.models.modeling_document_electra import DocumentElectraConfig, DocumentElectraPretrainingModel, \
DocumentElectraPretrainingModelOutput
from src.models.utils import get_tokenizer, MyTrainingArguments, my_set_seed
from src.visualization.tensorboard_utils import get_tensorboard_experiment_id, MySummaryWriter, to_sanitized_dict
if is_torch_tpu_available():
# noinspection PyUnresolvedReferences
import torch_xla.core.xla_model as xm
# noinspection PyUnresolvedReferences
import torch_xla.debug.metrics as met
# noinspection PyUnresolvedReferences
import torch_xla.distributed.parallel_loader as pl
import sys
IN_COLAB = 'google.colab' in sys.modules
logger = logging.getLogger(__name__)
_default_settings = {}
if torch.cuda.is_available() and (torch.cuda.get_device_properties(0).total_memory < 2 ** 30 * 6):
# Less 6 GB GPU memory
_default_settings["max_length"] = 128 * 16 # 2048
_default_settings["max_position_embeddings"] = 4096
_default_settings["per_device_train_batch_size"] = 1
_default_settings["per_device_eval_batch_size"] = 1
_default_settings["num_hidden_layers"] = 12
_default_settings["effective_batch_size"] = 4
_default_settings["lr"] = 5e-4
else:
_default_settings["max_length"] = 128 * 16 # 2048
_default_settings["max_position_embeddings"] = 4096
_default_settings["per_device_train_batch_size"] = 1
_default_settings["per_device_eval_batch_size"] = 1
_default_settings["num_hidden_layers"] = 12
_default_settings["effective_batch_size"] = 4
_default_settings["lr"] = 5e-4
# Hack to prevent tokenizer to fork
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# noinspection PyTypeChecker
def train_model_pretraining(
        dataset_name: Union[str, List[str]] = None,
        dataset_sampling: Union[float, List[float]] = None,
        dataset_dir: str = None,
        cache_dir: str = None,
        output_dir: str = "checkpoints",
        tokenizer_path: str = r"models/ByteLevelBPETokenizer-vocab_size=30522-min_frequency=2",
        gradient_accumulation_steps: int = (_default_settings["effective_batch_size"] /
                                            _default_settings["per_device_train_batch_size"]),
        # 1 for ElectraSmall but aggregation across docs decrease variance
        per_device_train_batch_size: int = _default_settings["per_device_train_batch_size"],
        per_device_eval_batch_size: int = _default_settings["per_device_eval_batch_size"],
        lr: int = _default_settings["lr"],  # 5e-4 for ElectraSmall * gradient_accumulation_steps
        max_grad_norm: float = 1,  # 1 for Electra
        # https://github.com/google-research/electra/blob/81f7e5fc98b0ad8bfd20b641aa8bc9e6ac00c8eb/model/optimization.py
        embedding_size: int = 128,  # Output and input embedding size, 128 for ElectraSmall
        hidden_size: int = 256,  # 256 for ElectraSmall
        chunk_length: int = 128,
        num_hidden_layers: int = _default_settings["num_hidden_layers"],  # 12 for Electra Small
        num_attention_heads: int = 4,
        layer_depth_offset: int = -1,
        max_sentence_length: int = 128,
        max_sentences: int = 128,
        max_length: int = _default_settings["max_length"],
        max_position_embeddings: int = _default_settings["max_position_embeddings"],
        intermediate_size: int = 1024,  # 1024 for ElectraSmall
        hidden_act: str = "gelu",
        generator_size: float = 0.25,  # Generator size multiplier
        generator_layer_size: float = 1.0,  # Generator layer size multiplier
        mlm_probability: float = 0.15,
        mlm_replacement_probability: float = 0.85,
        temperature: float = 1.,  # 1 for ElectraSmall
        discriminant_loss_factor: float = 50,  # Factor for the discriminant loss, Default 50 in Electra (lambda)
        hidden_dropout_prob: float = 0.1,  # 0.1 for ElectraSmall
        attention_probs_dropout_prob: float = 0.1,  # 0.1 for ElectraSmall
        logging_steps: int = _default_settings["effective_batch_size"] * 3,  # Default logging every 3 grad updates
        eval_steps: int = _default_settings["effective_batch_size"] * 3 * 10,
        save_steps: int = _default_settings["effective_batch_size"] * 800,  # Every 800 steps (about every hour)
        # Default evaluation every 10 logging steps (30 grad updates)
        seed: int = 42,
        training_set_random_seed: int = 42,
        valid_test_split_random_seed: int = 42,
        training_set_size: int = -1,
        num_proc: int = 0,
        gradient_checkpointing: bool = False,  # Decrease mem by 5/10% but increase compute cost
        pretrain_path: str = None,
        warmup_steps: int = int(10000),  # 10000 for ElectraSmall
        weight_decay: float = 0.01,  # 0.01 for ElectraSmall
        tensorboard_tracking_folder: str = "tensorboard",
        metric_to_train_generator: str = "eval_is_fake_token_AUC",
        threshold_to_train_generator: float = 0.0,
        relative_position_embeddings: bool = False,
        sequence_embeddings: bool = False,
        experiment_name: Optional[str] = None,
) -> Optional[Dict[str, float]]:
    """
    Main function to launch the pretraining task
    :param experiment_name: Name of experiment
    :param dataset_name: name or list of names of datasets from HuggingFace NLP library.
        Default [Wikipedia-20200501.en', 'bookcorpus']
    :param dataset_sampling: ratio for downsampling
        Default [1.0, 0.3]
    :param dataset_dir: Directory path for the cache for the HuggingFace datasets library
    :param cache_dir: Directory to store the cache for the processed datasets.
    :param output_dir: Checkpoints folder
    :param tokenizer_path: Path to tokenizer
    :param gradient_accumulation_steps: Default 32.
    :param per_device_train_batch_size: Batch size per GPU. Default 1.
    :param per_device_eval_batch_size: Batch size per GPU. Default 2.
    :param lr: Learning rate. Default 5e-4 like in Electra paper * 32
    :param max_grad_norm: Gradient norm clipping. Default 1 like Electra
    :param embedding_size: Output embedding size for the Electra encoder
    :param hidden_size: Hidden embedding size within encoders. Default 256 like ElectraSmall
    :param num_hidden_layers: Number of layer. Default 12 to be equivalent to ElectraSmall (12)
    :param num_attention_heads: Number of attention heads. Default 4 like ElectraSmall
    :param chunk_length: Default 128.
    :param layer_depth_offset: Define which layer to use as sentence embedding or document embedding. Default -1.
    :param max_sentence_length: Longer sentences will be truncated.
    :param max_sentences: Longer document will be truncated
    :param max_length: maximum size for a document. It could be less than max_sequence_length * max_sentences.
        If None, then the max_length will be max_sequence_length * max_sentences.
        Default None
    :param max_position_embeddings: Maximum allowed sequence length for the model. Default 4096.
    :param intermediate_size: Default 1024 like ElectraSmall
    :param hidden_act: Activation for encoder. Default "gelu" like Electra
    :param hidden_dropout_prob: Dropout probability for FCN layers for encoder.
    :param logging_steps: Logging steps (show train loss) in number of samples (and not in gradient updates)
    :param eval_steps: Evaluate steps (evaluate and show val loss) in number of samples (and not in gradient updates)
        If a float is provided, then the evaluation step is every eval_steps * steps per epoch.
    :param save_steps: Number of steps to perform a checkpoint. Default every 800 steps (equivalent every hour)
    :param weight_decay: Default 0.01 for ElectraSmall
    :param warmup_steps: Default 10000 samples like ElectraSmall
    :param num_proc: Number of processor for data preprocessing steps
    :param discriminant_loss_factor: factor for the discriminant loss. Default 50 as Electra
    :param generator_size: factor to decrease generator complexity compared to the discriminant. Default 25%
    :param generator_layer_size: factor to decrease generator complexity
        compared to the discriminant (number of layers). Default 1.0
        compared to the discriminant at token level (only for num_layers). Default 25%
    :param mlm_replacement_probability: Probability of replacement for selected tokens. Default: 0.85
    :param mlm_probability: Probability to corrupt tokens. Default: 0.15
    :param temperature: Temperature for the MLM sampling. Default 1 like Electra
    :param training_set_size: Default -1 to use all possible training set.
    :param seed: Seed used in everything except for dataset split and shuffling
    :param training_set_random_seed: Seed used only for shuffle the training set
    :param valid_test_split_random_seed: Seed used only for the split between test and validation sets.
        Required to ensure the validation set remains the same if seed is used.
    :param attention_probs_dropout_prob
    :param tensorboard_tracking_folder:
    :param pretrain_path: Path to the pretrained checkpoints. Model weights and optimizer states will be loaded.
        This will allow to continue training.
    :param gradient_checkpointing: Default False
    :param metric_to_train_generator: Metric to monitor for deciding to train the generator or not
    :param threshold_to_train_generator: Threshold to switch on the generator training (training if above). Default: 0.0
    :param relative_position_embeddings: Use relative position embeddings. Default True
    :param sequence_embeddings: Use sequence embeddings (number of sentences). Default True
    :return:
    """
    # Fix the random seed first (see src.models.utils.my_set_seed) before any
    # model instantiation or dataset shuffling below.
    my_set_seed(seed=seed)
    if dataset_name is None:
        dataset_name = ["openwebtext"]
    if dataset_sampling is None:
        dataset_sampling = [1.0]
    if experiment_name is None:
        experiment_name = "pretraining"
    tokenizer = get_tokenizer(tokenizer_path=tokenizer_path)
    # Dataset specs are "name" or "name-subset" strings; normalize either a single
    # string or a list of strings into (name, subset) pairs for get_dataset_info.
    if isinstance(dataset_name, List):
        dataset_name = [name.split("-") for name in dataset_name]
        dataset_name = [(name[0], name[1] if len(name) > 1 else None) for name in dataset_name]
        dataset_infos = [get_dataset_info(dataset_name=name, dataset_subset=subset) for name, subset in dataset_name]
    else:
        dataset_name = dataset_name.split("-")
        dataset_name, dataset_subset = (dataset_name[0], dataset_name[1] if len(dataset_name) > 1 else None)
        dataset_infos = [get_dataset_info(dataset_name=dataset_name, dataset_subset=dataset_subset)]
    # Assemble the model configuration directly from the CLI arguments above.
    config = DocumentElectraConfig(
        vocab_size=tokenizer.get_vocab_size(),
        hidden_act=hidden_act,
        hidden_dropout_prob=hidden_dropout_prob,
        attention_probs_dropout_prob=attention_probs_dropout_prob,
        embedding_size=embedding_size,
        hidden_size=hidden_size,
        intermediate_size=intermediate_size,
        num_attention_heads=num_attention_heads,
        num_hidden_layers=num_hidden_layers,
        layer_depth_offset=layer_depth_offset,
        max_sentence_length=max_sentence_length,
        max_sentences=max_sentences,
        max_position_embeddings=max_position_embeddings,
        max_length=max_length,
        gradient_checkpointing=gradient_checkpointing,
        generator_size=generator_size,
        generator_layer_size=generator_layer_size,
        mlm_probability=mlm_probability,
        mlm_replacement_probability=mlm_replacement_probability,
        temperature=temperature,
        discriminant_loss_factor=discriminant_loss_factor,
        chunk_length=chunk_length,
        relative_position_embeddings=relative_position_embeddings,
        sequence_embeddings=sequence_embeddings,
    )
    model = DocumentElectraPretrainingModel(config=config)
    # Torch Script is not compatible with Transformers model
    # See https://github.com/huggingface/transformers/pull/6846
    # traced_model = torch.jit.script(model)
    train_set, val_set, test_set = make_datasets_document_electra(
        tokenizer=tokenizer,
        dataset_info=dataset_infos,
        cache_dir=cache_dir,
        dataset_dir=dataset_dir,
        dataset_sampling=dataset_sampling,
        training_set_size=training_set_size,
        # 100 steps for evaluation in ElectraSmall
        # We use 200 documents instead of 1 for ElectraSmall to have a better estimate (no random shuffle in our case)
        # https://github.com/google-research/electra/blob/79111328070e491b287c307906701ebc61091eb2/configure_pretraining.py#L50
        validation_set_size=100 * per_device_eval_batch_size,
        test_set_size=0,
        training_set_random_seed=training_set_random_seed,
        valid_test_split_random_seed=valid_test_split_random_seed,
        num_proc=1)  # Multiple processors will need a dataset copy (so it increase the disk space requirement)
    train_data_collator = DataCollatorForDocumentElectra(config=config)
    effective_train_batch_size = per_device_train_batch_size * gradient_accumulation_steps
    # Build the (extended) HuggingFace training arguments; MyTrainingArguments
    # carries extra custom fields (optimizer name, seeds, generator gating) so
    # they are persisted to TensorBoard hparams.
    training_args = MyTrainingArguments(
        output_dir=get_tensorboard_experiment_id(experiment_name=experiment_name,
                                                 tensorboard_tracking_folder=output_dir),
        overwrite_output_dir=True,
        # num_train_epochs=epochs,
        # equivalent to 1M (original paper use 20M with 128 batches instead of 6M in this preprocessing)
        # This gives an equivalent of 6.4 epochs for these 1M steps
        # NOTE(review): 1e6 is a float literal; transformers declares max_steps as
        # an int — consider int(1e6). TODO confirm downstream tolerance.
        max_steps=1e6,
        per_device_train_batch_size=per_device_train_batch_size,
        per_device_eval_batch_size=per_device_eval_batch_size,
        # save/logging/eval cadences are given in samples; convert to optimizer steps.
        save_steps=int(save_steps / effective_train_batch_size),
        save_total_limit=200,
        learning_rate=lr,
        fp16=True,
        max_grad_norm=max_grad_norm,
        gradient_accumulation_steps=gradient_accumulation_steps,
        logging_dir=get_tensorboard_experiment_id(experiment_name=experiment_name,
                                                  tensorboard_tracking_folder=tensorboard_tracking_folder),
        logging_steps=int(logging_steps / effective_train_batch_size),
        eval_steps=(int(eval_steps / effective_train_batch_size) if type(eval_steps) == int
                    else int(eval_steps * (len(train_set) / effective_train_batch_size))),
        do_eval=True,
        do_train=True,
        evaluation_strategy=EvaluationStrategy.STEPS,
        seed=seed,
        # Custom attributes to keep in TB
        pretrain_path=str(pretrain_path),
        optimizer="AdamW",
        adam_beta1=0.9,
        adam_beta2=0.999,
        adam_epsilon=1e-6,  # If lower, this may create an issue for amp.
        warmup_steps=warmup_steps,
        weight_decay=weight_decay,
        dataloader_num_workers=num_proc,
        training_set_size=training_set_size,
        training_set_random_seed=training_set_random_seed,
        valid_test_split_random_seed=valid_test_split_random_seed,
        metric_to_train_generator=metric_to_train_generator,
        threshold_to_train_generator=threshold_to_train_generator
    )
    # Instantiate TensorBoard
    # Required to be done outside Trainer in order to log in TensorBoard via custom metrics (for non scalar logging)
    # And also to keep more hparams in TensorBoard
    tb_writer = MySummaryWriter(log_dir=training_args.logging_dir)
    tb_writer.add_text("config", config.to_json_string())
    tb_writer.add_text("args", training_args.to_json_string())
    tb_writer.add_hparams({**to_sanitized_dict(config),
                           **to_sanitized_dict(training_args),
                           }, metric_dict={})
    trainer = MyTrainer(
        model=model,
        args=training_args,
        data_collator=train_data_collator,
        train_dataset=train_set,
        eval_dataset=val_set,
        compute_metrics=ComputeMetricsDocumentElectraForPretraining(tb_writer=tb_writer,
                                                                    hparams={**to_sanitized_dict(config),
                                                                             **to_sanitized_dict(training_args),
                                                                             },
                                                                    sentence_predictions=False),
        mask_token_id=config.mask_token_id,
        tb_writer=tb_writer
    )
    if is_wandb_available():
        # Workaround to force the creation of a new Wandb callback for each run if we launches several run
        trainer.pop_callback(WandbCallback)
        trainer.add_callback(MyWandbCallback)
    # Optionally resume: load model weights from pretrain_path, then pass the same
    # path to trainer.train() so MyTrainer.train restores optimizer/scheduler state
    # from optimizer.pt / scheduler.pt in that directory.
    if pretrain_path is not None:
        pretrain_path = Path(pretrain_path)
        assert pretrain_path.exists() and pretrain_path.is_dir()
        logger.info(f"Load pretrained weights from {pretrain_path}")
        pretrain_dict = torch.load(str(pretrain_path / "pytorch_model.bin"))
        model.load_state_dict(state_dict=pretrain_dict)  # Default is strict=True
        logger.info(f"Load pretrained weights from {pretrain_path} Completed")
        trainer.train(model_path=str(pretrain_path))
    else:
        trainer.train()
    # Final evaluation on the validation set; returned dict is also logged.
    eval_output: Dict[str, float] = trainer.evaluate()
    logger.info(eval_output)
    return eval_output
class MyTrainer(Trainer):
    """
    Custom method to include:
        - Logging of several losses
        - Management of variable size of outputs (binary classification and token level and sentence level)
        - Monitoring of one metric to decide to train the generator or not
    """
    args: MyTrainingArguments
    """
    training_step should return more losses
    compute_loss also
    Train should use the new type from Training step
    """
    def training_step(self, model: nn.Module,
                      inputs: Dict[str, Union[torch.Tensor, Any]]) -> DocumentElectraPretrainingModelOutput:
        """
        Perform a training step on a batch of inputs.
        Extra logic: keep track of more than one loss
        Args:
            model (:obj:`nn.Module`):
                The model to train.
            inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument :obj:`labels`. Check your model's documentation for all accepted arguments.
        Return:
            :obj:`torch.Tensor`: The tensor with training loss on this batch.
        """
        # Backward-compat shim inherited from the upstream HF Trainer.
        if hasattr(self, "_training_step"):
            warnings.warn(
                "The `_training_step` method is deprecated and won't be called in a future version, define `training_step` in your subclass.",
                FutureWarning,
            )
            return self._training_step(model, inputs, self.optimizer)
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.args.fp16:
            with autocast():
                output: DocumentElectraPretrainingModelOutput = self.compute_loss(model, inputs)
        else:
            output: DocumentElectraPretrainingModelOutput = self.compute_loss(model, inputs)
        # Scale every tracked loss for gradient accumulation.
        # NOTE(review): this also divides by n_gpu, unlike the upstream Trainer
        # which uses .mean() for multi-gpu — confirm intended.
        if self.args.gradient_accumulation_steps > 1:
            output.loss /= (self.args.gradient_accumulation_steps * self.args.n_gpu)
            output.generator_loss /= (self.args.gradient_accumulation_steps * self.args.n_gpu)
            output.discriminant_loss /= (self.args.gradient_accumulation_steps * self.args.n_gpu)
            output.discriminant_token_loss /= (self.args.gradient_accumulation_steps * self.args.n_gpu)
        if self.args.fp16:
            self.scaler.scale(output.loss).backward()
        else:
            output.loss.backward()
        # Detach so callers accumulate a value, not a graph reference.
        output.loss = output.loss.detach()
        return output
    def prediction_loop(
            self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None
    ) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
        Works both with or without labels.
        """
        if hasattr(self, "_prediction_loop"):
            warnings.warn(
                "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
                FutureWarning,
            )
            return self._prediction_loop(dataloader, description, prediction_loss_only=prediction_loss_only)
        if not isinstance(dataloader.dataset, collections.abc.Sized):
            raise ValueError("dataset must implement __len__")
        prediction_loss_only = (
            prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
        )
        model: DocumentElectraPretrainingModel = self.model
        assert isinstance(model, DocumentElectraPretrainingModel)
        # multi-gpu eval
        if self.args.n_gpu > 1:
            # noinspection PyTypeChecker
            model = torch.nn.DataParallel(model)
        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info("***** Running %s *****", description)
        logger.info(" Num examples = %d", num_examples)
        logger.info(" Batch size = %d", batch_size)
        # Custom logic
        # Scalar loss accumulators plus per-batch prediction/label buffers for the
        # generator (MLM) and the discriminant (fake-token detection).
        losses_host: torch.Tensor = torch.tensor(0.0, device=model.device)
        # Generator
        generator_losses_host: torch.Tensor = torch.tensor(0.0, device=model.device)
        mlm_preds_host: List[torch.Tensor] = []
        sampled_mlm_preds_host: List[torch.Tensor] = []
        generator_labels_host: List[torch.Tensor] = []
        # Discriminant
        discriminant_losses_host: torch.Tensor = torch.tensor(0.0, device=model.device)
        discriminant_token_losses_host: torch.Tensor = torch.tensor(0.0, device=model.device)
        is_fake_preds_host: List[torch.Tensor] = []
        is_fake_labels_host: List[torch.Tensor] = []
        # End Custom logic
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        self.callback_handler.eval_dataloader = dataloader
        for step, inputs in enumerate(dataloader):
            # Custom logic
            output = self.prediction_step(model, inputs, prediction_loss_only)
            assert output.loss is not None
            losses_host += output.loss.detach()
            # Generator
            generator_losses_host += output.generator_loss
            # Only corrupted tokens
            mask_tokens_mask = output.labels_generator.reshape(-1).ne(-100)
            mlm_preds_host += [output.mlm_input_ids.reshape(-1)[mask_tokens_mask]]
            sampled_mlm_preds_host += [output.sampled_input_ids.reshape(-1)[mask_tokens_mask]]
            generator_labels_host += [output.labels_generator.reshape(-1)[mask_tokens_mask]]
            # Discriminant
            discriminant_losses_host += output.discriminant_loss
            discriminant_token_losses_host += output.discriminant_token_loss
            # -100 marks positions excluded from the loss; keep only real tokens.
            non_special_tokens = output.labels_at_token_level.reshape(-1).ne(-100)
            is_fake_preds_host += [output.is_fake_logits.reshape(-1)[non_special_tokens]]
            is_fake_labels_host += [output.labels_at_token_level.reshape(-1)[non_special_tokens]]
            # End Custom Logic
            self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
        # Custom logic
        # Concatenate all gathered tensors and hand them to the custom metrics callable.
        metrics = self.compute_metrics(predictions=[torch.cat(is_fake_preds_host).cpu(),
                                                    torch.cat(mlm_preds_host).cpu(),
                                                    torch.cat(sampled_mlm_preds_host).cpu()],
                                       labels=[torch.cat(is_fake_labels_host).cpu(),
                                               torch.cat(generator_labels_host).cpu(),
                                               torch.cat(generator_labels_host).cpu()])
        metrics["loss"] = (losses_host / num_examples).cpu().item()
        metrics["generator_loss"] = (generator_losses_host / num_examples).cpu().item()
        metrics["discriminant_loss"] = (discriminant_losses_host / num_examples).cpu().item()
        metrics["discriminant_token_loss"] = (discriminant_token_losses_host / num_examples).cpu().item()
        # Prefix all keys with eval_
        for key in list(metrics.keys()):
            if not key.startswith("eval_"):
                metrics[f"eval_{key}"] = metrics.pop(key)
        # Toggle generator training on/off by comparing the monitored metric
        # (args.metric_to_train_generator) against args.threshold_to_train_generator.
        if model.train_generator and (self.args.metric_to_train_generator in metrics) and (
                metrics[self.args.metric_to_train_generator] <= self.args.threshold_to_train_generator):
            model.train_generator = False
        if not model.train_generator and (self.args.metric_to_train_generator in metrics) and (
                metrics[self.args.metric_to_train_generator] > self.args.threshold_to_train_generator):
            model.train_generator = True
        metrics[f"train_generator"] = float(model.train_generator)
        # Metrics contain already all information. No need to add predictions and labels.
        # Anyway, not possible in this case since the dimension differs between labels
        return PredictionOutput(predictions=np.array([]), label_ids=np.array([]), metrics=metrics)
        # End Custom Logic
    # noinspection PyUnresolvedReferences
    def train(self, model_path: Optional[str] = None,
              trial: Dict[str, Any] = None):  # Comment optuna library to prevent dependencies
        # trial: Union["optuna.Trial", Dict[str, Any]] = None):
        """
        Main training entry point.
        Args:
            model_path (:obj:`str`, `optional`):
                Local path to the model if the model to train has been instantiated from a local path. If present,
                training will resume from the optimizer/scheduler states loaded here.
            trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
                The trial run or the hyperparameter dictionary for hyperparameter search.
        """
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        # Model re-init
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            set_seed(self.args.seed)
            model = self.call_model_init(trial)
            self.model = model.to(self.args.device)
            # Reinitializes optimizer and scheduler
            self.optimizer: Optional[Optimizer] = None
            # noinspection PyUnresolvedReferences,PyProtectedMember
            self.lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None
        # Keeping track whether we can can len() on the dataset or not
        train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()
        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        if train_dataset_is_sized:
            num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            if self.args.max_steps > 0:
                max_steps = self.args.max_steps
                num_train_epochs = int(self.args.max_steps // num_update_steps_per_epoch + int(
                    self.args.max_steps % num_update_steps_per_epoch > 0
                ))
            else:
                max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = int(math.ceil(self.args.num_train_epochs))
        else:
            # see __init__. max_steps is set when the dataset has no __len__
            max_steps = self.args.max_steps
            num_train_epochs = 1
            num_update_steps_per_epoch = max_steps
        self.create_optimizer_and_scheduler(num_training_steps=max_steps)
        self.state = TrainerState()
        # Check if saved optimizer or scheduler states exist
        if (
                model_path is not None
                and os.path.isfile(os.path.join(model_path, "optimizer.pt"))
                and os.path.isfile(os.path.join(model_path, "scheduler.pt"))
        ):
            # Load in optimizer and scheduler states
            self.optimizer.load_state_dict(
                torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device)
            )
            with warnings.catch_warnings(record=True) as caught_warnings:
                self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt")))
            reissue_pt_warnings(caught_warnings)
        # Mixed precision training with apex (torch < 1.6)
        model = self.model
        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Distributed training (should be after apex fp16 initialization)
        if self.args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank],
                output_device=self.args.local_rank,
                find_unused_parameters=(
                    not getattr(model.config, "gradient_checkpointing", False)
                    if isinstance(model, PreTrainedModel)
                    else True
                ),
            )
            # find_unused_parameters breaks checkpointing as per
            # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
        # Train!
        if is_torch_tpu_available():
            total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()
        else:
            # noinspection PyUnresolvedReferences
            total_train_batch_size = (
                    self.args.train_batch_size
                    * self.args.gradient_accumulation_steps
                    * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)
            )
        num_examples = (
            self.num_examples(train_dataloader)
            if train_dataset_is_sized
            else total_train_batch_size * self.args.max_steps
        )
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", num_examples)
        logger.info(" Num Epochs = %d", num_train_epochs)
        logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
        logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size)
        logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
        logger.info(" Total optimization steps = %d", max_steps)
        self.state.epoch = 0
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        # Check if continuing training from a checkpoint
        if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")):
            self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json"))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            steps_trained_in_current_epoch = self.state.global_step % num_update_steps_per_epoch
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", self.state.global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()
        tr_loss = torch.tensor(0.0).to(self.args.device)
        # Custom logic
        # Extra per-loss accumulators flushed into `logs` at each optimizer step.
        # NOTE(review): tr_discriminant_sentence_loss is initialized but never
        # updated or logged anywhere below.
        logs = {}
        tr_generator_loss, tr_discriminant_loss = 0.0, 0.0
        tr_discriminant_token_loss, tr_discriminant_sentence_loss = 0.0, 0.0
        # End Custom Logic
        self._total_flos = self.state.total_flos
        model.zero_grad()
        self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)
        for epoch in range(epochs_trained, num_train_epochs):
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            if is_torch_tpu_available():
                parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(
                    self.args.device
                )
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader
            steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps
            self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control)
            for step, inputs in enumerate(epoch_iterator):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue
                if (step + 1) % self.args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)
                # Custom Logic
                # On non-sync micro-batches under DDP, skip gradient all-reduce.
                if (
                        ((step + 1) % self.args.gradient_accumulation_steps != 0)
                        and self.args.local_rank != -1
                ):
                    with model.no_sync():
                        output = self.training_step(model, inputs)
                else:
                    output = self.training_step(model, inputs)
                tr_loss += output.loss
                tr_generator_loss += output.generator_loss
                tr_discriminant_loss += output.discriminant_loss
                tr_discriminant_token_loss += output.discriminant_token_loss
                # End Custom Logic
                self._total_flos += self.floating_point_ops(inputs)
                if (step + 1) % self.args.gradient_accumulation_steps == 0 or (
                        # last step in epoch but step is always smaller than gradient_accumulation_steps
                        self.args.gradient_accumulation_steps >= steps_in_epoch == (step + 1)
                ):
                    if self.args.fp16:
                        self.scaler.unscale_(self.optimizer)
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
                    if is_torch_tpu_available():
                        xm.optimizer_step(self.optimizer)
                    elif self.args.fp16:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        self.optimizer.step()
                    self.lr_scheduler.step()
                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)
                    # Custom Logic
                    # Flush per-loss accumulators into the logs dict, then reset them.
                    logs["generator_loss"] = tr_generator_loss
                    logs["discriminant_loss"] = tr_discriminant_loss
                    logs["discriminant_token_loss"] = tr_discriminant_token_loss
                    self._maybe_log_save_evalute(tr_loss, model, trial, epoch, logs=logs)
                    logs = {}
                    tr_generator_loss, tr_discriminant_loss = 0.0, 0.0
                    tr_discriminant_token_loss = 0.0
                    # End Custom Logic
                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break
            self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)
            # Custom Logic
            logs["generator_loss"] = tr_generator_loss
            logs["discriminant_loss"] = tr_discriminant_loss
            logs["discriminant_token_loss"] = tr_discriminant_token_loss
            self._maybe_log_save_evalute(tr_loss, model, trial, epoch, logs=logs)
            # End Custom Logic
            if self.args.tpu_metrics_debug or self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break
        if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")
        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            logger.info(
                f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
            )
            if isinstance(model, PreTrainedModel):
                self.model = model.from_pretrained(self.state.best_model_checkpoint)
                self.model = self.model.to(self.args.device)
            else:
                state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))
                self.model.load_state_dict(state_dict)
        self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)
        return TrainOutput(self.state.global_step, tr_loss.item() / self.state.global_step)
    def prediction_step(
            self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool
    ) -> DocumentElectraPretrainingModelOutput:
        """
        Extra logic: add the parameter to return all outputs and also track all losses (not just the combined one)
        :param model:
        :param inputs:
        :param prediction_loss_only:
        :return:
        """
        inputs = self._prepare_inputs(inputs)
        # loss_only=False so the model returns predictions/labels, not just losses.
        inputs["loss_only"] = False
        with torch.no_grad():
            return model(**inputs)
    def compute_loss(self, model, inputs) -> DocumentElectraPretrainingModelOutput:
        """
        Extra logic: return all combined loss, generator loss and discriminator loss.
        The default HuggingFace implementation is only for one loss to monitor.
        :param model:
        :param inputs:
        :return:return all combined loss, generator loss and discriminator loss.
        """
        inputs["loss_only"] = True
        outputs: DocumentElectraPretrainingModelOutput = model(**inputs)
        return outputs
    # noinspection SpellCheckingInspection
    def _maybe_log_save_evalute(self, tr_loss, model, trial, epoch, logs=None):
        """
        Custom logic is to already provide a logs dictionary with the additional losses
        NOTE(review): the name keeps the original misspelling ("evalute") that the
        call sites in train() use; renaming would need both to change together.
        :param tr_loss:
        :param model:
        :param trial:
        :param epoch:
        :param logs:
        :return:
        """
        if logs is None:
            logs = {}
        if self.control.should_log:
            tr_loss_scalar = tr_loss.item()
            logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / self.args.logging_steps
            # backward compatibility for pytorch schedulers
            logs["learning_rate"] = (
                self.lr_scheduler.get_last_lr()[0]
                if version.parse(torch.__version__) >= version.parse("1.4")
                else self.lr_scheduler.get_lr()[0]
            )
            self._logging_loss_scalar = tr_loss_scalar
            self.log(logs)
        metrics = None
        if self.control.should_evaluate:
            metrics = self.evaluate()
            self._report_to_hp_search(trial, epoch, metrics)
            self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)
| [
37811,
30150,
966,
329,
262,
2181,
24674,
4876,
526,
15931,
198,
11748,
17268,
198,
11748,
18931,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
14601,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
360,
713,
11,
4479,
11,... | 2.353538 | 18,346 |
import os
import portalocker
from deep_architect.contrib.communicators.communicator import Communicator
from deep_architect.contrib.communicators.file_utils import (consume_file,
read_file,
write_file)
| [
11748,
28686,
198,
11748,
17898,
12721,
198,
198,
6738,
2769,
62,
998,
5712,
13,
3642,
822,
13,
10709,
44549,
13,
10709,
26407,
1330,
4440,
26407,
198,
6738,
2769,
62,
998,
5712,
13,
3642,
822,
13,
10709,
44549,
13,
7753,
62,
26791,
1... | 1.821229 | 179 |
astr = 'python'
alist = [10, 20, 30]
atuple = ('tom', 'jerry')
adict = {'name': 'bob', 'age': 20}
for ch in astr:
print(ch)
for i in alist:
print(i)
for name in atuple:
print(name)
for key in adict:
print('%s: %s' % (key, adict[key]))
| [
459,
81,
796,
705,
29412,
6,
198,
49845,
796,
685,
940,
11,
1160,
11,
1542,
60,
198,
265,
29291,
796,
19203,
39532,
3256,
705,
73,
6996,
11537,
198,
324,
713,
796,
1391,
6,
3672,
10354,
705,
65,
672,
3256,
705,
496,
10354,
1160,
9... | 2.073171 | 123 |
from django.contrib import admin
from blog.models import Comentarios
# Register your models here.
admin.site.register(Comentarios)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
4130,
13,
27530,
1330,
955,
298,
13010,
198,
198,
2,
17296,
534,
4981,
994,
13,
198,
198,
28482,
13,
15654,
13,
30238,
7,
5377,
298,
13010,
8,
198
] | 3.5 | 38 |
import asyncio
from collections import namedtuple
import pytest
from .route import parse, MatcherEntry, Segment, SegmentType, Route, \
compile, roundto8
@pytest.mark.parametrize('pattern,result', [
('/', [('exact', '/')]),
('/{{a}}', [('exact', '/{a}')]),
('{a}', [('placeholder', 'a')]),
('a/{a}', [('exact', 'a/'), ('placeholder', 'a')]),
('{a}/a', [('placeholder', 'a'), ('exact', '/a')]),
('{a}/{{a}}', [('placeholder', 'a'), ('exact', '/{a}')]),
('{a}/{b}', [('placeholder', 'a'), ('exact', '/'), ('placeholder', 'b')])
])
@pytest.mark.parametrize('pattern,error', [
('{a', 'Unbalanced'),
('{a}/{b', 'Unbalanced'),
('{a}a', 'followed by'),
('{a}/{a}', 'Duplicate')
])
DecodedRoute = namedtuple(
'DecodedRoute',
'route_id,handler_id,coro_func,simple,placeholder_cnt,segments,methods')
@pytest.mark.parametrize('route', [
Route('/', handler, []),
Route('/', coro, ['GET']),
Route('/test/{hi}', handler, []),
Route('/test/{hi}', coro, ['POST']),
Route('/tést', coro, ['POST'])
], ids=Route.describe)
| [
11748,
30351,
952,
198,
6738,
17268,
1330,
3706,
83,
29291,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
764,
38629,
1330,
21136,
11,
6550,
2044,
30150,
11,
1001,
5154,
11,
1001,
5154,
6030,
11,
18956,
11,
3467,
198,
220,
220,
220,
1... | 2.254132 | 484 |
import gevent.monkey
gevent.monkey.patch_ssl()
from mq_handler.hupun_stock_bills.model.erp_receipt_info import ErpReceiptInfo
from mq_handler.hupun_stock_bills.page.query_outbound import QueryOutbound
from mq_handler.hupun_stock_bills.page.outbound import Outbound
import json
from mq_handler.base import Base
from pyspider.helper.date import Date
import traceback
from pyspider.helper.retry import Retry
from alarm.page.ding_talk import DingTalk
from mq_handler.hupun_stock_bills.config import get_ding_token
class AddOutbound(Base):
'''
创建 出库单
'''
@Retry.retry_parameter(4, sleep_time=10)
@Retry.retry_parameter(4, sleep_time=10)
@staticmethod
| [
11748,
4903,
1151,
13,
49572,
198,
469,
1151,
13,
49572,
13,
17147,
62,
45163,
3419,
198,
6738,
285,
80,
62,
30281,
13,
71,
929,
403,
62,
13578,
62,
65,
2171,
13,
19849,
13,
263,
79,
62,
260,
344,
10257,
62,
10951,
1330,
5256,
79,... | 2.52809 | 267 |
import pymongo
import re
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from json_parser import parser
import numpy as np
query()
| [
11748,
279,
4948,
25162,
198,
11748,
302,
198,
11748,
28686,
11,
17597,
11,
1040,
806,
198,
14421,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
7,
1040,
806,
13,
1136,
7753,
7,
1040,
806,
... | 2.647619 | 105 |
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httpx
import pytest
from rubrix import load
from rubrix.client.models import TextClassificationRecord
from rubrix.client.sdk.text_classification.models import (
CreationTextClassificationRecord,
TextClassificationBulkData,
)
from rubrix.labeling.text_classification.rule import Rule, RuleNotAppliedError
from tests.server.test_helpers import client
@pytest.fixture(scope="module")
@pytest.mark.parametrize(
"name,expected", [(None, "query_string"), ("test_name", "test_name")]
)
| [
2,
220,
19617,
28,
40477,
12,
23,
198,
2,
220,
15069,
33448,
12,
25579,
11,
262,
31517,
1872,
311,
13,
43,
13,
1074,
13,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
1... | 3.354167 | 336 |
# myTeam.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from captureAgents import CaptureAgent
import distanceCalculator
import random, time, util
from game import Directions
from game import Actions
import game
from util import PriorityQueue
from util import Queue
#################
# Team creation #
#################
def createTeam(firstIndex, secondIndex, isRed,
first = 'AtkAgent', second = 'DefAgent'):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
index numbers. isRed is True if the red team is being created, and
will be False if the blue team is being created.
As a potentially helpful development aid, this function can take
additional string-valued keyword arguments ("first" and "second" are
such arguments in the case of this function), which will come from
the --redOpts and --blueOpts command-line arguments to capture.py.
For the nightly contest, however, your team will be created without
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""
# The following line is an example only; feel free to change it.
return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
class DummyAgent(CaptureAgent):
"""
A Dummy agent to serve as an example of the necessary agent structure.
You should look at baselineTeam.py for more details about how to
create an agent as this is the bare minimum.
"""
def registerInitialState(self, gameState):
"""
This method handles the initial setup of the
agent to populate useful fields (such as what team
we're on).
A distanceCalculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.getDistance(p1, p2)
IMPORTANT: This method may run for at most 15 seconds.
"""
'''
Make sure you do not delete the following line. If you would like to
use Manhattan distances instead of maze distances in order to save
on initialization time, please take a look at
CaptureAgent.registerInitialState in captureAgents.py.
'''
self.startpos=gameState.getAgentPosition(self.index)
CaptureAgent.registerInitialState(self, gameState)
self.midwidth = gameState.data.layout.width / 2
self.carryfoods = 0
self.foodnum = len(self.getFood(gameState).asList())
self.foods = self.getFood(gameState).asList()
self.hisdefendfoods = self.getFoodYouAreDefending(gameState).asList()
self.height = gameState.data.layout.height
self.hispos = None
initmap = InitMap(self,gameState)
self.safefoodlist,self.dangerfoodlist = initmap.gainlist()
self.deadends = initmap.gaindeadends()
self.indanger = False
'''
Your initialization code goes here, if you need any.
'''
def chooseAction(self, gameState):
"""
Picks among actions randomly.
"""
'''
You should change this in your own agent.
'''
problem = foodsearchproblem(gameState,self)
return self.astarsearch(problem,gameState,self.foodhuristic)[0]
############################################
# Attack Agent #
############################################
############################################
# Defence Agent #
############################################
############################################
# Search Problem #
############################################
######################################
# Map Initializer #
######################################
| [
2,
616,
15592,
13,
9078,
198,
2,
45337,
198,
2,
10483,
26426,
6188,
25,
220,
921,
389,
1479,
284,
779,
393,
9117,
777,
4493,
329,
198,
2,
9856,
4959,
2810,
326,
357,
16,
8,
345,
466,
407,
14983,
393,
7715,
198,
2,
8136,
11,
357,... | 3.374315 | 1,277 |
import sqlite3
from flask import g
DATABASE = '/path/to/database.db'
@app.teardown_appcontext | [
11748,
44161,
578,
18,
198,
6738,
42903,
1330,
308,
198,
198,
35,
1404,
6242,
11159,
796,
31051,
6978,
14,
1462,
14,
48806,
13,
9945,
6,
198,
198,
31,
1324,
13,
660,
446,
593,
62,
1324,
22866
] | 2.638889 | 36 |
import csv
import logging
import math
def can_float(value):
"""Takes in a string a determines whether or not it
can be converted to a float.
Args:
value: the string to check if can be converted to float
Returns:
bool: True if can convert to float, False otherwise
Raises:
ValueError: if value cannot be converted to float
"""
try:
float(value)
return True
except ValueError:
logging.warning('Warning: Non-float value found in dataset. Skipping '
'this time, voltage pair')
return False
def get_data(filename_arg):
"""Open the data file of interest and pulls time and voltage values
from the data file to be used for calculating the heart rate.
Args:
filename_arg: name of test data file to be tested
Returns:
time: list containing the time values of the data set (all floats)
voltage: list containing the voltage values of the data
set (all floats)
Raises:
IOError: if file associated with filename_arg cannot be opened
"""
time = []
voltage = []
try:
open(filename_arg, 'r')
logging.info('Successfully opened file: %s ' % filename_arg)
except IOError as inst:
logging.error('Error: File could not be opened. Please try again')
raise inst
data_file = open(filename_arg, 'r')
data_table = csv.reader(data_file, delimiter=',')
for index in data_table:
if can_float(index[0]) and can_float(index[1]):
if math.isnan(float(index[0])) or math.isnan(float(index[1])):
logging.warning('Warning: Null value found in dataset. '
'Skipping this time, voltage pair')
else:
time.append(float(index[0]))
voltage.append(float(index[1]))
return time, voltage
| [
11748,
269,
21370,
198,
11748,
18931,
198,
11748,
10688,
628,
198,
4299,
460,
62,
22468,
7,
8367,
2599,
198,
220,
220,
220,
37227,
51,
1124,
287,
257,
4731,
257,
15947,
1771,
393,
407,
340,
198,
220,
220,
220,
460,
307,
11513,
284,
... | 2.283315 | 893 |
import hashlib
import torch
import torch.nn as nn
class Model(nn.Module):
""" Class representing sampleable neural network model """
def hashsummary(self):
""" Print a model summary - checksums of each layer parameters """
children = list(self.children())
result = []
for child in children:
result.extend(hashlib.sha256(x.detach().cpu().numpy().tobytes()).hexdigest() for x in child.parameters())
return result
def num_params(self):
""" Get the number of model parameters. """
return sum(p.numel() for p in self.parameters())
def set_params(self, theta, grad_val=None):
""" Set model parameters with theta. """
i = 0
for p in self.parameters():
j = p.numel()
p.data = theta[i:i+j].view(p.size())
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
if grad_val is not None:
p.grad = grad_val[i:i+j].view(p.size())
i += j
| [
11748,
12234,
8019,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
198,
4871,
9104,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
5016,
10200,
6291,
540,
17019,
3127,
2746,
37227,
628,
220,
220,
220,
825,
... | 2.204641 | 474 |
# TODO: create PyTorch adapter! | [
2,
16926,
46,
25,
2251,
9485,
15884,
354,
21302,
0
] | 3.1 | 10 |
from submission import Submission
| [
6738,
14498,
1330,
42641,
628,
198
] | 6 | 6 |
"""
The app pages stores some utility files:
- fixtures for the tests
- base.html
- home.html
"""
from django.views.generic import TemplateView
from django.shortcuts import render
| [
37811,
198,
220,
220,
220,
383,
598,
5468,
7000,
617,
10361,
3696,
25,
198,
220,
220,
220,
532,
34609,
329,
262,
5254,
198,
220,
220,
220,
532,
2779,
13,
6494,
198,
220,
220,
220,
532,
1363,
13,
6494,
198,
37811,
198,
198,
6738,
4... | 3.174603 | 63 |
# -*- coding: utf-8 -*-
try:
from BeautifulSoup import BeautifulSoup as bss
except:
from bs4 import BeautifulSoup as bs
try:
import urllib2
except:
import urllib.request as urllib2
import urllib
import re
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
28311,
25,
198,
197,
6738,
23762,
50,
10486,
1330,
23762,
50,
10486,
355,
275,
824,
198,
16341,
25,
198,
197,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
355,
275,
82,
... | 2.573171 | 82 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import math
res = 0
k1, k2 = [int(x) for x in input().split()]
n = int(input())
objects = []
for i in range(n):
x_i, y_i = [int(a) for a in input().split()]
objects.append((x_i, y_i))
prob_x = {}
prob_xy = {}
for i in range(n):
prob_x[objects[i][0]] = prob_x.get(objects[i][0], 0) + 1. / n
prob_xy[objects[i]] = prob_xy.get(objects[i], 0) + 1. / n
for item in prob_xy.items():
res = res - item[1] * (math.log(item[1]) - math.log(prob_x[item[0][0]]))
print(res)
| [
11748,
10688,
198,
198,
411,
796,
657,
198,
74,
16,
11,
479,
17,
796,
685,
600,
7,
87,
8,
329,
2124,
287,
5128,
22446,
35312,
3419,
60,
198,
77,
796,
493,
7,
15414,
28955,
198,
198,
48205,
796,
17635,
198,
1640,
1312,
287,
2837,
... | 2.094421 | 233 |
"""
Django settings for tock.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.utils.crypto import get_random_string
from pathlib import Path
from .env import env
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DATABASES = {}
ROOT_URLCONF = 'tock.urls'
WSGI_APPLICATION = 'tock.wsgi.application'
SECRET_KEY = env.get_credential('DJANGO_SECRET_KEY', get_random_string(50))
LOGIN_URL = '/auth/login'
LOGIN_REDIRECT_URL = '/'
CSRF_FAILURE_VIEW = 'tock.views.csrf_failure'
INSTALLED_APPS = (
'django.contrib.contenttypes', # may be okay to remove
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'uaa_client',
'tock.apps.TockAppConfig',
'projects.apps.ProjectsAppConfig',
'hours.apps.HoursAppConfig',
'employees.apps.EmployeesAppConfig',
'organizations.apps.OrganizationsAppConfig',
'api.apps.ApiAppConfig',
'utilization.apps.UtilizationAppConfig',
'rest_framework.authtoken',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'/templates/'
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'tock.context_processors.version_url',
'tock.context_processors.tock_settings_for_context',
],
},
},
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.security.SecurityMiddleware',
'uaa_client.middleware.UaaRefreshMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'tock.middleware.AutoLogout',
]
AUTHENTICATION_BACKENDS = (
'tock.remote_user_auth.TockUserBackend',
)
ALLOWED_HOSTS = ['*'] # proxied
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'UNICODE_JSON': False,
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
# use our CSV renderer instead of rest_framework_csv's
'api.renderers.PaginatedCSVRenderer',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
}
try:
VERSION = (Path(BASE_DIR) / '..' / 'VERSION').read_text().strip()
except IOError:
VERSION = 'main'
UAA_APPROVED_DOMAINS = {
'gsa.gov',
}
UAA_CLIENT_ID = env.get_credential('UAA_CLIENT_ID', None)
UAA_CLIENT_SECRET = env.get_credential('UAA_CLIENT_SECRET', None)
UAA_AUTH_URL = 'https://login.fr.cloud.gov/oauth/authorize'
UAA_TOKEN_URL = 'https://uaa.fr.cloud.gov/oauth/token' # nosec
UAA_LOGOUT_URL = 'https://login.fr.cloud.gov/logout.do'
AUTO_LOGOUT_DELAY_MINUTES = 60
TOCK_CHANGE_REQUEST_FORM = 'https://docs.google.com/forms/d/e/1FAIpQLSe5RDFOlyWm0IXv7_eXjZ3CEjaGj2CmM-_TNgqwMjdspfQz7Q/viewform'
# enable HSTS according to https://cyber.dhs.gov/bod/18-01/
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
# UTILIZATION
RECENT_TOCKS_TO_REPORT = 5
STARTING_FY_FOR_REPORTS_PAGE = 2019
RECENT_TIMECARDS_FOR_BILLABILITY = 4
HOURS_IN_A_REGULAR_WORK_WEEK = 40
DEFAULT_BILLABLE_EXPECTATION = 0.80
DEFAULT_EXPECTED_BILLABLE_HOURS = round(HOURS_IN_A_REGULAR_WORK_WEEK * DEFAULT_BILLABLE_EXPECTATION)
DEFAULT_EXPECTED_PROJECT_ALLOCATION = 1.00
PROJECT_ALLOCATION_CHOICES = (
(0, "---"),
(1.0, "100%"),
(0.5, "50%"),
(0.25, "25%"),
(0.125, "12.5%")
)
# WHITENOISE
WHITENOISE_ALLOW_ALL_ORIGINS = False | [
37811,
198,
35,
73,
14208,
6460,
329,
284,
694,
13,
198,
198,
1890,
517,
1321,
319,
428,
2393,
11,
766,
198,
5450,
1378,
31628,
13,
28241,
648,
404,
305,
752,
13,
785,
14,
268,
14,
16,
13,
22,
14,
4852,
873,
14,
33692,
14,
198,
... | 2.226512 | 2,150 |
"""
Wrapper for loading templates from the filesystem in a multi-tenant setting.
"""
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connection
from django.template.loaders.filesystem import Loader as BaseLoader
from django_tenants import utils
| [
37811,
198,
36918,
2848,
329,
11046,
24019,
422,
262,
29905,
287,
257,
5021,
12,
1452,
415,
4634,
13,
198,
37811,
628,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
12205,
525,... | 3.804878 | 82 |
import mypackage.foo
mypackage.bar.dostuff()
# <ref>
| [
11748,
616,
26495,
13,
21943,
198,
198,
1820,
26495,
13,
5657,
13,
67,
455,
1648,
3419,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1279,
5420,
29,
198
] | 1.914286 | 35 |
#******************************************************************************************************
# enumerations.py - Gbtc
#
# Copyright © 2021, Grid Protection Alliance. All Rights Reserved.
#
# Licensed to the Grid Protection Alliance (GPA) under one or more contributor license agreements. See
# the NOTICE file distributed with this work for additional information regarding copyright ownership.
# The GPA licenses this file to you under the MIT License (MIT), the "License"; you may not use this
# file except in compliance with the License. You may obtain a copy of the License at:
#
# http://opensource.org/licenses/MIT
#
# Unless agreed to in writing, the subject software distributed under the License is distributed on an
# "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. Refer to the
# License for the specific language governing permissions and limitations.
#
# Code Modification History:
# ----------------------------------------------------------------------------------------------------
# 01/31/2021 - J. Ritchie Carroll
# Generated original version of source code.
#
#******************************************************************************************************
from enum import IntEnum, Flag
# Defines needed enumerations for SNAPdb server commands and responses
| [
2,
17174,
17174,
17174,
2466,
1174,
198,
2,
220,
27056,
602,
13,
9078,
532,
402,
18347,
66,
198,
2,
198,
2,
220,
15069,
10673,
33448,
11,
24846,
9985,
10302,
13,
220,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
49962,
284,
262,
24... | 4.683849 | 291 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
setup(
name='crossbarhttprequests',
packages=['crossbarhttp'],
version='0.1.6',
description='This is a library for connecting to Crossbar.io HTTP Bridge Services using python-requests.',
author='Yomi Daniels',
license='MIT',
author_email='yomid4all@gmail.com',
url='https://github.com/ydaniels/crossbarhttprequests',
long_description=readme,
keywords=['wamp', 'crossbar', 'requests'],
install_requires=['requests', 'requests_futures'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
],
)
| [
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
961,
1326,
796,
1280,
10786,
15675,
11682,
13,
81,
301,
27691,
961,... | 2.665105 | 427 |
import hashlib
| [
11748,
12234,
8019,
628,
198
] | 3.4 | 5 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# rotate_servo_listener.py
# author: Kentaro Wada <www.kentaro.wada@gmail.com>
import os
import sys
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), '../../code'))
import rospy
from std_msgs.msg import Bool
from blink_led import BlinkLed
PIN_CTRL = 13
bl = BlinkLed(PIN_CTRL)
if __name__ == '__main__':
listener()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
23064,
62,
3168,
78,
62,
4868,
877,
13,
9078,
198,
2,
1772,
25,
8758,
12022,
370,
4763,
1279,
2503,
13,
74,
298,... | 2.389222 | 167 |
import random
SIZE = 8192
FACTOR = 0.5
SOURCE_NODE = 342
DEST_NODE = 4982
with open('test.txt', 'w') as f:
f.write(str(SIZE) + '\n' + str(SOURCE_NODE) + '\n' + str(DEST_NODE) + '\n')
for i in range(int(SIZE * FACTOR)):
f.write(str(random.randint(0, SIZE - 1)) + ' ' +
str(random.randint(0, SIZE - 1)) + ' ' +
str(random.randint(0, int(SIZE / FACTOR))))
if i < int(SIZE * FACTOR) - 1:
f.write('\n')
| [
11748,
4738,
198,
198,
33489,
796,
807,
17477,
198,
37,
10659,
1581,
796,
657,
13,
20,
198,
47690,
62,
45,
16820,
796,
44341,
198,
35,
6465,
62,
45,
16820,
796,
604,
4089,
17,
198,
198,
4480,
1280,
10786,
9288,
13,
14116,
3256,
705,... | 1.879518 | 249 |
# (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Provide shapely geometry <-> matplotlib path support.
See also `Shapely Geometric Objects <see_also_shapely>`_
and `Matplotlib Path API <http://matplotlib.org/api/path_api.html>`_.
.. see_also_shapely:
http://toblerity.org/shapely/manual.html#geometric-objects
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import matplotlib
from matplotlib.path import Path
import shapely.geometry as sgeom
def geos_to_path(shape):
"""
Create a list of :class:`matplotlib.path.Path` objects that describe
a shape.
Parameters
----------
shape
A list, tuple or single instance of any of the following
types: :class:`shapely.geometry.point.Point`,
:class:`shapely.geometry.linestring.LineString`,
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.multipoint.MultiPoint`,
:class:`shapely.geometry.multipolygon.MultiPolygon`,
:class:`shapely.geometry.multilinestring.MultiLineString`,
:class:`shapely.geometry.collection.GeometryCollection`,
or any type with a _as_mpl_path() method.
Returns
-------
paths
A list of :class:`matplotlib.path.Path` objects.
"""
if isinstance(shape, (list, tuple)):
paths = []
for shp in shape:
paths.extend(geos_to_path(shp))
return paths
if isinstance(shape, (sgeom.LineString, sgeom.Point)):
return [Path(np.column_stack(shape.xy))]
elif isinstance(shape, sgeom.Polygon):
if shape.is_empty:
return []
vertices = np.concatenate([np.array(shape.exterior.xy)] +
[np.array(ring.xy) for ring in
shape.interiors], 1).T
codes = np.concatenate([poly_codes(shape.exterior)] +
[poly_codes(ring) for ring in shape.interiors])
return [Path(vertices, codes)]
elif isinstance(shape, (sgeom.MultiPolygon, sgeom.GeometryCollection,
sgeom.MultiLineString, sgeom.MultiPoint)):
paths = []
for geom in shape.geoms:
paths.extend(geos_to_path(geom))
return paths
elif hasattr(shape, '_as_mpl_path'):
vertices, codes = shape._as_mpl_path()
return [Path(vertices, codes)]
else:
raise ValueError('Unsupported shape type {}.'.format(type(shape)))
def path_segments(path, **kwargs):
"""
Create an array of vertices and a corresponding array of codes from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
kwargs
See :func:`matplotlib.path.iter_segments` for details of the keyword
arguments.
Returns
-------
vertices, codes
A (vertices, codes) tuple, where vertices is a numpy array of
coordinates, and codes is a numpy array of matplotlib path codes.
See :class:`matplotlib.path.Path` for information on the types of
codes and their meanings.
"""
pth = path.cleaned(**kwargs)
return pth.vertices[:-1, :], pth.codes[:-1]
def path_to_geos(path, force_ccw=False):
"""
Create a list of Shapely geometric objects from a
:class:`matplotlib.path.Path`.
Parameters
----------
path
A :class:`matplotlib.path.Path` instance.
Other Parameters
----------------
force_ccw
Boolean flag determining whether the path can be inverted to enforce
ccw. Defaults to False.
Returns
-------
A list of instances of the following type(s):
:class:`shapely.geometry.polygon.Polygon`,
:class:`shapely.geometry.linestring.LineString` and/or
:class:`shapely.geometry.multilinestring.MultiLineString`.
"""
# Convert path into numpy array of vertices (and associated codes)
path_verts, path_codes = path_segments(path, curves=False)
# Split into subarrays such that each subarray consists of connected
# line segments based on the start of each one being marked by a
# matplotlib MOVETO code.
verts_split_inds = np.where(path_codes == Path.MOVETO)[0]
verts_split = np.split(path_verts, verts_split_inds)
codes_split = np.split(path_codes, verts_split_inds)
# Iterate through the vertices generating a list of
# (external_geom, [internal_polygons]) tuples.
other_result_geoms = []
collection = []
for path_verts, path_codes in zip(verts_split, codes_split):
if len(path_verts) == 0:
continue
verts_same_as_first = np.all(path_verts[0, :] == path_verts[1:, :],
axis=1)
if all(verts_same_as_first):
geom = sgeom.Point(path_verts[0, :])
elif path_verts.shape[0] > 4 and path_codes[-1] == Path.CLOSEPOLY:
geom = sgeom.Polygon(path_verts[:-1, :])
elif (matplotlib.__version__ < '2.2.0' and
# XXX A path can be given which does not end with close poly,
# in that situation, we have to guess?
path_verts.shape[0] > 3 and verts_same_as_first[-1]):
geom = sgeom.Polygon(path_verts)
else:
geom = sgeom.LineString(path_verts)
# If geom is a Polygon and is contained within the last geom in
# collection, add it to its list of internal polygons, otherwise
# simply append it as a new external geom.
if geom.is_empty:
pass
elif (len(collection) > 0 and
isinstance(collection[-1][0], sgeom.Polygon) and
isinstance(geom, sgeom.Polygon) and
collection[-1][0].contains(geom.exterior)):
collection[-1][1].append(geom.exterior)
elif isinstance(geom, sgeom.Point):
other_result_geoms.append(geom)
else:
collection.append((geom, []))
# Convert each (external_geom, [internal_polygons]) pair into a
# a shapely Polygon that encapsulates the internal polygons, if the
# external geom is a LineString leave it alone.
geom_collection = []
for external_geom, internal_polys in collection:
if internal_polys:
# XXX worry about islands within lakes
geom = sgeom.Polygon(external_geom.exterior, internal_polys)
else:
geom = external_geom
# Correctly orientate the polygon (ccw)
if isinstance(geom, sgeom.Polygon):
if force_ccw and not geom.exterior.is_ccw:
geom = sgeom.polygon.orient(geom)
geom_collection.append(geom)
# If the geom_collection only contains LineStrings combine them
# into a single MultiLinestring.
if geom_collection and all(isinstance(geom, sgeom.LineString) for
geom in geom_collection):
geom_collection = [sgeom.MultiLineString(geom_collection)]
# Remove any zero area Polygons
result = list(filter(not_zero_poly, geom_collection))
return result + other_result_geoms
| [
2,
357,
34,
8,
3517,
12223,
15069,
2813,
532,
2864,
11,
3395,
4452,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
6383,
11081,
13,
198,
2,
198,
2,
6383,
11081,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
34... | 2.388686 | 3,288 |
from chartofaccount.models import Directorate
from main.models import Approval
| [
6738,
8262,
1659,
23317,
13,
27530,
1330,
44437,
198,
6738,
1388,
13,
27530,
1330,
20010,
2100,
628,
198
] | 4.5 | 18 |
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("Demo")
#prepare options
options = VarParsing.VarParsing("analysis")
options.register ('globalTag',
"auto:run2_data",
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"GlobalTag")
options.register ('runNumber',
303014,
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.int, # string, int, or float
"run number")
options.parseArguments()
##
## MessageLogger
##
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.enable = False
process.MessageLogger.SiStripChannelGainFromDBMiscalibrator=dict()
process.MessageLogger.cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
enableStatistics = cms.untracked.bool(True),
threshold = cms.untracked.string("WARNING"),
default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1),
reportEvery = cms.untracked.int32(1000)
),
SiStripChannelGainFromDBMiscalibrator = cms.untracked.PSet( limit = cms.untracked.int32(-1))
)
process.load("Configuration.Geometry.GeometryRecoDB_cff") # Ideal geometry and interface
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag,options.globalTag, '')
print("Using Global Tag:", process.GlobalTag.globaltag._value)
##
## Empty Source
##
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(options.runNumber),
numberEventsInRun = cms.untracked.uint32(1),
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) )
##
## Example smearing configurations
##
##
## separately partition by partition
##
byParition = cms.VPSet(
cms.PSet(partition = cms.string("TIB"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.1),
smearFactor = cms.double(0.2)
),
cms.PSet(partition = cms.string("TOB"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.2),
smearFactor = cms.double(0.15)
),
cms.PSet(partition = cms.string("TID"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.3),
smearFactor = cms.double(0.10)
),
cms.PSet(partition = cms.string("TEC"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.4),
smearFactor = cms.double(0.05)
)
)
##
## whole Strip tracker
##
wholeTracker = cms.VPSet(
cms.PSet(partition = cms.string("Tracker"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.15),
smearFactor = cms.double(0.05)
)
)
##
## down the hierarchy (Tracker,Subdetector,Side,Layer(Wheel)
##
subsets = cms.VPSet(
cms.PSet(partition = cms.string("Tracker"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(0.65),
smearFactor = cms.double(0.05)
),
cms.PSet(partition = cms.string("TEC"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.15),
smearFactor = cms.double(0.02)
),
cms.PSet(partition = cms.string("TECP"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.35),
smearFactor = cms.double(0.02)
),
cms.PSet(partition = cms.string("TECP_9"),
doScale = cms.bool(True),
doSmear = cms.bool(True),
scaleFactor = cms.double(1.55),
smearFactor = cms.double(0.02)
)
)
# process.demo = cms.EDAnalyzer('SiStripChannelGainFromDBMiscalibrator',
# record = cms.untracked.string("SiStripApvGainRcd"),
# gainType = cms.untracked.uint32(1), #0 for G1, 1 for G2
# params = subsets, # as a cms.VPset
# saveMaps = cms.bool(True)
# )
process.load("CondTools.SiStrip.scaleAndSmearSiStripGains_cfi")
process.scaleAndSmearSiStripGains.gainType = 1 # 0 for G1, 1 for G2
process.scaleAndSmearSiStripGains.params = subsets # as a cms.VPset
##
## Database output service
##
process.load("CondCore.CondDB.CondDB_cfi")
##
## Output database (in this case local sqlite file)
##
process.CondDB.connect = 'sqlite_file:modifiedGains_'+ process.GlobalTag.globaltag._value+'_IOV_'+str(options.runNumber)+".db"
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
timetype = cms.untracked.string('runnumber'),
toPut = cms.VPSet(cms.PSet(record = cms.string('SiStripApvGainRcd'),
tag = cms.string('modifiedGains')
)
)
)
process.p = cms.Path(process.scaleAndSmearSiStripGains)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
11748,
48849,
14055,
13,
36301,
7248,
13,
19852,
47,
945,
278,
355,
12372,
47,
945,
278,
198,
198,
14681,
796,
26... | 1.866325 | 3,314 |
# coding: utf8
# !/usr/env/python
from terrainbento import Basic
| [
2,
19617,
25,
3384,
69,
23,
198,
2,
5145,
14,
14629,
14,
24330,
14,
29412,
198,
198,
6738,
15510,
46119,
78,
1330,
14392,
628,
628
] | 2.76 | 25 |
from typing import List
import aiohttp
from dataclasses import dataclass
from tastyworks.models.order import Order, OrderPriceEffect
@dataclass
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
257,
952,
4023,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
25103,
5225,
13,
27530,
13,
2875,
1330,
8284,
11,
8284,
18124,
18610,
628,
198,
31,
19608,
330,
31172,
628,
... | 3.571429 | 42 |
""" This File contains the unit tests for the gip game """
import unittest
from game import gip
| [
37811,
770,
9220,
4909,
262,
4326,
5254,
329,
262,
308,
541,
983,
37227,
198,
11748,
555,
715,
395,
198,
6738,
983,
1330,
308,
541,
628
] | 3.88 | 25 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.types import freeze
DEPS = [
'depot_tools/bot_update',
'depot_tools/gclient',
'depot_tools/git',
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'v8',
]
AUTO_REVIEWERS = [
'machenbach@chromium.org',
'vogelheim@chromium.org',
'hablich@chromium.org',
]
BASE_URL = 'https://chromium.googlesource.com'
V8_REPO = BASE_URL + '/v8/v8'
CR_REPO = BASE_URL + '/chromium/src'
LOG_TEMPLATE = 'Rolling v8/%s: %s/+log/%s..%s'
# Skip these dependencies (list without solution name prefix).
BLACKLIST = [
'test/mozilla/data',
'test/simdjs/data',
'test/test262/data',
'test/wasm-js',
'testing/gtest',
'third_party/WebKit/Source/platform/inspector_protocol',
]
| [
198,
2,
15069,
1946,
383,
18255,
1505,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
257,
347,
10305,
12,
7635,
5964,
326,
460,
307,
198,
2,
1043,
287,
262,
38559,
24290,
2393,
13,
198,
198,
... | 2.52551 | 392 |
import pymongo
from luigi.configuration import get_config
config = get_config()
mongo = pymongo.MongoClient(
host=config['mongo']['host'],
port=config['mongo']['port'],
)
| [
11748,
279,
4948,
25162,
198,
6738,
300,
84,
25754,
13,
11250,
3924,
1330,
651,
62,
11250,
628,
198,
11250,
796,
651,
62,
11250,
3419,
628,
198,
76,
25162,
796,
279,
4948,
25162,
13,
44,
25162,
11792,
7,
198,
220,
220,
220,
2583,
28... | 2.541667 | 72 |
#!/usr/bin/env python3
# Start a server on `localhost:8080` that runs a command on every request and serves
# its output, or displays a more detailed 500 error message if the command fails.
# Essentially this is a lightweight version of CGI, I think.
import sys, subprocess
from http.server import BaseHTTPRequestHandler, HTTPServer
cmd = sys.argv[1:]
if not cmd:
print("please provide a command to run", file=sys.stderr)
sys.exit(1)
print("starting server...")
httpd = HTTPServer(("localhost", 8080), Server)
print("running server...")
httpd.serve_forever()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
7253,
257,
4382,
319,
4600,
36750,
25,
1795,
1795,
63,
326,
4539,
257,
3141,
319,
790,
2581,
290,
9179,
198,
2,
663,
5072,
11,
393,
11298,
257,
517,
6496,
5323,
4049,
3... | 3.339181 | 171 |
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QMainWindow, QInputDialog
import numpy as np
import pluto as pl
util = pl.PlutoObject(None)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_()) | [
11748,
25064,
198,
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
11,
33734,
8205,
72,
11,
33734,
54,
312,
11407,
198,
6738,
9485,
48,
83,
20,
13,
48,
83,
54,
312,
11407,
1330,
1195,
23416,
11,
1195,
13383,
27703,
11,
1195,
20560,
4420... | 2.438596 | 171 |
"""
This package contains functions for enhancing different kinds of structures
(e.g. edges/membrane, blobs/nuclei, vessels) in images.
"""
# import sub-packages to support nested calls
from . import edge, shape
# list out things that are available for public use
__all__ = (
# sub-packages
'edge',
'shape',
)
| [
37811,
198,
1212,
5301,
4909,
5499,
329,
27496,
1180,
6982,
286,
8573,
198,
7,
68,
13,
70,
13,
13015,
14,
11883,
1671,
1531,
11,
698,
8158,
14,
77,
14913,
72,
11,
14891,
8,
287,
4263,
13,
198,
37811,
198,
2,
1330,
850,
12,
43789,
... | 3.295918 | 98 |
import re
| [
11748,
302,
628
] | 3.666667 | 3 |
import numpy as np
from scipy import sparse
import os
"""
NOTE
Format of the savings
NAME X_POS Y_POS HEIGHT(row) WIDTH(col)
"""
CUR_DIR = os.path.dirname(__file__)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
29877,
198,
11748,
28686,
198,
198,
37811,
198,
16580,
198,
26227,
286,
262,
10653,
198,
20608,
1395,
62,
37997,
575,
62,
37997,
11179,
9947,
7,
808,
8,
370,
2389,
4221,
7,... | 2.544118 | 68 |
"""
:mod:`redis_helpers` - Redis and other helper classes for the Bibliographic
Framework App
"""
__author__ = "Jeremy Nelson"
import csv
from aristotle.settings import REDIS_DATASTORE
def get_brief(**kwargs):
"""
Searches datastore and returns brief record from bibframe:CreativeWork,
bibframe:Instance, and bibframe:Authority datastores
:param redis_datastore: Redis bibframe:Work datastore
:param redis_datastore: Redis bibframe:Instance datastore
:param redis_datastore; Redis bibframe:Authority datastore
:param creative_work_key: Redis bibframe:CreativeWork key
:param instance_key: Redis bibframe:Instance key
"""
output, work_key, instance_keys = {},None,[]
redis_datastore = kwargs.get('redis_datastore',
REDIS_DATASTORE)
if kwargs.has_key('creative_work_key'):
work_key = kwargs.get('creative_work_key')
if kwargs.has_key('instance_key'):
instance_keys.append(kwargs.get('instance_key'))
if work_key is None:
work_key = redis_datastore.hget(instance_keys[0],
'instanceOf')
else:
if redis_datastore.hexists(work_key, 'hasInstance'):
instance_keys = [redis_datastore.hget(work_key, 'hasInstance'), ]
elif redis_datastore.exists("{0}:hasInstance".format(work_key)):
instance_keys = redis_datastore.smembers("{0}:hasInstance".format(work_key))
else:
raise ValueError("Work doesn't have an Instance")
work_title_key = redis_datastore.hget(instance_keys[0],
'title')
# Instance has a Title Entity linked to it
if redis_datastore.exists(work_title_key):
title_entity = redis_datastore.hgetall(work_title_key)
raw_title = title_entity.get('titleValue')
if title_entity.has_key('subtitle'):
raw_title = "{0} {1}".format(raw_title,
title_entity.get('subtitle'))
output["title"] = unicode(raw_title,
errors="ignore")
# Title may be stored as a literal
elif redis_datastore.hexists(instance_keys[0], "title"):
output["title"] = unicode(redis_datastore.hget(instance_keys[0],
"title"),
errors="ignore")
output['ils-bib-numbers'] = []
for instance_key in instance_keys:
output['ils-bib-numbers'].append(redis_datastore.hget("{0}:rda:identifierForTheManifestation".format(instance_key),
'ils-bib-number'))
output['creators'] = []
creator_keys = redis_datastore.smembers("{0}:rda:creator".format(work_key))
for creator_key in creator_keys:
output['creators'].append(unicode(redis_datastore.hget(creator_key,
"rda:preferredNameForThePerson"),
errors="ignore"))
return output
def get_json_linked_data(redis_datastore, redis_key):
"""
Function takes a redis_key and Redis instance, return JSON_LD of the
BIBFRAME entity
"""
ld_output = {"@context":{ "bf": "http://bibframe.org/vocab/",
"prov":"http://www.w3.org/ns/prov#",
"rda": "http://rdvocab.info",
"redis_key": None,
"result": None,
"schema":"http://schema.org/" }}
ld_output['redis_key'] = redis_key
for key, value in redis_datastore.hgetall(redis_key).iteritems():
# Assumes all values not explictly starting with "rda", "prov",
# or "schema" is part of the bf (bibframe) name-space
ld_key = None
if key == 'created_on':
ld_output['prov:Generation'] = {'prov:atTime': value }
if key.startswith('rda:')\
or key.startswith('prov')\
or key.startswith('schema'):
ld_key = key
else:
ld_key = "bf:{0}".format(key)
if ld_key is not None:
try:
ld_output[ld_key] = unicode(value)
except UnicodeDecodeError, e:
ld_output[ld_key] = unicode(value, 'iso_8859_1')
return ld_output
| [
37811,
201,
198,
1058,
4666,
25,
63,
445,
271,
62,
16794,
364,
63,
532,
2297,
271,
290,
584,
31904,
6097,
329,
262,
347,
29142,
6826,
201,
198,
25161,
2034,
201,
198,
37811,
201,
198,
834,
9800,
834,
796,
366,
35623,
12996,
1,
201,
... | 1.91617 | 2,350 |
import numpy as np
def centroid(points):
"""
Calculate the center of mass over a collection of points
:param points: The points, expressed as numpy.array
:type points: numpy.array
:return: The center of mass for the given points
:rtype: numpy.array
"""
com = np.mean(points, 0)
return com
def closest_point(v, w, p):
"""
Find the point closest to p on the line between v and w
Modified from StackOverflow at http://stackoverflow.com/a/1501725
Returns the distance and the point on the line segment between v and w
"""
vw = w - v
len_squared = np.dot(vw, vw)
if 0. == len_squared:
# Handles the case when v == w
projection = v
else:
t = max(0., min(1., np.dot((p - v), vw) / len_squared))
projection = v + t * vw
return np.linalg.norm(projection - p), projection
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
1247,
3882,
7,
13033,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
27131,
378,
262,
3641,
286,
2347,
625,
257,
4947,
286,
2173,
628,
220,
220,
220,
1058,
17143,
2173,
25,
383,
2... | 2.564327 | 342 |
"""
"""
import sys
from typing import Tuple
from src.tree import Tree
from src.node import Node
import src.util as util
def minimize_nj_criterion(ft: Tree) -> Tuple[tuple, Node]:
"""Returns i,j for which d(i, j) - r(i) -r(j) is minimal and corresponding new node of merging i,j
Args:
ft (Tree): Tree object
Returns:
best_joins (list): list containing the joined node objects
new_node (Node): Node object of the new node
"""
active_nodes = []
for node in ft.nodes:
if node.active:
active_nodes.append(node)
min_dist = sys.float_info.max
best_join = (0, 0)
for i in active_nodes:
for j in active_nodes:
if i == j:
continue
ft.update_T()
criterion = nj_criterion(ft, i, j)
if criterion < min_dist: # if best join up until now
min_dist = criterion
best_join = (i, j) # saves best joining nodes
return best_join
def nj_criterion(ft: Tree, i: Node, j: Node) -> float:
"""
Calculates the Neighbour Joining criterion between two nodes, based on Supplement 1.
Args:
ft: Tree
i: Node
j: Node
Returns:
criterion(float)
"""
if i.leaf and j.leaf:
criterion = util.uncorrected_distance(ft, [i, j]) - util.out_distance(ft, i) - util.out_distance(ft, j)
elif i.leaf:
criterion = util.uncorrected_distance(ft, [j.leftchild, j.rightchild, i]) - util.out_distance(ft,
i) - util.out_distance(
ft, j)
elif j.leaf:
criterion = util.uncorrected_distance(ft, [i.leftchild, i.rightchild, j]) - util.out_distance(ft,
i) - util.out_distance(
ft, j)
else:
criterion = util.uncorrected_distance(ft, [i.leftchild, i.rightchild, j]) - util.out_distance(ft,
i) - util.out_distance(
ft, j)
return criterion
| [
37811,
198,
198,
37811,
198,
11748,
25064,
198,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
6738,
12351,
13,
21048,
1330,
12200,
198,
6738,
12351,
13,
17440,
1330,
19081,
198,
11748,
12351,
13,
22602,
355,
7736,
628,
198,
4299,
17775,
... | 1.930884 | 1,143 |
import api_zoom
import random, os, time, pickle, sys, importlib
from datetime import datetime
import config
get = api_zoom.get
post_json = api_zoom.post_json
if __name__ == '__main__':
os.environ['PYTHONINSPECT'] = '1'
a = Avalon()
avalon_help()
| [
11748,
40391,
62,
89,
4207,
198,
11748,
4738,
11,
28686,
11,
640,
11,
2298,
293,
11,
25064,
11,
1330,
8019,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
4566,
198,
198,
1136,
796,
40391,
62,
89,
4207,
13,
1136,
198,
7353,
62... | 2.431193 | 109 |
import numpy as np
import numba
import scipy.sparse
from functools import wraps
from itertools import chain
from collections.abc import Iterable
from ._sparse_array import SparseArray
from ._utils import check_compressed_axes, normalize_axis, check_zero_fill_value
from ._umath import elemwise
from ._coo.common import (
clip,
triu,
tril,
where,
nansum,
nanmean,
nanprod,
nanmin,
nanmax,
nanreduce,
roll,
kron,
argwhere,
isposinf,
isneginf,
result_type,
diagonal,
diagonalize,
asCOO,
linear_loc,
)
def tensordot(a, b, axes=2, *, return_type=None):
    """
    Perform the equivalent of :obj:`numpy.tensordot`.

    Parameters
    ----------
    a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix]
        The arrays to perform the :code:`tensordot` operation on.
    axes : tuple[Union[int, tuple[int]], Union[int, tuple[int]]], optional
        The axes to match when performing the sum.
    return_type : {None, COO, np.ndarray}, optional
        Type of returned array.

    Returns
    -------
    Union[COO, numpy.ndarray]
        The result of the operation.

    Raises
    ------
    ValueError
        If all arguments don't have zero fill-values.

    See Also
    --------
    numpy.tensordot : NumPy equivalent function
    """
    from ._compressed import GCXS

    # Much of this is stolen from numpy/core/numeric.py::tensordot
    # Please see license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
    check_zero_fill_value(a, b)

    # Normalize scipy.sparse inputs to this package's GCXS format up front.
    if scipy.sparse.issparse(a):
        a = GCXS.from_scipy_sparse(a)
    if scipy.sparse.issparse(b):
        b = GCXS.from_scipy_sparse(b)

    # An integer ``axes=n`` means: contract the last n axes of ``a`` with
    # the first n axes of ``b``; otherwise a pair of axis specs is given.
    try:
        iter(axes)
    except TypeError:
        axes_a = list(range(-axes, 0))
        axes_b = list(range(0, axes))
    else:
        axes_a, axes_b = axes
    # Each side of the pair may itself be a bare int; wrap it in a list.
    try:
        na = len(axes_a)
        axes_a = list(axes_a)
    except TypeError:
        axes_a = [axes_a]
        na = 1
    try:
        nb = len(axes_b)
        axes_b = list(axes_b)
    except TypeError:
        axes_b = [axes_b]
        nb = 1

    # a, b = asarray(a), asarray(b)  # <--- modified
    as_ = a.shape
    nda = a.ndim
    bs = b.shape
    ndb = b.ndim
    equal = True
    if nda == 0 or ndb == 0:
        pos = int(nda != 0)
        raise ValueError("Input {} operand does not have enough dimensions".format(pos))
    if na != nb:
        equal = False
    else:
        # The contracted axis pairs must have matching lengths; negative
        # axis indices are normalized in the same pass.
        for k in range(na):
            if as_[axes_a[k]] != bs[axes_b[k]]:
                equal = False
                break
            if axes_a[k] < 0:
                axes_a[k] += nda
            if axes_b[k] < 0:
                axes_b[k] += ndb
    if not equal:
        raise ValueError("shape-mismatch for sum")

    # Move the axes to sum over to the end of "a"
    # and to the front of "b"
    notin = [k for k in range(nda) if k not in axes_a]
    newaxes_a = notin + axes_a
    N2 = 1
    for axis in axes_a:
        N2 *= as_[axis]
    newshape_a = (-1, N2)
    olda = [as_[axis] for axis in notin]

    notin = [k for k in range(ndb) if k not in axes_b]
    newaxes_b = axes_b + notin
    N2 = 1
    for axis in axes_b:
        N2 *= bs[axis]
    newshape_b = (N2, -1)
    oldb = [bs[axis] for axis in notin]

    # Short-circuit for empty operands: build the (empty) output directly
    # instead of attempting a degenerate matrix product.
    if any(dim == 0 for dim in chain(newshape_a, newshape_b)):
        res = asCOO(np.empty(olda + oldb), check=False)
        if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
            res = res.todense()
        return res

    # Reduce the problem to a single 2-D matrix product, then restore the
    # surviving (non-contracted) axes of both operands on the result.
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = _dot(at, bt, return_type)
    return res.reshape(olda + oldb)
def matmul(a, b):
    """Perform the equivalent of :obj:`numpy.matmul` on two arrays.

    Parameters
    ----------
    a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix]
        The arrays to perform the :code:`matmul` operation on.

    Returns
    -------
    Union[COO, numpy.ndarray]
        The result of the operation.

    Raises
    ------
    ValueError
        If all arguments don't have zero fill-values, or the shape of the two arrays is not broadcastable.

    See Also
    --------
    numpy.matmul : NumPy equivalent function.
    COO.__matmul__ : Equivalent function for COO objects.
    """
    check_zero_fill_value(a, b)
    if not (hasattr(a, "ndim") and hasattr(b, "ndim")):
        raise TypeError(
            "Cannot perform dot product on types %s, %s" % (type(a), type(b))
        )

    # A 2-d (or lower) right operand makes matmul coincide with dot.
    if b.ndim <= 2:
        return dot(a, b)

    # A 2-d (or lower) left operand: dot, then rotate the leading axis of
    # the result into the second-to-last position.
    if a.ndim <= 2:
        res = dot(a, b)
        order = list(range(res.ndim))
        order.insert(-1, order.pop(0))
        return res.transpose(order)

    # If ``a`` squeezes down to a vector, a vector dot is cheaper; the
    # dropped singleton axis is re-inserted afterwards.
    if a.ndim <= b.ndim and np.prod(a.shape[:-1]) == 1:
        res = dot(a.reshape(-1), b)
        out_shape = list(res.shape)
        out_shape.insert(-1, 1)
        return res.reshape(out_shape)

    # Likewise, if ``b`` squeezes down to a matrix, use dot directly.
    if b.ndim <= a.ndim and np.prod(b.shape[:-2]) == 1:
        return dot(a, b.reshape(b.shape[-2:]))

    # Pad the lower-dimensional operand with leading singleton axes so
    # both operands have the same rank.
    if a.ndim < b.ndim:
        a = a[(None,) * (b.ndim - a.ndim)]
    if a.ndim > b.ndim:
        b = b[(None,) * (a.ndim - b.ndim)]

    # Every batch dimension must match or be 1 on one side.
    incompatible = any(
        m != n and 1 not in (m, n) for m, n in zip(a.shape[:-2], b.shape[:-2])
    )
    if incompatible:
        raise ValueError("shapes of a and b are not broadcastable")
    return _matmul_recurser(a, b)
def dot(a, b):
    """
    Perform the equivalent of :obj:`numpy.dot` on two arrays.

    Parameters
    ----------
    a, b : Union[COO, np.ndarray, scipy.sparse.spmatrix]
        The arrays to perform the :code:`dot` operation on.

    Returns
    -------
    Union[COO, numpy.ndarray]
        The result of the operation.

    Raises
    ------
    ValueError
        If all arguments don't have zero fill-values.

    See Also
    --------
    numpy.dot : NumPy equivalent function.
    COO.dot : Equivalent function for COO objects.
    """
    check_zero_fill_value(a, b)
    if not (hasattr(a, "ndim") and hasattr(b, "ndim")):
        raise TypeError(
            "Cannot perform dot product on types %s, %s" % (type(a), type(b))
        )

    # Vector-vector case: an elementwise product followed by a reduction.
    if a.ndim == 1 and b.ndim == 1:
        lhs = asCOO(a) if isinstance(a, SparseArray) else a
        rhs = asCOO(b) if isinstance(b, SparseArray) else b
        return (lhs * rhs).sum()

    # Otherwise contract the last axis of ``a`` with the second-to-last
    # axis of ``b`` (or its only axis when ``b`` is one-dimensional).
    contracted_b_axis = -1 if b.ndim == 1 else -2
    return tensordot(a, b, axes=(-1, contracted_b_axis))
def _memoize_dtype(f):
    """
    Memoizes a function taking in NumPy dtypes.

    Parameters
    ----------
    f : Callable

    Returns
    -------
    wrapped : Callable

    Examples
    --------
    >>> def func(dt1):
    ...     return object()
    >>> func = _memoize_dtype(func)
    >>> func(np.dtype('i8')) is func(np.dtype('int64'))
    True
    >>> func(np.dtype('i8')) is func(np.dtype('i4'))
    False
    """
    cache = {}

    # BUG FIX: the inner ``wrapped`` closure was missing entirely, leaving
    # ``@wraps(f)`` dangling before ``return wrapped`` (a SyntaxError, and
    # ``wrapped`` undefined). Restore the memoizing closure.
    @wraps(f)
    def wrapped(*args):
        # Key on canonical dtype names so aliases ('i8' vs 'int64') share
        # one cache entry, as the doctest above requires.
        key = tuple(arg.name for arg in args)
        if key not in cache:
            cache[key] = f(*args)
        return cache[key]

    return wrapped
@numba.jit(nopython=True, nogil=True)
def _csr_csr_count_nnz(
    out_shape, a_indices, b_indices, a_indptr, b_indptr
):  # pragma: no cover
    """
    A function for computing the number of nonzero values in the resulting
    array from multiplying an array with compressed rows with an array
    with compressed rows: (a @ b).nnz.

    Parameters
    ----------
    out_shape : tuple
        The shape of the output array.
    a_indices, a_indptr : np.ndarray
        The indices and index pointer array of ``a``.
    b_indices, b_indptr : np.ndarray
        The indices and index pointer array of ``b``.

    Returns
    -------
    nnz : int
        The number of (structurally) nonzero entries in ``a @ b``.
    """
    n_row, n_col = out_shape
    nnz = 0
    # ``mask[k] == i`` records that output column ``k`` has already been
    # counted for output row ``i``, so a column reached through several
    # rows of ``b`` contributes only one nonzero.
    mask = np.full(n_col, -1)
    for i in range(n_row):
        row_nnz = 0
        # Every column index in row ``i`` of ``a`` selects a row ``j`` of
        # ``b``; each previously-unseen column of that row adds a nonzero.
        for j in a_indices[a_indptr[i] : a_indptr[i + 1]]:
            for k in b_indices[b_indptr[j] : b_indptr[j + 1]]:
                if mask[k] != i:
                    mask[k] = i
                    row_nnz += 1
        nnz += row_nnz
    return nnz
@numba.jit(nopython=True, nogil=True)
def _csr_ndarray_count_nnz(
    out_shape, indptr, a_indices, a_indptr, b
):  # pragma: no cover
    """
    A function for computing the number of nonzero values in the resulting
    array from multiplying an array with compressed rows with a dense
    numpy array: (a @ b).nnz.

    Parameters
    ----------
    out_shape : tuple
        The shape of the output array.
    indptr : ndarray
        The empty index pointer array for the output; filled in place with
        the cumulative per-row nonzero counts.
    a_indices, a_indptr : np.ndarray
        The indices and index pointer array of ``a``.
    b : np.ndarray
        The second input array ``b``.

    Returns
    -------
    nnz : int
        The number of (structurally) nonzero entries in ``a @ b``.
    """
    nnz = 0
    for i in range(out_shape[0]):
        cur_row = a_indices[a_indptr[i] : a_indptr[i + 1]]
        for j in range(out_shape[1]):
            # Output (i, j) is nonzero as soon as one contracted entry
            # ``b[k, j]`` is nonzero, so stop scanning the row early.
            for k in cur_row:
                if b[k, j] != 0:
                    nnz += 1
                    break
        # Store the running total, producing a CSR-style index pointer.
        indptr[i + 1] = nnz
    return nnz
@numba.jit(nopython=True, nogil=True)
def _csc_ndarray_count_nnz(
    a_shape, b_shape, indptr, a_indices, a_indptr, b
):  # pragma: no cover
    """
    A function for computing the number of nonzero values in the resulting
    array from multiplying an array with compressed columns with a dense
    numpy array: (a @ b).nnz.

    Parameters
    ----------
    a_shape, b_shape : tuple
        The shapes of the input arrays.
    indptr : ndarray
        The empty index pointer array for the output; filled in place with
        the cumulative per-column nonzero counts.
    a_indices, a_indptr : np.ndarray
        The indices and index pointer array of ``a``.
    b : np.ndarray
        The second input array ``b``.

    Returns
    -------
    nnz : int
        The number of (structurally) nonzero entries in ``a @ b``.
    """
    nnz = 0
    # ``mask[k] == i`` records that output row ``k`` has already been
    # counted for output column ``i``, so each (row, column) pair adds at
    # most one nonzero no matter how many products contribute to it.
    mask = np.full(a_shape[0], -1)
    for i in range(b_shape[1]):
        col_nnz = 0
        for j in range(b_shape[0]):
            for k in a_indices[a_indptr[j] : a_indptr[j + 1]]:
                if b[j, i] != 0 and mask[k] != i:
                    mask[k] = i
                    col_nnz += 1
        nnz += col_nnz
        # Store the running total, producing a CSC-style index pointer.
        indptr[i + 1] = nnz
    return nnz
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
@_memoize_dtype
def stack(arrays, axis=0, compressed_axes=None):
    """
    Stack the input arrays along the given dimension.

    Dispatches to the COO implementation when any input is a COO array,
    and to the GCXS implementation otherwise.

    Parameters
    ----------
    arrays : Iterable[SparseArray]
        The input arrays to stack.
    axis : int, optional
        The axis along which to stack the input arrays.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    SparseArray
        The output stacked array.

    Raises
    ------
    ValueError
        If all elements of :code:`arrays` don't have the same fill-value.

    See Also
    --------
    numpy.stack : NumPy equivalent function
    """
    from ._coo import COO

    contains_coo = any(isinstance(arr, COO) for arr in arrays)
    if contains_coo:
        from ._coo import stack as coo_stack

        return coo_stack(arrays, axis)

    from ._compressed import stack as gcxs_stack

    return gcxs_stack(arrays, axis, compressed_axes)
def concatenate(arrays, axis=0, compressed_axes=None):
    """
    Concatenate the input arrays along the given dimension.

    Dispatches to the COO implementation when any input is a COO array,
    and to the GCXS implementation otherwise.

    Parameters
    ----------
    arrays : Iterable[SparseArray]
        The input arrays to concatenate.
    axis : int, optional
        The axis along which to concatenate the input arrays. The default is zero.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    SparseArray
        The output concatenated array.

    Raises
    ------
    ValueError
        If all elements of :code:`arrays` don't have the same fill-value.

    See Also
    --------
    numpy.concatenate : NumPy equivalent function
    """
    from ._coo import COO

    contains_coo = any(isinstance(arr, COO) for arr in arrays)
    if contains_coo:
        from ._coo import concatenate as coo_concat

        return coo_concat(arrays, axis)

    from ._compressed import concatenate as gcxs_concat

    return gcxs_concat(arrays, axis, compressed_axes)
def eye(N, M=None, k=0, dtype=float, format="coo", compressed_axes=None):
    """Return a 2-D array in the specified format with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to `N`.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main diagonal,
        a positive value refers to an upper diagonal, and a negative value
        to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.
    format : str, optional
        A format string.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    I : SparseArray of shape (N, M)
        An array where all elements are equal to zero, except for the `k`-th
        diagonal, whose values are equal to one.

    Examples
    --------
    >>> eye(2, dtype=int).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[1, 0],
           [0, 1]])
    >>> eye(3, k=1).todense()  # doctest: +SKIP
    array([[0., 1., 0.],
           [0., 0., 1.],
           [0., 0., 0.]])
    """
    from sparse import COO

    N = int(N)
    M = N if M is None else int(M)
    k = int(k)

    # Length of the requested diagonal, clipped to the array bounds.
    diag_len = min(N, M)
    if k > 0:
        diag_len = max(0, min(diag_len, M - k))
        rows = np.arange(diag_len, dtype=np.intp)
        cols = rows + k
    elif k < 0:
        diag_len = max(0, min(diag_len, N + k))
        cols = np.arange(diag_len, dtype=np.intp)
        rows = cols - k
    else:
        rows = np.arange(diag_len, dtype=np.intp)
        cols = rows

    # The diagonal entries are generated in sorted order with no repeats,
    # so COO construction can skip its de-duplication and sorting passes.
    coords = np.stack([rows, cols])
    data = np.array(1, dtype=dtype)
    result = COO(coords, data=data, shape=(N, M), has_duplicates=False, sorted=True)
    return result.asformat(format, compressed_axes=compressed_axes)
def full(shape, fill_value, dtype=None, format="coo", compressed_axes=None):
    """Return a SparseArray of given shape and type, filled with `fill_value`.

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        The desired data-type for the array. The default, `None`, means
        `np.array(fill_value).dtype`.
    format : str, optional
        A format string.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    out : SparseArray
        Array of `fill_value` with the given shape and dtype.

    Examples
    --------
    >>> full(5, 9).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([9, 9, 9, 9, 9])
    >>> full((2, 2), 9, dtype=float).todense()  # doctest: +SKIP
    array([[9., 9.],
           [9., 9.]])
    """
    from sparse import COO

    if dtype is None:
        dtype = np.array(fill_value).dtype
    if not isinstance(shape, tuple):
        shape = (shape,)
    if compressed_axes is not None:
        check_compressed_axes(shape, compressed_axes)

    # A uniformly-filled array has no explicit entries at all: empty
    # coordinates and data plus the fill value describe it completely.
    empty_coords = np.empty((len(shape), 0), dtype=np.intp)
    empty_data = np.empty(0, dtype=dtype)
    result = COO(
        empty_coords,
        data=empty_data,
        shape=shape,
        fill_value=fill_value,
        has_duplicates=False,
        sorted=True,
    )
    return result.asformat(format, compressed_axes=compressed_axes)
def full_like(a, fill_value, dtype=None, format=None, compressed_axes=None):
    """Return a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : array_like
        The shape and data-type of the result will match those of `a`.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        Overrides the data type of the result.
    format : str, optional
        A format string. If not given, it is inferred from the type of `a`
        (falling back to COO for dense inputs).
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    out : SparseArray
        Array of `fill_value` with the same shape and type as `a`.

    Examples
    --------
    >>> x = np.ones((2, 3), dtype='i8')
    >>> full_like(x, 9.0).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[9, 9, 9],
           [9, 9, 9]])
    """
    # BUG FIX: only infer the format when the caller did not pass one.
    # The previous ``else`` branch silently overrode an explicitly supplied
    # ``format`` with "coo"; sibling helpers (e.g. ``zeros_like``) use the
    # ``elif format is None`` pattern applied here.
    if format is None and not isinstance(a, np.ndarray):
        format = type(a).__name__.lower()
    elif format is None:
        format = "coo"
    # Inherit the compression layout of ``a`` unless one was requested.
    if hasattr(a, "compressed_axes") and compressed_axes is None:
        compressed_axes = a.compressed_axes
    return full(
        a.shape,
        fill_value,
        dtype=(a.dtype if dtype is None else dtype),
        format=format,
        compressed_axes=compressed_axes,
    )
def zeros(shape, dtype=float, format="coo", compressed_axes=None):
    """Return a SparseArray of given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    dtype : data-type, optional
        The desired data-type for the array, e.g., `numpy.int8`. Default is
        `numpy.float64`.
    format : str, optional
        A format string.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    out : SparseArray
        Array of zeros with the given shape and dtype.

    Examples
    --------
    >>> zeros(5).todense()  # doctest: +SKIP
    array([0., 0., 0., 0., 0.])
    >>> zeros((2, 2), dtype=int).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[0, 0],
           [0, 0]])
    """
    if compressed_axes is not None:
        check_compressed_axes(shape, compressed_axes)
    # Delegate to ``full`` with a zero fill value, then convert formats.
    zero_filled = full(shape, 0, np.dtype(dtype))
    return zero_filled.asformat(format, compressed_axes=compressed_axes)
def zeros_like(a, dtype=None, format=None, compressed_axes=None):
    """Build a SparseArray of zeros matching the shape and type of ``a``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of the result will match those of `a`.
    dtype : data-type, optional
        Overrides the data type of the result.
    format : str, optional
        A format string.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    out : SparseArray
        Array of zeros with the same shape and type as `a`.

    Examples
    --------
    >>> x = np.ones((2, 3), dtype='i8')
    >>> zeros_like(x).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[0, 0, 0],
           [0, 0, 0]])
    """
    # Derive a format from the prototype only when none was requested.
    if format is None:
        format = "coo" if isinstance(a, np.ndarray) else type(a).__name__.lower()
    # Carry over the prototype's compression layout unless overridden.
    if compressed_axes is None and hasattr(a, "compressed_axes"):
        compressed_axes = a.compressed_axes
    result_dtype = a.dtype if dtype is None else dtype
    return zeros(
        a.shape,
        dtype=result_dtype,
        format=format,
        compressed_axes=compressed_axes,
    )
def ones(shape, dtype=float, format="coo", compressed_axes=None):
    """Construct a SparseArray of the requested shape, filled with ones.

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    dtype : data-type, optional
        Desired data-type for the array, e.g., `numpy.int8`; defaults to
        `numpy.float64`.
    format : str, optional
        A format string.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    out : SparseArray
        Array of ones with the given shape and dtype.

    Examples
    --------
    >>> ones(5).todense()  # doctest: +SKIP
    array([1., 1., 1., 1., 1.])
    >>> ones((2, 2), dtype=int).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[1, 1],
           [1, 1]])
    """
    # Validate the requested compression layout up front.
    if compressed_axes is not None:
        check_compressed_axes(shape, compressed_axes)
    result = full(shape, 1, np.dtype(dtype))
    return result.asformat(format, compressed_axes=compressed_axes)
def ones_like(a, dtype=None, format=None, compressed_axes=None):
    """Return a SparseArray of ones with the same shape and type as ``a``.

    Parameters
    ----------
    a : array_like
        The shape and data-type of the result will match those of `a`.
    dtype : data-type, optional
        Overrides the data type of the result.
    format : str, optional
        A format string.
    compressed_axes : iterable, optional
        The axes to compress if returning a GCXS array.

    Returns
    -------
    out : SparseArray
        Array of ones with the same shape and type as `a`.

    Examples
    --------
    >>> x = np.ones((2, 3), dtype='i8')
    >>> ones_like(x).todense()  # doctest: +NORMALIZE_WHITESPACE
    array([[1, 1, 1],
           [1, 1, 1]])
    """
    # Infer the output format from ``a`` only when the caller did not ask
    # for one.  The previous ``else`` branch silently overwrote an
    # explicitly passed ``format`` with "coo" (see the correct ``elif``
    # handling in ``zeros_like``).
    if format is None and not isinstance(a, np.ndarray):
        format = type(a).__name__.lower()
    elif format is None:
        format = "coo"
    # Propagate the compression layout of GCXS inputs unless overridden.
    if hasattr(a, "compressed_axes") and compressed_axes is None:
        compressed_axes = a.compressed_axes
    return ones(
        a.shape,
        dtype=(a.dtype if dtype is None else dtype),
        format=format,
        compressed_axes=compressed_axes,
    )
def outer(a, b, out=None):
    """Compute the outer product of two (possibly sparse) arrays.

    Parameters
    ----------
    a, b : sparse.SparseArray
        The input arrays.
    out : sparse.SparseArray
        The output array.

    Examples
    --------
    >>> import numpy as np
    >>> import sparse
    >>> a = sparse.COO(np.arange(4))
    >>> o = sparse.outer(a, a)
    >>> o.todense()
    array([[0, 0, 0, 0],
           [0, 1, 2, 3],
           [0, 2, 4, 6],
           [0, 3, 6, 9]])
    """
    from sparse import SparseArray, COO

    # Normalize each sparse operand to COO so ``flatten`` is available.
    operands = []
    for operand in (a, b):
        if isinstance(operand, SparseArray):
            operand = COO(operand)
        operands.append(operand)
    a, b = operands
    return np.multiply.outer(a.flatten(), b.flatten(), out=out)
def asnumpy(a, dtype=None, order=None):
    """Convert an arbitrary source array into a dense :class:`numpy.ndarray`.

    Args:
        a: Arbitrary object that can be converted to :class:`numpy.ndarray`.
        dtype: Optional data type for the returned array.
        order ({'C', 'F', 'A'}): The desired memory layout of the output
            array. When ``order`` is 'A', it uses 'F' if ``a`` is
            fortran-contiguous and 'C' otherwise.

    Returns:
        numpy.ndarray: Converted array on the host memory.
    """
    from ._sparse_array import SparseArray

    # Densify sparse inputs; anything else is handed to NumPy unchanged.
    dense = a.todense() if isinstance(a, SparseArray) else a
    return np.array(dense, dtype=dtype, copy=False, order=order)
# this code was taken from numpy.moveaxis
# (cf. numpy/core/numeric.py, lines 1340-1409, v1.18.4)
# https://github.com/numpy/numpy/blob/v1.18.4/numpy/core/numeric.py#L1340-L1409
def moveaxis(a, source, destination):
    """Reorder the axes of an array, moving ``source`` axes to ``destination``.

    Axes not listed keep their relative order.

    Parameters
    ----------
    a : COO
        The array whose axes should be reordered.
    source : int or List[int]
        Original positions of the axes to move. These must be unique.
    destination : int or List[int]
        Destination positions for each of the original axes. These must also be unique.

    Returns
    -------
    COO
        Array with moved axes.

    Examples
    --------
    >>> import numpy as np
    >>> import sparse
    >>> x = sparse.COO.from_numpy(np.ones((2, 3, 4, 5)))
    >>> sparse.moveaxis(x, (0, 1), (2, 3))
    <COO: shape=(4, 5, 2, 3), dtype=float64, nnz=120, fill_value=0.0>
    """
    # Accept scalars by promoting them to one-element tuples.
    if not isinstance(source, Iterable):
        source = (source,)
    if not isinstance(destination, Iterable):
        destination = (destination,)

    # Resolve negative indices and validate the axis ranges.
    source = normalize_axis(source, a.ndim)
    destination = normalize_axis(destination, a.ndim)

    if len(source) != len(destination):
        raise ValueError(
            "`source` and `destination` arguments must have "
            "the same number of elements"
        )

    # Start from the axes that stay put, then splice each moved axis into
    # its requested slot in ascending destination order.
    order = [axis for axis in range(a.ndim) if axis not in source]
    for dest, src in sorted(zip(destination, source)):
        order.insert(dest, src)
    return a.transpose(order)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
997,
7012,
198,
11748,
629,
541,
88,
13,
82,
29572,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
6738,
17268,
13,
39305,
1330,
40806,
540,
198,
198,
... | 2.251762 | 10,923 |
from collections import defaultdict
from typing import Dict
from typing import List
import numpy as np
class ArrayStore:
"""Storage class for keeping track of arrays."""
class LiFoStack:
"""Last in first out stack for numpy ndarrays."""
| [
6738,
17268,
1330,
4277,
11600,
198,
6738,
19720,
1330,
360,
713,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
299,
32152,
355,
45941,
628,
198,
4871,
15690,
22658,
25,
198,
220,
220,
220,
37227,
31425,
1398,
329,
5291,
2610,
286,
265... | 3.521127 | 71 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat May 11 21:41:44 2019
@author: xiaodanchen
"""
from util_code import data_preprocessing
from util_code import lstm_train | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
7031,
1737,
1367,
2310,
25,
3901,
25,
2598,
13130,
198,
198,
31,
9800,
25,
2124,
544,
375... | 2.633803 | 71 |
import pytest
from schemathesis.stateful import ParsedData
from schemathesis.utils import NOT_SET
@pytest.mark.parametrize(
"parameters, body", (({"a": 1}, None), ({"a": 1}, NOT_SET), ({"a": 1}, {"value": 1}), ({"a": 1}, [1, 2, 3]))
)
| [
11748,
12972,
9288,
198,
198,
6738,
3897,
6759,
8497,
13,
5219,
913,
1330,
23042,
276,
6601,
198,
6738,
3897,
6759,
8497,
13,
26791,
1330,
5626,
62,
28480,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
7,
198,
220,
... | 2.494845 | 97 |
#(C) Copyright Syd Logan 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
unit tests for consent application. Assumes django server is up
and running on the specific host and port
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.patient.patient import CreatePatient, DeletePatient
from tschartslib.clinic.clinic import CreateClinic, DeleteClinic
from tschartslib.register.register import CreateRegistration, GetRegistration, UpdateRegistration, DeleteRegistration
if __name__ == "__main__":
    # Script entry point.  ``main()`` is defined elsewhere in this file
    # (outside this excerpt) -- presumably it parses host/port options via
    # getopt and drives the registration API tests; confirm against the
    # full file.
    main()
| [
2,
7,
34,
8,
15069,
11415,
22221,
2177,
12,
42334,
198,
2,
7,
34,
8,
15069,
39255,
2439,
2915,
5693,
2177,
12,
42334,
198,
2,
198,
2,
26656,
15385,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,... | 3.62614 | 329 |
import numpy as np
import cv2
import glob
import imageio
# Pairs of keypoint indices joined by line segments when drawing the
# skeleton overlay.
connections = [
    (0, 1),
    (0, 2),
    (3, 5),
    (4, 6),
    (5, 7),
    (6, 8),
]

# Pixels per coordinate unit when rasterizing the normalized keypoints.
# (Previously assigned on every frame iteration but never used: the
# literal ``100`` was hard-coded instead.)
scale = 100

for txtfile in glob.glob("data/LH7/**/*.txt"):
    fname = txtfile.split("/")[-1].split(".")[0]
    folder = "/".join(txtfile.split("/")[:-1])
    images = []
    with open(txtfile) as fp:
        content = fp.readlines()

    # Join every frame into one comma-separated list to compute the
    # bounding box of the whole sequence, so all frames share one canvas.
    content_combined = ",".join(content).replace("\n", "")
    line_lst = [float(n) for n in content_combined.split(",")]
    v = iter(line_lst)
    li = [(i, next(v), next(v)) for i in v]  # list of (id, x, y) triples
    # NOTE(review): missing joints are encoded as (0, 0) and still count
    # toward these bounds -- confirm that is intended.
    min_x = min([pnt[0] for pnt in li]) - 0.1
    min_y = min([pnt[1] for pnt in li]) - 0.1
    max_x = max([pnt[0] for pnt in li]) + 0.1
    max_y = max([pnt[1] for pnt in li]) + 0.1
    w_x = abs(max_x - min_x) + 0.1
    w_y = abs(max_y - min_y) + 0.1

    for cnt, line in enumerate(content):
        line_lst = [float(n) for n in line.split("\n")[0].split(",")]
        v = iter(line_lst)
        li = [(i, next(v), next(v)) for i in v]  # list of (id, x, y) triples
        print(cnt, li)
        # White canvas sized to the sequence-wide bounding box.
        image = np.ones((int(w_y * scale), int(w_x * scale), 3)) * 255
        print(min_x, max_x, w_x)
        print(min_y, max_y, w_y)
        pnts = []
        for pnt in li:
            x = int((pnt[0] - min_x) * scale)
            y = int((pnt[1] - min_y) * scale)
            print(x, y)
            if pnt[0] != 0 or pnt[1] != 0:
                image = cv2.circle(image, (x, y), radius=2, color=(0, 0, 255), thickness=-1)
                pnts.append([x, y])
            else:
                # (0, 0) marks a missing joint; record a sentinel so the
                # connection pass below can skip segments touching it.
                pnts.append([-1, -1])
        for conn in connections:
            x1, y1 = pnts[conn[0]]
            x2, y2 = pnts[conn[1]]
            if -1 not in [x1, x2, y1, y2]:
                image = cv2.line(image, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
        images.append(image.astype(np.uint8))

    # Write one animated GIF per input file, next to its source.
    with imageio.get_writer(f'{folder}/{fname}-skel.gif', mode='I') as writer:
        for image in images:
            writer.append_data(image)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
15095,
198,
11748,
2939,
952,
198,
198,
8443,
507,
796,
685,
198,
220,
220,
220,
357,
15,
11,
16,
828,
198,
220,
220,
220,
357,
15,
11,
17,
828,
198,
220,
220,
... | 1.623663 | 1,496 |
from etldjango.settings import DEBUG
from .utiles import get_client_and_log_resource
import io
import logging
import json
client, _LOG_RESOURCE = get_client_and_log_resource()
| [
6738,
2123,
335,
73,
14208,
13,
33692,
1330,
16959,
198,
6738,
764,
315,
2915,
1330,
651,
62,
16366,
62,
392,
62,
6404,
62,
31092,
198,
11748,
33245,
198,
11748,
18931,
198,
11748,
33918,
628,
198,
16366,
11,
4808,
25294,
62,
19535,
3... | 3.314815 | 54 |