content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from tuprolog import logger
# noinspection PyUnresolvedReferences
import jpype.imports
from tuprolog.core import Term, Clause, Integer
from tuprolog.solve import ExecutionContext, Signature, Solution, current_time_instant, MAX_TIMEOUT
# from tuprolog.solve.sideffcts import SideEffect
from tuprolog.pyutils import iterable_or_varargs
from tuprolog.jvmutils import jlist
from typing import List, Iterable, Callable
# noinspection PyUnresolvedReferences
import it.unibo.tuprolog.solve.primitive as _primitive
Primitive = _primitive.Primitive
Solve = _primitive.Solve
PrimitiveWrapper = _primitive.PrimitiveWrapper
SolveRequest = Solve.Request
SolveResponse = Solve.Request
@jpype.JImplements(Primitive)
logger.debug("Loaded JVM classes from it.unibo.tuprolog.solve.primitive.*")
| [
6738,
256,
929,
40329,
1330,
49706,
198,
2,
645,
1040,
14978,
9485,
3118,
411,
5634,
19927,
198,
11748,
474,
79,
2981,
13,
320,
3742,
198,
6738,
256,
929,
40329,
13,
7295,
1330,
35118,
11,
28081,
11,
34142,
198,
6738,
256,
929,
40329,... | 3.071429 | 266 |
import cv2
import sys
import numpy as np
import threading
import repair_last as rp
global find
find=False
global mejor_x_inicial
mejor_x_inicial = 0
global mejor_y_inicial
mejor_y_inicial = 0
# global resultado1
# resultado1=[]
# global resultado2
# resultado2=[]
# global resultado3
# resultado3=[]
# global resultado4
# resultado4=[]
pathIn = 'piano144.mp4'
sec = 0
frameRate = 1
alpha = 0.9675
beta = (1.0 - alpha)
pathOut = 'video.avi'
fps = 15
vidcap = cv2.VideoCapture(pathIn)
vidcap.set(cv2.CAP_PROP_POS_MSEC, sec*1000)
success, image = vidcap.read()
img = cv2.imread('open.png')
if success:
height, width, layers = image.shape
size = (width, height)
global src1
src1 = image
r = cv2.selectROI(src1)
imCrop = src1[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
cv2.imshow("Image", imCrop)
cv2.waitKey(1)
sec=33
# sec = round(sec + frameRate, 2)
vidcap.set(cv2.CAP_PROP_POS_MSEC, 1*1000)
success, image = vidcap.read()
if success:
src2 = image
print(r)
print(f'filas:{src1.shape[0]},columnas:{src1.shape[1]}')
# r (xinicial, yinicial, xfinal , yfinalc)
# r (columnas, filas, xfinal , yfinalc)
print(r)
print(imCrop.shape[1])
print(imCrop.shape[0])
print(f'rangodecolumnas:{imCrop.shape[1]*5}')
margenenfilas=imCrop.shape[0]*4
margenencolumnas=imCrop.shape[0]*4
print(np.sum((imCrop[1][1]-src1[1][1])**2))
print(np.power(np.sum(np.subtract(imCrop[1][1], src1[1][1])), 2))
print(f'rangodefilas:{r[0]-margenenfilas},{(r[0]+r[2])+margenenfilas}')
print(f'rangodecolumnas:{r[1]-margenencolumnas},{(r[1]+r[3])+margenencolumnas}')
cv2.imshow("alineada",rp.alinear(src2,imCrop,r))
cv2.waitKey(0)
| [
11748,
269,
85,
17,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4704,
278,
220,
198,
11748,
9185,
62,
12957,
355,
374,
79,
198,
198,
20541,
1064,
198,
19796,
28,
25101,
198,
20541,
502,
73,
273,
62,
87,
62,
25... | 2.127907 | 774 |
from typing import Union
import psycopg2
from lib.config import Config, MODE_WORKER
from lib.platform import Platform
from lib.platforms.steam import init_platform as init_steam
from lib.platforms.retroachievements import init_platform as init_retro
from lib.player import STATUS_VALID, Player
from .log import get_logger
global load_log
| [
6738,
19720,
1330,
4479,
198,
198,
11748,
17331,
22163,
70,
17,
198,
198,
6738,
9195,
13,
11250,
1330,
17056,
11,
337,
16820,
62,
33249,
1137,
198,
6738,
9195,
13,
24254,
1330,
19193,
198,
6738,
9195,
13,
24254,
82,
13,
21465,
1330,
2... | 3.510204 | 98 |
"""Spinbox module."""
from PyQt6 import QtWidgets # type: ignore[import]
class SpinBox(QtWidgets.QWidget):
"""SpinBox class."""
_label: "QtWidgets.QLabel"
_spin_box: "QtWidgets.QSpinBox"
_lay: "QtWidgets.QHBoxLayout"
def __init__(self, parent=None):
"""Initialize."""
super().__init__(parent)
self._lay = QtWidgets.QHBoxLayout(self)
self._label = QtWidgets.QLabel()
self._spin_box = QtWidgets.QSpinBox()
self._lay.addWidget(self._label)
self._lay.addWidget(self._spin_box)
def getMax(self) -> int:
"""Return Maximum."""
return self._spin_box.maximum()
def setMax(self, max: int) -> None:
"""Set Maximum."""
self._spin_box.setMaximum(max)
def getMin(self) -> int:
"""Return Minimum."""
return self._spin_box.minimum()
def setMin(self, min: int) -> None:
"""Set Minimum."""
self._spin_box.setMinimum(min)
def getValue(self) -> int:
"""Return value."""
return self._spin_box.value()
def setValue(self, value: int) -> None:
"""Set Minimum."""
self._spin_box.setValue(value)
def getLabel(self) -> str:
"""Return label."""
return self._label.text()
def setLabel(self, label: str) -> None:
"""Set label."""
self._label.setText(label)
maximum: int = property(getMax, setMax) # type: ignore [assignment] # noqa: F821
minimum: int = property(getMin, setMin) # type: ignore [assignment] # noqa: F821
value: int = property(getValue, setValue) # type: ignore [assignment] # noqa: F821
label: str = property(getLabel, setLabel) # type: ignore [assignment] # noqa: F821
| [
37811,
4561,
259,
3524,
8265,
526,
15931,
198,
198,
6738,
9485,
48,
83,
21,
1330,
33734,
54,
312,
11407,
220,
1303,
2099,
25,
8856,
58,
11748,
60,
628,
198,
4871,
28002,
14253,
7,
48,
83,
54,
312,
11407,
13,
48,
38300,
2599,
198,
... | 2.292715 | 755 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.891892 | 37 |
#!/usr/bin/env python
# generates list of IP addresses from range IP start - IP stop
#
import sys
usage = """
### IP range address generator
### by bl4de | twiiter.com/_bl4de | hackerone.com/bl4de | github.com/bl4de
Generates list of IP addresses, starting from IP passed as first argument and ended up with
IP address passed as second argument.
Sample usage: to generate all IPs between 192.168.1.1 and 192.168.2.255 (512 addresses):
./ip_generator.py 192.168.1.1 192.168.2.255
"""
if __name__ == "__main__":
f = open("ip_list.log", "w+")
if len(sys.argv) != 3:
help()
exit(0)
start = sys.argv[1].split(".")
stop = sys.argv[2].split(".")
print "\n[+] generating IP addresses in range from {} to {}...".format(sys.argv[1], sys.argv[2])
generate(start, stop, f)
print "[+] addresses generated...\n"
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
18616,
1351,
286,
6101,
9405,
422,
2837,
6101,
923,
532,
6101,
2245,
198,
2,
198,
11748,
25064,
198,
198,
26060,
796,
37227,
198,
21017,
6101,
2837,
2209,
17301,
198,
21017,
416,
69... | 2.648148 | 324 |
import argparse
import time
import matplotlib as mpl
mpl.use("agg")
import numpy
import matplotlib.pyplot as plt
import emcee
import corner
import math
import sys
import pickle
from multiprocessing import Pool
from astropy.io import fits
#from emcee.utils import MPIPool
#from pathos.multiprocessing import Pool
debug = True
num_dithers = None
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Script to run MCMC fitter on the simulated data")
parser.add_argument("--filename", dest="filename", default="example_result.fits")
parser.add_argument("--fiber-no", dest="fiber", default=0, type=int)
parsed_args = parser.parse_args()
start_time = time.time()
filename = parsed_args.filename
fibers = [parsed_args.fiber]
fiber_radius = 55.
# the ones below may not be needed
x = None
y = None
yerr = None
fibers = fibers if fibers is not None else [1, 2]
hdus = fits.open(filename)
data = hdus[1].data
known_offset_x = data['known_offset_x'][fibers] # of size num_fibers
known_offset_y = data['known_offset_y'][fibers] # of size num_fibers
dither_pos_x = data['dither_pos_x'][fibers] # of size num_fibersx9
dither_pos_y = data['dither_pos_y'][fibers] # of size num_fibersx9
signals = data['calc_signals'][fibers] # of size num_fibersx9
snrs = data['calc_snrs'][fibers] # of size num_fibersx9
est_pos_x = data['calc_offset_x'][fibers] # of size num_fibers
est_pos_y = data['calc_offset_y'][fibers] # of size num_fibers
est_sigma = data['calc_sigma_x'][fibers] # of size num_fibers
num_fibers = len(fibers)
num_dithers = len(dither_pos_x[0])
num_params = num_fibers * 3 + num_dithers
if debug:
print("number of fibers in the simulation: {}".format(num_fibers))
print("number of dithers in the simulations: {}".format(num_dithers))
print("number of unknowns to solve: {}".format(num_params))
sampler = create_sampler()
print("sampler generated")
samples = run_MCMC(sampler)
print("total time elapsed for MCMC: {}".format(time.time()-start_time))
save_results(samples)
plot_results(samples)
| [
198,
11748,
1822,
29572,
198,
11748,
640,
198,
198,
11748,
2603,
29487,
8019,
355,
285,
489,
198,
76,
489,
13,
1904,
7203,
9460,
4943,
198,
198,
11748,
299,
32152,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
117... | 2.39381 | 937 |
"""
多线程
threading
"""
import threading, time
from threading import Thread
print('thread %s is run...' % threading.current_thread().name)
t = Thread(target=loop, name='LoopThread')
t.start()
t.join() # 同步
print('thread %s done' % threading.current_thread().name)
| [
37811,
198,
13783,
248,
163,
118,
123,
163,
101,
233,
198,
16663,
278,
198,
37811,
198,
11748,
4704,
278,
11,
640,
198,
6738,
4704,
278,
1330,
14122,
628,
198,
4798,
10786,
16663,
4064,
82,
318,
1057,
986,
6,
4064,
4704,
278,
13,
14... | 2.731959 | 97 |
import logging as log
| [
198,
11748,
18931,
355,
2604,
628
] | 4 | 6 |
from gatekeeping.db import get_db
from flask import abort
| [
6738,
8946,
19934,
13,
9945,
1330,
651,
62,
9945,
198,
6738,
42903,
1330,
15614,
198
] | 3.866667 | 15 |
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce.core.memories import Queue
class Latest(Queue):
"""
Memory which always retrieves most recent experiences.
"""
def __init__(self, states, internals, actions, include_next_states, capacity, scope='latest', summary_labels=None):
"""
Latest memory.
Args:
states: States specifiction.
internals: Internal states specification.
actions: Actions specification.
include_next_states: Include subsequent state if true.
capacity: Memory capacity.
"""
super(Latest, self).__init__(
states=states,
internals=internals,
actions=actions,
include_next_states=include_next_states,
capacity=capacity,
scope=scope,
summary_labels=summary_labels
)
| [
2,
15069,
2177,
19594,
13,
952,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
137... | 2.966312 | 564 |
# -*- coding: utf-8 -*-
"""Testing code for the (Python) UCB bandit policies.
**Files in this package**
* :mod:`moe.tests.bandit.ucb.ucb_test_case`: base test case for UCB tests
* :mod:`moe.tests.bandit.ucb.ucb1_test`: tests for :mod:`moe.bandit.ucb.ucb1.UCB1`
* :mod:`moe.tests.bandit.ucb.ucb1_tuned_test`: tests for :mod:`moe.bandit.ucb.ucb1_tuned.UCB1Tuned`
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
44154,
2438,
329,
262,
357,
37906,
8,
14417,
33,
4097,
270,
4788,
13,
198,
198,
1174,
25876,
287,
428,
5301,
1174,
198,
9,
1058,
4666,
25,
63,
76,
2577,
13,
4... | 2.109195 | 174 |
import sys
#this code asks the user to input the rental code B, D, or W. as well as if the user inputs a lower case letter, changes it
#to uppercase to match the code.
rentalCode = input("(B)udget, (D)aily, or (W)eekly rental?\n").upper()
#this block of code provides a loop wherein if the user inputs anything besides B, D, or W, it asks the user to re-enter the
#correct code. It also ensures that when the correct input is entered, it stops the loop and proceeds to the next block of code.
while rentalCode not in ("W", "D", "B", "w", "d", "b"):
print("rental code not valid, please re-enter the rental code.")
rentalCode = input.upper("(B)udget, (D)aily, or (W)eekly rental?\n")
if rentalCode in ("W", "D", "B"):
break;
#this block of code say for each letter code is a seperate input depending on the code entered.
#for each letter, the variable changes based on what the user enters, and then the system asks for another variable entered by
#the user.
if rentalCode == "W":
rentalPeriod = input("Number of weeks rented:\n");
elif rentalCode == "B":
rentalPeriod = input("Number of Days Rented:\n");
elif rentalCode == "D":
rentalPeriod = input("Number of Days Rented:\n");
#this block assigns the cost for each rental code.
budget_charge = 40.00
daily_charge = 60.00
weekly_charge = 190.00
#this block indicates the different calculations for each rental code and assigns the solution to the variable baseCharge
#based on the input of the user.
if rentalCode == "B":
baseCharge = int(rentalPeriod) * int(budget_charge)
elif rentalCode == "D":
baseCharge = int(rentalPeriod) * int(daily_charge)
elif rentalCode == "W":
baseCharge = int(rentalPeriod) * int(weekly_charge)
#this block prompts the user to input the starting and ending mileage and assigns them to the variable odoStart and odoEnd.
odoStart = input("Starting odometer reading:\n")
odoEnd = input("Ending odometer reading:\n")
#this block of code calculates the number of miles driven while the vehicle was rented
#and assigns the number to the variable totalMiles.
totalMiles = int(odoEnd) - int(odoStart)
#this code states that if the user inputs B for the rental code, then the mile charge is calculated.
if rentalCode == "B":
mileCharge = float(totalMiles) * 0.25;
#this block of code states if the user inputs D as the rental code, then the average
#day miles is calculated. if the average day miles is less than or equal to 100, then the #mile charge is 0. if it is more,
#than 100 is subtracted from it, and the calculation of
#.25 cents per mile times the number of days is calculated an assigned to the variable.
#mileCharge.
elif rentalCode == "D":
averageDayMiles = float(totalMiles)/float(rentalPeriod);
if float(averageDayMiles) <= 100:
extraMiles = 0;
else:
extraMiles = float(averageDayMiles) - 100;
mileCharge = (.25 * float(extraMiles)) * float(rentalPeriod);
#this block of code states that if the user inputed W for the rental code, then the
#average weekly miles is calculated. if the average weekly miles is less than or equal
#to 900, the mile chwrge is 0.if it is more, the calculation of 100 times the number of #weeks rented is computed and assigned
# to the variable mile charge.
elif rentalCode == "W":
averageWeekMiles = float(totalMiles)/ float(rentalPeriod);
if averageWeekMiles <= 900:
mileCharge = 0;
else:
mileCharge = 100 * float(rentalPeriod);
#this code calculates the base charge + the mile charge and assigns it to the variable
#amtdue for the cost.
amtDue = float(baseCharge) + float(mileCharge)
#this block of code prints out a 'reciept' to the customer with a summary of charges.
print("Rental Summary")
print("Rental Code: " + str(rentalCode))
print("Rental Period: " + str(rentalPeriod))
print("Starting Odometer: " + str(odoStart))
print("Ending Odometer: " + str(odoEnd))
print("Miles Driven: " + str(totalMiles))
print("Amount Due: " + "$" + str(amtDue) + '0')
| [
11748,
25064,
198,
198,
2,
5661,
2438,
7893,
262,
2836,
284,
5128,
262,
14447,
2438,
347,
11,
360,
11,
393,
370,
13,
355,
880,
355,
611,
262,
2836,
17311,
257,
2793,
1339,
3850,
11,
2458,
340,
220,
198,
2,
1462,
334,
39921,
589,
2... | 3.245329 | 1,231 |
#how to get a measure of batting or bowling strength? why it's simple! take top-100 rankings list at given period...
#what is score held by #1 player? normalise against this score (i.e. this score now equivalent to 1)
#now add up top 8 normalised scores for each nation's top 8 players in top 100 (set x: 0 <= x <= 1) .
#(8 an arbitrary number yes, but seems to be fair, on the evidence (8 players in top 100 means good depth, but above 8...
#we just bias nations that chop and change, and ingrain bias against minnows))
#now we normalise against nation with highest total of normalised scores. this nation has overall strength 1. others have 0.a
import csv
maindict = { "AUS":[0,0], "SA":[0,0], "IND":[0,0], "NZ":[0,0], "WI":[0,0], "PAK":[0,0], "BAN": [0,0], "AFG": [0,0], "SL": [0,0], "IRE": [0,0], "ZIM": [0,0], "ENG": [0,0], "SCO": [0,0], "CAN": [0,0], "NED": [0,0], "KEN": [0,0]}
j = 0
topValue = 0
with open('[insert cute name for list of 100 rankings here]') as file:
reader = csv.reader(file)
for row in reader:
if j == 0:
j += 1
continue
if j == 1:
topValue = int(row[1])
print('[insert cute heading for output here]\n' + row[1])
j += 1
#excluding the super miniature minnows... these three countries are the ones to leave out from 2011-2013.
#but the last three in the dict should be swapped with these three for the years 2014-the present
if row[3] == 'P.N.G.' or row[3] == 'U.A.E.' or row[3] == 'H.K.':
continue
x = maindict[row[3]]
normalValue = int(row[1])/topValue
if x[1] < 8:
x[0] += normalValue
x[1] += 1
#printing the dict with raw scores of each nation as first element followed by # of players who contributed to the score
print(maindict)
print('\n')
biggestValue = 0
for entries in maindict:
x = maindict[entries][0]
if x > biggestValue:
biggestValue = x
for entries in maindict:
#normalising with respect to strongest nation
maindict[entries] = maindict[entries][0]/biggestValue
#the output will now show us the dict with each nation's normalised strength-value first, followed by # of players again (unchanged)
print(maindict)
| [
2,
4919,
284,
651,
257,
3953,
286,
23761,
393,
33564,
4202,
30,
1521,
340,
338,
2829,
0,
1011,
1353,
12,
3064,
16905,
1351,
379,
1813,
2278,
986,
220,
198,
2,
10919,
318,
4776,
2714,
416,
1303,
16,
2137,
30,
3487,
786,
1028,
428,
... | 2.856563 | 739 |
"""Support for Eight Sleep binary sensors."""
from __future__ import annotations
import logging
from pyeight.eight import EightSleep
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import EightSleepBaseEntity
from .const import DATA_API, DATA_HEAT, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the eight sleep binary sensor."""
if discovery_info is None:
return
eight: EightSleep = hass.data[DOMAIN][DATA_API]
heat_coordinator: DataUpdateCoordinator = hass.data[DOMAIN][DATA_HEAT]
entities = []
for user in eight.users.values():
entities.append(
EightHeatSensor(heat_coordinator, eight, user.userid, "bed_presence")
)
async_add_entities(entities)
class EightHeatSensor(EightSleepBaseEntity, BinarySensorEntity):
"""Representation of a Eight Sleep heat-based sensor."""
_attr_device_class = BinarySensorDeviceClass.OCCUPANCY
def __init__(
self,
coordinator: DataUpdateCoordinator,
eight: EightSleep,
user_id: str | None,
sensor: str,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator, eight, user_id, sensor)
assert self._user_obj
_LOGGER.debug(
"Presence Sensor: %s, Side: %s, User: %s",
sensor,
self._user_obj.side,
user_id,
)
@property
def is_on(self) -> bool:
"""Return true if the binary sensor is on."""
assert self._user_obj
return bool(self._user_obj.bed_presence)
| [
37811,
15514,
329,
18087,
17376,
13934,
15736,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
11748,
18931,
198,
198,
6738,
279,
5948,
432,
13,
26022,
1330,
18087,
40555,
198,
198,
6738,
1363,
562,
10167,
13,
5589,
3906,... | 2.65557 | 781 |
import os
import sys
import logging
from umbra.design.configs import Profile, Topology, Scenario
from umbra.design.configs import FabricTopology
from base_configtx.fabric import org1_policy, org2_policy, org3_policy, org4_policy, orderer_policy, configtx
def setup_logging(log_level=logging.DEBUG):
"""Set up the logging."""
logging.basicConfig(level=log_level)
fmt = ("%(asctime)s %(levelname)s (%(threadName)s) "
"[%(name)s] %(message)s")
colorfmt = "%(log_color)s{}%(reset)s".format(fmt)
datefmt = '%Y-%m-%d %H:%M:%S'
try:
from colorlog import ColoredFormatter
logging.getLogger().handlers[0].setFormatter(ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
))
except ImportError:
pass
logger = logging.getLogger('')
logger.setLevel(log_level)
if __name__ == "__main__":
setup_logging()
builds()
| [
11748,
28686,
198,
11748,
25064,
198,
11748,
18931,
198,
198,
6738,
20810,
430,
13,
26124,
13,
11250,
82,
1330,
13118,
11,
5849,
1435,
11,
1446,
39055,
198,
6738,
20810,
430,
13,
26124,
13,
11250,
82,
1330,
37759,
9126,
1435,
198,
198,
... | 2.068716 | 553 |
import re
from typing import List
from flask import current_app
from models.word import Word
from sqlalchemy.sql.expression import func
db = current_app.db
def get_word(word_id: int = None, text: str = None):
"""
Returns a word from the word database.
:param text: The text of the word to return
:param word_id: The ID of the word to return.
:returns: The word if found.
"""
if word_id is not None and text is not None:
return Word.query.filter(Word.id == word_id).filter(Word.text == text).first()
elif word_id is not None:
return Word.query.filter(Word.id == word_id).first()
elif text is not None:
return Word.query.filter(Word.text == text).first()
else:
return None
def get_words(word_ids: List[int] = None, texts: List[str] = None):
"""
Returns a list of words from the database
:param word_ids: The IDs of the words to return
:param texts: The texts of the words to return
:returns: A list of words
"""
words = []
if word_ids is not None and texts is not None:
if len(word_ids) != len(texts):
return None
for i in range(len(word_ids)):
word_id = word_ids[i]
text = texts[i]
word = get_word(word_id=word_id, text=text)
if word is not None:
words.append(word)
return words
elif word_ids is not None:
for word_id in word_ids:
word = get_word(word_id=word_id)
if word is not None:
words.append(word)
return words
elif texts is not None:
for text in texts:
word = get_word(text=text)
if word is not None:
words.append(word)
return words
else:
return None
def get_random_words(length: int):
"""
Returns a random list of words from the word database.
:param length: The count of words to return.
:returns: A list of words.
"""
words = Word.query.order_by(func.random()).limit(length).all()
return words
def create_word(text: str):
"""
Creates a new word in the word database if it doesn't exist yet (case insensitive)
:param text: The text for the new word
:returns: The created word if successful
"""
if not re.search("[a-zA-Z]", text):
return None
lower_text = text.lower()
existing_word = Word.query.filter(func.lower(Word.text) == lower_text).first()
if existing_word is not None:
return existing_word
word = Word(text=text)
db.session.add(word)
db.session.commit()
return word
def delete_word(word_id: int):
"""
Deletes a word from the word database.
:param word_id: The ID of the word to delete.
"""
Word.query.filter(Word.id == word_id).delete()
db.session.commit()
def delete_words(word_ids: List[int]):
"""
Deletes words from the word database.
:param word_ids: The IDs of the words to delete.
"""
for word_id in word_ids:
Word.query.filter(Word.id == word_id).delete()
db.session.commit()
| [
11748,
302,
198,
6738,
19720,
1330,
7343,
198,
6738,
42903,
1330,
1459,
62,
1324,
198,
6738,
4981,
13,
4775,
1330,
9678,
198,
6738,
44161,
282,
26599,
13,
25410,
13,
38011,
1330,
25439,
628,
198,
9945,
796,
1459,
62,
1324,
13,
9945,
6... | 2.459055 | 1,270 |
from django.shortcuts import render
from django.http import JsonResponse, HttpRequest, HttpResponse
from django.http import HttpResponseBadRequest
import json
import logging
from .models import User
from django.db.models.query import *
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
4023,
1330,
449,
1559,
31077,
11,
367,
29281,
18453,
11,
367,
29281,
31077,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
22069,
18453,
198,
1174... | 3.643836 | 73 |
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import (ascii, bytes, chr, dict, filter, hex, input, int, map, next, oct, open, pow, range, round, str,
super, zip)
import copy
from enum import Enum
| [
6738,
11593,
37443,
834,
1330,
357,
48546,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
8,
198,
6738,
3170,
1040,
1330,
357,
292,
979,
72,
11,
9881,
11,
442,
81,
11,
8633,
11,
8106,
11,
17910,
11,
5128... | 2.68932 | 103 |
"""
Converts the FIRE 2013 dataset to TSV
http://au-kbc.org/nlp/NER-FIRE2013/index.html
The dataset is in six tab separated columns. The columns are
word tag chunk ner1 ner2 ner3
This script keeps just the word and the ner1. It is quite possible that using the tag would help
"""
import argparse
import glob
import os
import random
if __name__ == '__main__':
random.seed(1234)
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', type=str, default="/home/john/extern_data/ner/FIRE2013/hindi_train", help="Directory with raw files to read")
parser.add_argument('--train_file', type=str, default="/home/john/stanza/data/ner/hi_fire2013.train.csv", help="Where to put the train file")
parser.add_argument('--dev_file', type=str, default="/home/john/stanza/data/ner/hi_fire2013.dev.csv", help="Where to put the dev file")
parser.add_argument('--test_file', type=str, default="/home/john/stanza/data/ner/hi_fire2013.test.csv", help="Where to put the test file")
args = parser.parse_args()
convert_fire_2013(args.input_path, args.train_file, args.dev_file, args.test_file)
| [
37811,
198,
3103,
24040,
262,
39110,
2211,
27039,
284,
26136,
53,
198,
198,
4023,
1378,
559,
12,
74,
15630,
13,
2398,
14,
21283,
79,
14,
21479,
12,
11674,
2200,
6390,
14,
9630,
13,
6494,
198,
198,
464,
27039,
318,
287,
2237,
7400,
1... | 2.881013 | 395 |
import numpy as np
from pyscf import gto, scf, ao2mo
from traniest.bootstrap import mp2_emb
if __name__ == "__main__":
#def main():
from functools import reduce
from pyscf import mp, lo, ao2mo
from frankenstein.tools.tensor_utils import get_symm_mat_pow
import time
mol = gto.Mole()
mol.fromfile("/work/henry/Calculations/BE/C16_ex/geom/C16_ether.xyz")
mol.basis = 'cc-pvdz'
mol.build()
N = mol.nbas
nOcc = mol.nelec[0]
nVir = N - nOcc
mf = scf.RHF(mol)
mf.kernel()
#mymp = mp.MP2(mf)
#mymp.kernel(with_t2=False)
#S = mol.intor_symmetric("int1e_ovlp")
#StoOrth = get_symm_mat_pow(S, 0.50)
#StoOrig = get_symm_mat_pow(S, -0.50)
hAO = mf.get_hcore()
I = np.eye(hAO.shape[0])
VAO = ao2mo.kernel(mol, I)
VAO = ao2mo.restore(1, VAO, hAO.shape[0])
hMO = mf.mo_coeff.T @ mf.get_hcore() @ mf.mo_coeff
VMO = ao2mo.kernel(mol, mf.mo_coeff)
VMO = ao2mo.restore(1, VMO, hMO.shape[0])
EHF = CalcHFEnergy(hMO, VMO, nOcc)
print("HF Energy:", EHF)
ECorrMP2 = CalcMP2Energy(VMO, mf.mo_energy, nOcc)
print("MP2 Correlation Energy:", ECorrMP2)
VMO = None
Ranks = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 700, 800, 900, 1000, 2000, 3000, 4000, 5000] #7500, 10000, 15000, 20000] #[50, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600]
#TE_E1 = []
#TE_E2 = []
HF_E1 = []
HF_E2 = []
MP_E1 = []
MP_E2 = []
t1 = []
t2 = []
for Rank in Ranks:
print("Running for Rank", Rank)
startCD = time.time()
L_CD = AuxBasisCD(VAO, Rank = Rank)
endCD = time.time()
startDG = time.time()
L_DG = AuxBasisDIAG(VAO, Rank = Rank)
endDG = time.time()
print("... calculated L in following time:", endCD - startCD, endDG - startDG)
VMO_CD = RotateTEIAuxBasis(L_CD, mf.mo_coeff)
VMO_DG = RotateTEIAuxBasis(L_DG, mf.mo_coeff)
print("... calculated VMO")
# V_E1, V_E2 = CompareV(VMO, VMO_CD, VMO_DG)
E_E1, E_E2 = CompareHF(hMO, VMO_CD, VMO_DG, nOcc)
F_E1, F_E2 = CompareMP2(VMO_CD, VMO_DG, mf.mo_energy, nOcc)
print("... calculated Energy")
HF_E1.append(E_E1)
HF_E2.append(E_E2)
MP_E1.append(F_E1)
MP_E2.append(F_E2)
t1.append(endCD - startCD)
t2.append(endDG - startDG)
print("HF Energies")
print(HF_E1)
print(HF_E2)
print("MP2 Energies")
print(MP_E1)
print(MP_E2)
N = len(Ranks)
Results = np.zeros((N, 7))
Results[:, 0] = Ranks
Results[:, 1] = HF_E1
Results[:, 2] = HF_E2
Results[:, 3] = MP_E1
Results[:, 4] = MP_E2
Results[:, 5] = t1
Results[:, 6] = t2
np.savetxt("df_results.txt", Results, delimiter = '\t')
| [
11748,
299,
32152,
355,
45941,
198,
6738,
279,
893,
12993,
1330,
308,
1462,
11,
629,
69,
11,
257,
78,
17,
5908,
198,
6738,
491,
3216,
395,
13,
18769,
26418,
1330,
29034,
17,
62,
24419,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834... | 1.973165 | 1,267 |
import pytest
import falcon
from lupin import constructor
from clustaar.schemas import v1
from clustaar.webhook import EventsHandler
from clustaar.webhook.routing import Router
from tests.utils import FACTORY
@pytest.fixture
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
24215,
1102,
198,
6738,
300,
929,
259,
1330,
23772,
198,
6738,
32966,
64,
283,
13,
1416,
4411,
292,
1330,
410,
16,
198,
6738,
32966,
64,
283,
13,
12384,
25480,
1330,
18715,
25060,
198,
6738,
32966,
64,
... | 3.065217 | 92 |
import time
from sqlalchemy import desc
from anchore_engine import db
from anchore_engine.db import PolicyEval
# specific DB interface helpers for the 'policyeval' table
def add_all_for_digest(userId, records, session):
"""
Assumes these are all valid records.
:param records: list of dicts from PolicyEval json dumps
:param session:
:return:
"""
recs = []
for r in records:
r['userId'] = userId
rec = PolicyEval()
rec.update(r)
recs.append(session.add(rec))
return recs
| [
11748,
640,
198,
198,
6738,
44161,
282,
26599,
1330,
1715,
198,
198,
6738,
12619,
382,
62,
18392,
1330,
20613,
198,
6738,
12619,
382,
62,
18392,
13,
9945,
1330,
7820,
36,
2100,
198,
198,
2,
2176,
20137,
7071,
49385,
329,
262,
705,
305... | 2.647343 | 207 |
"""
https://leetcode.com/problems/find-n-unique-integers-sum-up-to-zero/
Given an integer n, return any array containing n unique integers such that they add up to 0.
Example 1:
Input: n = 5
Output: [-7,-1,1,3,4]
Explanation: These arrays also are accepted [-5,-1,1,2,3] , [-3,-1,2,-2,4].
Example 2:
Input: n = 3
Output: [-1,0,1]
Example 3:
Input: n = 1
Output: [0]
Constraints:
1 <= n <= 1000
"""
# time complexity: O(n), space complexity: O(1)
| [
37811,
198,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
19796,
12,
77,
12,
34642,
12,
18908,
364,
12,
16345,
12,
929,
12,
1462,
12,
22570,
14,
198,
15056,
281,
18253,
299,
11,
1441,
597,
7177,
7268,
299,
3748,
37014,
... | 2.483696 | 184 |
import os
import tkinter as tk
from .components import MainFrame
from .img import IMG_PATH
| [
11748,
28686,
198,
198,
11748,
256,
74,
3849,
355,
256,
74,
198,
198,
6738,
764,
5589,
3906,
1330,
8774,
19778,
198,
6738,
764,
9600,
1330,
8959,
38,
62,
34219,
628
] | 3.133333 | 30 |
#!/usr/bin/python
#
# 2022 Ryan Martin, ryan@ensomniac.com
# Andrew Stet, stetandrew@gmail.com
import os
import sys
Centimeter = _Centimeter()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
198,
2,
33160,
6047,
5780,
11,
374,
4121,
31,
641,
296,
8461,
330,
13,
785,
198,
2,
220,
220,
220,
220,
220,
6858,
520,
316,
11,
336,
316,
392,
1809,
31,
14816,
13,
785,
198,
198,
... | 2.491803 | 61 |
import numpy as np
def linearize(x, f, tolerance=0.001):
"""Return a tabulated representation of a one-variable function
Parameters
----------
x : Iterable of float
Initial x values at which the function should be evaluated
f : Callable
Function of a single variable
tolerance : float
Tolerance on the interpolation error
Returns
-------
numpy.ndarray
Tabulated values of the independent variable
numpy.ndarray
Tabulated values of the dependent variable
"""
# Make sure x is a numpy array
x = np.asarray(x)
# Initialize output arrays
x_out = []
y_out = []
# Initialize stack
x_stack = [x[0]]
y_stack = [f(x[0])]
for i in range(x.shape[0] - 1):
x_stack.insert(0, x[i + 1])
y_stack.insert(0, f(x[i + 1]))
while True:
x_high, x_low = x_stack[-2:]
y_high, y_low = y_stack[-2:]
x_mid = 0.5*(x_low + x_high)
y_mid = f(x_mid)
y_interp = y_low + (y_high - y_low)/(x_high - x_low)*(x_mid - x_low)
error = abs((y_interp - y_mid)/y_mid)
if error > tolerance:
x_stack.insert(-1, x_mid)
y_stack.insert(-1, y_mid)
else:
x_out.append(x_stack.pop())
y_out.append(y_stack.pop())
if len(x_stack) == 1:
break
x_out.append(x_stack.pop())
y_out.append(y_stack.pop())
return np.array(x_out), np.array(y_out)
def thin(x, y, tolerance=0.001):
"""Check for (x,y) points that can be removed.
Parameters
----------
x : numpy.ndarray
Independent variable
y : numpy.ndarray
Dependent variable
tolerance : float
Tolerance on interpolation error
Returns
-------
numpy.ndarray
Tabulated values of the independent variable
numpy.ndarray
Tabulated values of the dependent variable
"""
# Initialize output arrays
x_out = x.copy()
y_out = y.copy()
N = x.shape[0]
i_left = 0
i_right = 2
while i_left < N - 2 and i_right < N:
m = (y[i_right] - y[i_left])/(x[i_right] - x[i_left])
for i in range(i_left + 1, i_right):
# Determine error in interpolated point
y_interp = y[i_left] + m*(x[i] - x[i_left])
if abs(y[i]) > 0.:
error = abs((y_interp - y[i])/y[i])
else:
error = 2*tolerance
if error > tolerance:
for i_remove in range(i_left + 1, i_right - 1):
x_out[i_remove] = np.nan
y_out[i_remove] = np.nan
i_left = i_right - 1
i_right = i_left + 1
break
i_right += 1
for i_remove in range(i_left + 1, i_right - 1):
x_out[i_remove] = np.nan
y_out[i_remove] = np.nan
return x_out[np.isfinite(x_out)], y_out[np.isfinite(y_out)]
| [
11748,
299,
32152,
355,
45941,
628,
198,
4299,
14174,
1096,
7,
87,
11,
277,
11,
15621,
28,
15,
13,
8298,
2599,
198,
220,
220,
220,
37227,
13615,
257,
7400,
4817,
10552,
286,
257,
530,
12,
45286,
2163,
628,
220,
220,
220,
40117,
198,... | 1.971223 | 1,529 |
from ... import metrics as fr_metrics
| [
6738,
2644,
1330,
20731,
355,
1216,
62,
4164,
10466,
198
] | 3.8 | 10 |
# Python code for 2D random walk.
import json
import sys
import random
import time
import math
import logging
import asyncio
from pycollisionavoidance.pub_sub.AMQP import PubSubAMQP
from pycollisionavoidance.raycast.Particle import Particle
from pycollisionavoidance.raycast.StaticMap import StaticMap
from pycollisionavoidance.collision.Detection import ParticleCollisionDetection
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler('/tmp/walkgen.log')
handler.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)-8s-[%(filename)s:%(lineno)d]-%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# ========================================= WALK PATTERN GENERATOR ===================================================
| [
2,
11361,
2438,
329,
362,
35,
4738,
2513,
13,
198,
11748,
33918,
198,
11748,
25064,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
10688,
198,
11748,
18931,
198,
11748,
30351,
952,
198,
6738,
12972,
26000,
1166,
27080,
590,
13,
12984,
6... | 3.49345 | 229 |
from runner.inference_runner import *
| [
6738,
17490,
13,
259,
4288,
62,
16737,
1330,
1635,
198
] | 3.8 | 10 |
import numpy as np
import pandas as pd
from .AST_CORE import *
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
764,
11262,
62,
34,
6965,
1330,
1635,
198
] | 2.863636 | 22 |
import setuptools
with open('README.md') as f:
long_description = f.read()
setuptools.setup(
name="webptools",
version="0.0.3",
scripts=['webptools/webplib.py'],
author="Sai Kumar Yava",
author_email="saikumar.geek@gmail.com",
description="webptools is a Webp image conversion package for python",
long_description=long_description,
long_description_content_type='text/markdown',
url="https://github.com/scionoftech/webptools",
packages=['webptools', 'lib', 'lib/libwebp_linux', 'lib/libwebp_osx', 'lib/libwebp_win64'],
package_data={'': ['lib/*']},
include_package_data=True,
keywords=['webp', 'converter', 'image'],
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'License :: OSI Approved :: MIT License',
],
)
# packages=setuptools.find_packages(include=['lib', 'lib.*', 'frames']),
# package_data = {'': ['*.py', 'lib/*']},
# include_package_data = True,
# data_files = [('lib/*')],
| [
11748,
900,
37623,
10141,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
9132,
11537,
355,
277,
25,
198,
220,
220,
220,
890,
62,
11213,
796,
277,
13,
961,
3419,
198,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
220,
1438,
2... | 2.630435 | 506 |
import os
import time
import json
import csv
import pandas as pd
committee_for_analysis = "actblue"
DATAFILE_SOURCE = f"../data/processed_data/contributions/{committee_for_analysis}_candidate_donor_mappings.csv"
DONOR_LIST_SOURCE = f"../data/processed_data/overlap/{committee_for_analysis}_donor_lists.json"
OVERLAP_OUTFILE_DESTINATION = f"../data/processed_data/overlap/{committee_for_analysis}_donor_overlap.csv"
TOTALS_OUTFILE_DESTINATION = f"../data/processed_data/overlap/{committee_for_analysis}_donor_totals.csv"
if __name__ == "__main__":
main()
| [
11748,
28686,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
269,
21370,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
26799,
62,
1640,
62,
20930,
796,
366,
529,
17585,
1,
198,
198,
35,
1404,
8579,
41119,
62,
47690,
796,
277,
1,
... | 2.600917 | 218 |
# Measure segmentation of sheet music
# Imports
import os.path as path
import sys
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import argrelextrema
from scipy.signal import find_peaks
import warnings
# Score-retrieval inputs
from benchmarks import call_benchmark
try:
import score_retrieval.data
except:
warnings.warn("Warning: Install the score-retrieval repository")
########################
# Measure Segmentation #
########################
def binarize_score(score):
'''
params:
score: a gray scale image of score
returns:
a binarized image of the score
'''
gray = cv.bitwise_not(score)
# Binarize image using adaptive threshold
bw = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 15, -2)
return bw
def find_vertical_lines(score, filter_height = 30):
'''
params:
score: a gray scale image of the score
filter_height: magic number that adjusts the filter height
returns:
a numpy array with only vertical arrays using erosion and dilation
'''
# get vertical lines only image
verticals = np.copy(binarize_score(score))
# Create erosion and dilation filter
rows, _ = verticals.shape
vertical_size = rows // filter_height
vertical_structure = cv.getStructuringElement(cv.MORPH_RECT, (1, vertical_size))
# Perform erosion and dilation
verticals = cv.erode(verticals, vertical_structure)
verticals = cv.dilate(verticals, vertical_structure)
return verticals
def find_staves(score, split_type = 'average'):
'''
params:
score: a gray scale image of the score
split_type: 'average' or 'strict'. 'average' takes the average
between where one staff is predicted to end and the
next is predicted to start. It classifies everything up
to that line as the first staff and everything after
that line as the second (so on and so forth for the
rest of the staves). 'strict' splits the staves by
where the each staff is predicted to start and end.
Another way to think of it is that 'average' guarantees
that if you stacked all of the split staves together,
you'd get the original image, whereas 'split' does not.
returns:
a list of tuples containing staff start and end positions: [(stave_start, stave_end)]
'''
# get vertical lines
verticals = find_vertical_lines(score)
# Normalize the verticals
if verticals.max() != 0:
verts_norm = verticals // verticals.max()
else:
verts_norm = verticals
# Get a horizontal sum of the verticals
horiz_sum_verts = verts_norm.sum(axis=1)
# find the mode of the distribution
horiz_sum_hist = np.bincount(horiz_sum_verts.astype(int))
mode = np.argmax(horiz_sum_hist)
# tuples of (start,end) denoting where to split the image at
staff_split_indices = None
if split_type == 'average':
staff_split_indices = list(split_indices_average(horiz_sum_verts, lambda x: x <= mode))
elif split_type == 'strict':
staff_split_indices = list(split_indices(horiz_sum_verts, lambda x: x <= mode))
else:
raise Exception('Invalid split_type given')
# Staff split indices
staves_start_end = staff_split_indices
if len(staff_split_indices) == 0:
staves_start_end = [(0, score.shape[1])]
return staves_start_end
def find_bars(score):
'''
params:
score: A gray scale version of score
returns:
A list contaning a 3-tuple of (staff-index, bar start and bar end)
'''
#######
# Hyperparameters:
thresholder = True # Use thresholding for minMax thresholding
switch_magic_number = 0.01 # Threshold for deciding whether to add all bars or no
clean_up = True # Use the bar cleanup algorithm to remove small bars
width_magic_number = 10 # Minimum % width threshold for bar cleanup algorithm
#######
staves_start_end = find_staves(score)
bars_start_end = []
noisy_verticals = find_vertical_lines(score, filter_height=40)
for start, end in staves_start_end:
# for each staff, find maxima
one_staff = list(cut_array(noisy_verticals, [(start, end)]))[0]
sum_array = one_staff.sum(axis=0)
maxima = find_peaks(sum_array)
maxima_list = [(sum_array[i], i) for i in maxima[0]]
maxima_list = sorted(maxima_list)
if maxima_list != []:
minimum = maxima_list[0][0]
maximum = maxima_list[-1][0]
# Perform min_max threshold
if thresholder:
if abs(maximum - minimum) / noisy_verticals.shape[1] > switch_magic_number:
threshold = (maxima_list[0][0] + maxima_list[-1][0]) / 2
filtered = [x[1] for x in maxima_list if x[0] > threshold ]
else:
filtered = [x[1] for x in maxima_list]
else:
filtered = [x[1] for x in maxima_list]
# Sort out the bars by width
filtered = sorted(filtered)
bars_in_this_staff = []
for i in filtered:
bars_in_this_staff += [(i, start, end)]
# Perform the cleanup algorithm
if clean_up:
cleaned_up_bars = cleanup_bars(bars_in_this_staff, score.shape[0] / width_magic_number )
if cleaned_up_bars is not None:
bars_start_end += cleaned_up_bars
else:
bars_start_end += bars_in_this_staff
else:
bars_start_end += [(0, start, end)]
bars_start_end += [(score.shape[0], start, end)]
return bars_start_end
##########################
# Retrieval and Printing #
##########################
def create_bar_waveforms(score):
'''
params:
score: a gray scale input image
returns:
a benchmark call to the pytorch cnn
Note: this function utilizes score-retrieval
'''
#################
# Hyperparameters
bar_height = 128
bar_width = 128
#################
bars_start_end = find_bars(score)
im_list = []
if len(bars_start_end) <= 1: # if there is one bar, split staff into 2 parts
im_list.append(score[bars_start_end[0][1]:bars_start_end[0][2], 0:bars_start_end[0][0]])
im_list.append(score[bars_start_end[0][1]:bars_start_end[0][2], bars_start_end[0][0]:score.shape[1]])
# Cycle through all bars and create crops
for i in range(len(bars_start_end) - 1):
cropped_bar = score[bars_start_end[i][1]:bars_start_end[i][2], bars_start_end[i][0]:bars_start_end[i+1][0]]
if cropped_bar.size != 0:
im_list.append(cropped_bar)
# Downsample all images
images = [downsample_image(cv.cvtColor(bar,cv.COLOR_GRAY2RGB), height=bar_height, width=bar_width)
for bar in im_list ]
if images ==[]:
return None
# Perform the benchmark call
return call_benchmark(images=images)
##################
# Helper Methods #
##################
def split_indices(array, comparator=(lambda x: x == 0)):
'''Input: 1-D array of indicies of zeros of horizontal summation
Output: Generator of indicies to split images by discontinuities in zeros'''
indices = np.where(comparator(array))[0]
# we dont want to add 1 to last element
for i in range(indices.size - 1):
if indices[i+1] - indices[i] != 1:
yield (indices[i], indices[i+1])
def split_indices_average(array, comparator=(lambda x: x == 0)):
'''Input: 1-D array of indicies of zeros of horizontal summation
Output: Iterator of indicies to split image at by average of zeros'''
line_pair = list(split_indices(array, comparator))
line_pair = [(0, 0)] + line_pair + [(array.size-1, array.size-1)]
for i in range(len(line_pair) - 2):
a = line_pair[i][1]
b = line_pair[i+1][0]
a1 = line_pair[i+1][1]
b1 = line_pair[i+2][0]
yield ( a + ((b-a)//2) , a1 + ((b1-a1)//2))
def cut_array(array, positions, direction="H"):
'''Input: array: image array, positions: array of start end tuples
Output: array of image arrays cut by positions'''
for start , end in positions:
if (direction == "H"):
yield array[start:end, :]
else:
yield array[:, start:end]
def cleanup_bars(bars, width):
'''
Cleans up a set of bars in staves globally using a recursive approach
params:
bars: bars in a staff to clean up
width: width threshold above which a bar is considered a bar
returns:
cleaned up bars
'''
# Atleast have 2 bars
if len(bars) > 1:
l_diffs = []
# calculate the distances between bars
for i in range(len(bars) - 1):
l_diffs.append(abs(bars[i][0] - bars[i+1][0]))
# Check if base case is triggered
if min(l_diffs) < width:
# Select the appropriate bar to omit
lowest_index = l_diffs.index(min(l_diffs))
if lowest_index == 0:
new_bars = [bars[0]] + bars[2:]
elif lowest_index == len(l_diffs) - 1:
new_bars = bars[0:-2] + [bars[-1]]
else:
if l_diffs[lowest_index - 1] < l_diffs[lowest_index+1]:
new_bars = bars[0:lowest_index] + bars[lowest_index+1:]
else:
new_bars = bars[0:lowest_index+1] + bars[lowest_index+2:]
# recursively cleanup remaining bars
return cleanup_bars(new_bars, width)
else:
# base case
return bars
else:
return bars
def downsample_image(image, rate=None, width=None, height=None):
'''
Downsamples 'image' by a ratio 'rate' or by a mentioned size ('width' and 'height')
'''
if rate is not None:
new_shape = (int(image.shape[0] * rate), int(image.shape[1] * rate))
if width is not None and height is not None:
new_shape = (width, height)
return cv.resize(image, new_shape)
################
# Image Output #
################
def write_verticals(score, filter_height=30, name='verticals'):
'''
Saves the vertical lines found from score 'score' in a .png file
with name 'name'.
'''
verticals = find_vertical_lines(score, filter_height)
img = cv.cvtColor(cv.bitwise_not(verticals), cv.COLOR_GRAY2RGB)
cv.imwrite(name + '.png', img)
def write_staff_lines(score, split_type='average', name='staves',
start_color=(255,0,0), end_color=(255,0,0), width=2):
'''
Overlays staff lines onto the score 'score' and saves as a .png
file with name 'name'.
'start_color' and 'end_color' specify the color of the lines drawn for the
staves (the former is for the top line, the latter is for the bottom).
If the split type is 'strict', the start color will be overwritten for
most of the lines.
'''
staves_start_end = find_staves(score, split_type)
img = cv.cvtColor(score, cv.COLOR_GRAY2RGB)
for staff_start, staff_end in staves_start_end:
# draw staff start line
cv.line(img, (0, staff_start), (self._score.shape[1], staff_start),
start_color, width )
# draw staff end line
cv.line(img, (0, staff_end), (self._score.shape[1], staff_end),
end_color, width)
cv.imwrite(name + '.png', img)
def write_staff_bar_lines(score, split_type='average', name='bar_and_staff',
staff_color=(255,0,0), bar_color=(255,0,0), width=2):
'''
Overlays staff and bar lines onto the score 'score' and saves as a .png
file with name 'name'.
'staff_color' and 'bar_color' specify the color of the lines drawn for the
staves and bars.
'''
staves_start_end = find_staves(score, split_type)
bars_start_end = find_bars(score)
img = cv.cvtColor(score, cv.COLOR_GRAY2RGB)
for staff_start, staff_end in staves_start_end:
# draw staff start line
cv.line(img, (0, staff_start), (self._score.shape[1], staff_start),
staff_color, width )
# draw staff end line
cv.line(img, (0, staff_end), (self._score.shape[1], staff_end),
staff_color, width)
for i, start, end in bars_start_end:
cv.line(img, (i, start), (i, end), (0, 0,255), 2)
cv.imwrite(name + '.png', img)
def write_staves_separately(score, split_type='average', name='staff'):
'''
Saves the staffs found from score 'score' each in separate .png
files with name 'name'-'i', where 'i' is the staff number.
'''
staves_start_end = find_staves(score, split_type)
for i, (staff_start, staff_end) in enumerate(staves_start_end):
staff = score[staff_start:staff_end]
staff_img = cv.cvtColor(staff, cv.COLOR_GRAY2RGB)
cv.imwrite('{name}-{i}.png'.format(name, i), img)
| [
2,
24291,
10618,
341,
286,
9629,
2647,
198,
198,
2,
1846,
3742,
198,
11748,
28686,
13,
6978,
355,
3108,
198,
11748,
25064,
198,
11748,
269,
85,
17,
355,
269,
85,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
117... | 2.339378 | 5,625 |
"""
Solr Blueprint
==============
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.solr
settings:
solr:
version: 4.10.2 # Solr version to install (Required)
# memory: 1024m # Specify minimum and maximum heap size in mb (Default: 512m)
"""
import os
from fabric.context_managers import cd, settings
from fabric.contrib import files
from fabric.decorators import task, parallel
from refabric.api import info, run
from refabric.context_managers import sudo, silent, hide_prefix
from refabric.contrib import blueprints
from . import user
from . import debian
__all__ = ['start', 'stop', 'restart', 'setup', 'configure', 'tail']
blueprint = blueprints.get(__name__)
version = blueprint.get('version')
version_tuple = tuple(map(int, version.split('.')))
if version >= (4, 0, 0):
start = debian.service_task('solr', 'start', check_status=True)
stop = debian.service_task('solr', 'stop', check_status=True)
restart = debian.service_task('solr', 'restart', check_status=True)
else:
__all__ = ['setup', 'configure', 'tail']
solr_home = '/usr/share/solr'
@task
def setup():
"""
Install Solr
"""
install()
configure()
@task
def configure():
"""
Configure Solr
"""
updated_confs = blueprint.upload('solr_home/', '/etc/solr', user='solr')
if version_tuple >= (4, 0, 0):
if debian.lsb_release() >= '16.04':
updated_init = blueprint.upload('system/', '/etc/systemd/system/', context={
'memory': blueprint.get('memory', '512m')
})
if updated_init:
run('systemctl daemon-reload', use_sudo=True)
else:
updated_init = blueprint.upload('init/', '/etc/init/', context={
'memory': blueprint.get('memory', '512m')
})
if updated_confs or updated_init:
restart()
@task
@parallel
| [
37811,
198,
36949,
81,
39932,
198,
25609,
855,
198,
198,
1174,
43957,
1173,
2858,
25,
1174,
198,
198,
492,
2438,
12,
9967,
3712,
331,
43695,
628,
220,
220,
220,
4171,
17190,
25,
198,
220,
220,
220,
220,
220,
532,
25570,
13,
34453,
8... | 2.495484 | 775 |
from django.http import HttpResponse, Http404
from django.shortcuts import render
from .models import Train
from .models import Station
from .models import Passenger
# Create your views here.
| [
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
367,
29281,
26429,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
6738,
764,
27530,
1330,
16835,
198,
6738,
764,
27530,
1330,
9327,
198,
6738,
764,
27530,
1330,
... | 3.843137 | 51 |
from agents.common import PLAYER1, initialize_game_state, apply_player_action, determine_row
| [
6738,
6554,
13,
11321,
1330,
28180,
1137,
16,
11,
41216,
62,
6057,
62,
5219,
11,
4174,
62,
7829,
62,
2673,
11,
5004,
62,
808,
628,
628,
628,
628
] | 3.571429 | 28 |
from shapash.explainer.smart_explainer import SmartExplainer
import pandas as pd | [
6738,
427,
499,
1077,
13,
20676,
10613,
13,
27004,
62,
20676,
10613,
1330,
10880,
18438,
10613,
198,
11748,
19798,
292,
355,
279,
67
] | 3.478261 | 23 |
from flask import Blueprint, render_template
from majavahbot.api.consts import *
from majavahbot.api.database import task_database
from majavahbot.api.utils import get_revision
from majavahbot.tasks import task_registry
blueprint = Blueprint('majavah-bot', __name__)
# utils to be used in tempale
@blueprint.context_processor
@blueprint.route('/')
@blueprint.route('/jobs/wiki/<wiki>')
| [
6738,
42903,
1330,
39932,
11,
8543,
62,
28243,
198,
198,
6738,
16486,
615,
993,
13645,
13,
15042,
13,
1102,
6448,
1330,
1635,
198,
6738,
16486,
615,
993,
13645,
13,
15042,
13,
48806,
1330,
4876,
62,
48806,
198,
6738,
16486,
615,
993,
... | 2.992481 | 133 |
# coding: utf-8
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
import numpy as np
lena= Image.open("lena.bmp")
pix=lena.load()
coulmn,row=lena.size
Histogram_Equalization(coulmn,row,pix)
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
75,
8107,
28,
7412,
13,
9654,
7203,
... | 2.390805 | 87 |
import turtle
print('Draw Any polygon')
t = turtle.Turtle()
n = int(input("Enter the no of the sides of the polygon from 3 to 10 : "))
l = 80
for i in range(n):
turtle.forward(l)
turtle.right(360 / n)
| [
11748,
28699,
201,
198,
4798,
10786,
25302,
4377,
7514,
14520,
11537,
201,
198,
83,
796,
28699,
13,
51,
17964,
3419,
201,
198,
77,
796,
493,
7,
15414,
7203,
17469,
262,
645,
286,
262,
5389,
286,
262,
7514,
14520,
422,
513,
284,
838,
... | 2.389474 | 95 |
#!/usr/bin/python
import os
from roundup import instance
from roundup.password import Password, encodePassword
from optparse import OptionParser
dir = os.getcwd ()
tracker = instance.open (dir)
db = tracker.open ('admin')
"""
Fix roles: loop over all users and clean up roles that don't exist.
"""
cmd = OptionParser ()
cmd.add_option \
( '-u', '--update'
, dest = 'update'
, help = 'Really remove roles (do a commit)'
, action = 'store_true'
)
cmd.add_option \
( '-v', '--verbose'
, dest = 'verbose'
, help = 'Verbose reporting'
, action = 'store_true'
)
opt, args = cmd.parse_args ()
if len (args) :
cmd.error ('No arguments please')
sys.exit (23)
for uid in db.user.getnodeids () :
u = db.user.getnode (uid)
if u.roles is None :
if opt.verbose :
print "User %s: has no roles" % u.username
continue
elif not u.roles :
db.user.set (uid, roles = None)
if opt.verbose :
print "User %s: set empty role to None" % u.username
continue
roles = dict.fromkeys (r.strip () for r in u.roles.split (','))
change = False
for r in roles.keys () :
rl = r.lower ()
if rl not in db.security.role :
change = True
del roles [r]
print "User %s: delete role: %s" % (u.username, r)
if change :
db.user.set (uid, roles = ','.join (roles.iterkeys ()))
if opt.update :
db.commit()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
28686,
198,
6738,
48390,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1330,
4554,
198,
6738,
48390,
13,
28712,
220,
1330,
30275,
11,
37773,
35215,
198,
6738,
2172,
29572,
220,
220... | 2.251114 | 673 |
"""
A module with a function to read text
This module shows off a more complicated type of precondition
Author: Walker M. White
Date: February 14, 2019
"""
import introcs
def get_number(filename):
"""
Returns the number stored in the file <filename>
When we read a file, we get a string. This function changes the result to
an int before returning it.
Parameter filename: The file to get the number from
Precondition: filename is a string and a reference to a valid file.
In addition, this file only has a single integer in it.
"""
contents = introcs.read_txt(filename)
# Change string to int before returning
result = int(contents)
return result
| [
37811,
198,
32,
8265,
351,
257,
2163,
284,
1100,
2420,
198,
198,
1212,
8265,
2523,
572,
257,
517,
8253,
2099,
286,
3718,
623,
653,
198,
198,
13838,
25,
10120,
337,
13,
2635,
198,
10430,
25,
220,
220,
3945,
1478,
11,
13130,
198,
3781... | 3.22973 | 222 |
from __future__ import print_function
import argparse
import os
import fleep
import arabic_reshaper
from bidi.algorithm import get_display
from src.mic import MicrophoneReader
from src.audio import AudioReader
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-T",
"--type",
choices=['mic', 'audio'],
help="Choose the type of source for speech recognition `mic or audio`"
)
parser.add_argument(
"-S",
"--source",
help="Choose the source of audio file (.wav)"
)
parser.add_argument(
"-L",
"--language",
default='fa-IR',
choices=['fa-IR', 'en-US'],
help="Choose your language"
)
parser.add_argument(
"--safe_rtl",
type=str_to_bool,
const=True,
default=False,
nargs='?',
help="Safe RTL format."
)
args = parser.parse_args()
if args.type == 'audio':
if not args.source:
parser.error(
'The source file must be specified! (use --source=YOUR_AUDIO_PATH or -S YOUR_AUDIO_PATH)')
elif not os.path.exists(args.source):
parser.error('Your audio file does not exist!')
else:
with open(args.source, 'rb') as af:
audio = fleep.get(af.read(128))
if not audio.extension_matches('wav'):
parser.error('Your audio file does not correct format (.wav)!')
if args.type == 'mic':
reader = MicrophoneReader(
language=args.language, verbose=True, safe_rtl=args.safe_rtl)
elif args.type == 'audio':
reader = AudioReader(
language=args.language, audio_file=args.source, verbose=True, safe_rtl=args.safe_rtl)
else:
reader = None
if reader:
status, text = reader.read()
if status:
if args.safe_rtl:
text = get_display(arabic_reshaper.reshape(text))
print(text)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
5104,
538,
198,
11748,
610,
397,
291,
62,
3447,
2136,
198,
6738,
8406,
72,
13,
282,
42289,
1330,
651,
62,
13812,
198,
198,
6738,
12... | 2.185263 | 950 |
from rest_framework import serializers
from enum import Enum
from django.utils import timezone
from .models import Attendance, Course, Student, Section, Mentor, Override, Spacetime, Coordinator, DayOfWeekField
def make_omittable(field_class, omit_key, *args, predicate=None, **kwargs):
"""
Behaves exactly as if the field were defined directly by calling `field_class(*args, **kwargs)`,
except that if `omit_key` is present in the context when the field is serialized and predicate returns True,
the value is omitted and `None` is returned instead.
Useful for when you want to leave out one or two fields in one view, while including them in
another view, without having to go through the trouble of writing two completely separate serializers.
This is a marked improvement over using a `SerializerMethodField` because this approach still allows
writing to the field to work without any additional machinery.
"""
predicate_provided = predicate is not None
predicate = predicate or (lambda _: True)
return OmittableField(*args, **kwargs)
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
6738,
33829,
1330,
2039,
388,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
6738,
764,
27530,
1330,
46502,
590,
11,
20537,
11,
13613,
11,
7275,
11,
31879,
273,
11,
3827,
13154... | 3.741497 | 294 |
import hashlib
import hmac
import uuid
from urllib.parse import quote
from django.utils.encoding import force_str, smart_bytes, smart_str
def get_sign(secret, querystring=None, **params):
"""
Return sign for querystring.
Logic:
- Sort querystring by parameter keys and by value if two or more parameter keys share the same name
- URL encode sorted querystring
- Generate a hex digested hmac/sha1 hash using given secret
"""
if querystring:
params = dict(param.split("=") for param in querystring.split("&"))
sorted_params = []
for key, value in sorted(params.items(), key=lambda x: x[0]):
if isinstance(value, (bytes, str)):
sorted_params.append((key, value))
else:
try:
value = list(value)
except TypeError as e:
assert "is not iterable" in smart_str(e)
value = smart_bytes(value)
sorted_params.append((key, value))
else:
sorted_params.extend((key, item) for item in sorted(value))
return get_pairs_sign(secret, sorted_params)
| [
11748,
12234,
8019,
198,
11748,
289,
20285,
198,
11748,
334,
27112,
198,
6738,
2956,
297,
571,
13,
29572,
1330,
9577,
198,
198,
6738,
42625,
14208,
13,
26791,
13,
12685,
7656,
1330,
2700,
62,
2536,
11,
4451,
62,
33661,
11,
4451,
62,
2... | 2.440605 | 463 |
#!/usr/bin/python
######################################################################
# Ascii TMS Viewer
#
#--------------------------------------------------------------------
# Brian Hone | Initial Release
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
# Copyright (c) 2009 Brian Hone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
import curses, time, sys, os, string, random, math
import pprint
from TileUtils import TileUtils
debug = 0
false = 0
true = 1
# end class TileLoader
if __name__=="__main__":
a = TileLoader( (24,24), "def" )
a.getTile( 1,1,0 )
a.getTile( 1,1,0 )
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
29113,
29113,
4242,
2235,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1081,
979,
72,
309,
5653,
3582,
263,
198,
2,
198,
2... | 3.559738 | 611 |
name = "colorstats" | [
3672,
796,
366,
8043,
34242,
1
] | 3.166667 | 6 |
import pytest
import torch
import subprocess
import sys
import builtins
from piq import GS
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture(scope='module')
@pytest.fixture
# ================== Test class: `GS` ==================
@pytest.mark.usefixtures('hide_available_pkg')
@pytest.mark.skip(reason="Randomnly fails, fix in separate PR")
@pytest.mark.skip(reason="Randomnly fails, fix in separate PR")
| [
11748,
12972,
9288,
198,
11748,
28034,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
3170,
1040,
198,
198,
6738,
279,
25011,
1330,
26681,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
21412,
11537,
628,
198,
31,
9078... | 3.053691 | 149 |
"""
MIRAGE Console
mirage.py
Created by Shota Shimazu on 2018/06/05
Copyright (c) 2018-2020 Shota Shimazu All Rights Reserved.
This software is released under the Apache License, see LICENSE for detail.
https://github.com/shotastage/mirageframework/blob/master/LICENSE
"""
import sys
from console.mgargparse import CommandActionStore, ArgumentsParser
from console.appcollector import collect
| [
37811,
198,
8895,
3861,
8264,
24371,
198,
10793,
496,
13,
9078,
198,
198,
41972,
416,
911,
4265,
31698,
1031,
84,
319,
2864,
14,
3312,
14,
2713,
198,
198,
15269,
357,
66,
8,
2864,
12,
42334,
911,
4265,
31698,
1031,
84,
1439,
6923,
3... | 3.410256 | 117 |
import os
import unittest
from firexapp.discovery import discover_package_modules
| [
11748,
28686,
198,
11748,
555,
715,
395,
198,
6738,
2046,
87,
1324,
13,
67,
40821,
1330,
7073,
62,
26495,
62,
18170,
628
] | 3.772727 | 22 |
#!/usr/bin/env python
import pytest
from olympus import Observations, ParameterVector
from olympus.planners import Grid
import numpy as np
# use parametrize to test multiple configurations of the planner
@pytest.mark.parametrize("levels, budget, shuffle, random_seed",
[(2, None, False, None),
(3, None, False, None),
(3, 30, False, None),
(2, None, True, None),
(2, None, True, 42),
(2, 30, True, 42)])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
12972,
9288,
198,
6738,
267,
6760,
385,
1330,
19243,
602,
11,
25139,
2357,
38469,
198,
6738,
267,
6760,
385,
13,
489,
15672,
1330,
24846,
198,
11748,
299,
32152,
355,
45941,
... | 1.948097 | 289 |
# Sample code
print('Hello World, Python!')
| [
2,
27565,
2438,
198,
198,
4798,
10786,
15496,
2159,
11,
11361,
0,
11537,
198
] | 3.214286 | 14 |
import collections
n = int(input())
s = [input() for _ in range(n)]
s = collections.Counter(s).most_common()
ans = []
for i in range(1, len(s) + 1):
ans.append(s[i - 1][0])
if i == len(s):
break
if s[i - 1][1] != s[i][1]:
break
ans.sort()
print(*ans, sep='\n')
| [
11748,
17268,
198,
77,
796,
493,
7,
15414,
28955,
198,
82,
796,
685,
15414,
3419,
329,
4808,
287,
2837,
7,
77,
15437,
198,
82,
796,
17268,
13,
31694,
7,
82,
737,
1712,
62,
11321,
3419,
198,
504,
796,
17635,
198,
1640,
1312,
287,
2... | 2.140741 | 135 |
import enum
from typing import Any, Iterable, Optional, Tuple, Type, TypeVar, Union
__all__ = [
"Choices",
"Enum",
"auto", # also export auto for convenience
"Switch",
"is_choices",
"is_enum",
"is_optional",
"unwrap_optional",
]
auto = enum.auto
NoneType = type(None)
T = TypeVar('T')
Choices: Any = _Choices()
# pylint: disable=no-self-argument, unused-argument
# pylint: enable=no-self-argument, unused-argument
# Switch is a type that's different but equivalent to `bool`.
# It is defined as the `Union` of `bool` and a dummy type, because:
# 1. `bool` cannot be sub-typed.
# >> Switch = type('Switch', (bool,), {})
# 2. `Union` with a single (possibly duplicated) type is flattened into that type.
# >> Switch = Union[bool]
# 3. `NewType` forbids implicit casts from `bool`.
# >> Switch = NewType('Switch', bool)
__dummy_type__ = type("__dummy_type__", (), {}) # the names must match for pickle to work
Switch = Union[bool, __dummy_type__] # type: ignore[valid-type]
HAS_LITERAL = False
_Literal = None
try:
from typing import Literal # type: ignore
HAS_LITERAL = True
except ImportError:
try:
from typing_extensions import Literal # type: ignore
try:
from typing_extensions import _Literal # type: ignore # compat. with Python 3.6
except ImportError:
pass
HAS_LITERAL = True
except ImportError:
pass
if HAS_LITERAL:
def is_choices(typ: type) -> bool:
r"""Check whether a type is a choices type (:class:`Choices` or :class:`Literal`). This cannot be checked using
traditional methods, since :class:`Choices` is a metaclass.
"""
return (isinstance(typ, _Choices) or
getattr(typ, '__origin__', None) is Literal or
type(typ) is _Literal) # pylint: disable=unidiomatic-typecheck
def unwrap_choices(typ: type) -> Tuple[str, ...]:
r"""Return the string literals associated with the choices type. Literal type in Python 3.7+ stores the literals
in ``typ.__args__``, but in Python 3.6- it's in ``typ.__values__``.
"""
return typ.__values__ if hasattr(typ, "__values__") else typ.__args__ # type: ignore[attr-defined]
else:
def is_choices(typ: type) -> bool:
r"""Check whether a type is a choices type (:class:`Choices`). This cannot be checked using traditional methods,
since :class:`Choices` is a metaclass.
"""
return isinstance(typ, _Choices)
def unwrap_choices(typ: type) -> Tuple[str, ...]:
r"""Return the string literals associated with the choices type."""
return typ.__values__ # type: ignore[attr-defined]
def is_enum(typ: Any) -> bool:
r"""Check whether a type is an Enum type. Since we're using ``issubclass``, we need to check whether :arg:`typ`
is a type first."""
return isinstance(typ, type) and issubclass(typ, enum.Enum)
def is_optional(typ: type) -> bool:
r"""Check whether a type is `Optional[T]`. `Optional` is internally implemented as `Union` with `type(None)`."""
return getattr(typ, '__origin__', None) is Union and NoneType in typ.__args__ # type: ignore
def unwrap_optional(typ: Type[Optional[T]]) -> Type[T]:
r"""Return the inner type inside an `Optional[T]` type."""
return next(t for t in typ.__args__ if not isinstance(t, NoneType)) # type: ignore
| [
11748,
33829,
198,
6738,
19720,
1330,
4377,
11,
40806,
540,
11,
32233,
11,
309,
29291,
11,
5994,
11,
5994,
19852,
11,
4479,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
22164,
1063,
1600,
198,
220,
220,
220,
366,
4834... | 2.629119 | 1,305 |
import numpy as np
from ctgan import CTGANSynthesizer
from scipy.spatial.distance import cdist
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from faster_lime.utils import dict_disc_to_bin
if __name__ == '__main__':
from experiments.utils.dataset_utils import get_and_preprocess_compas_data
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
data = get_and_preprocess_compas_data()
X, y, _ = data['data'], data['target'], data['cols']
X['unrelated_column'] = np.random.choice([0, 1], size=X.shape[0])
features = list(X.columns)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))
categorical_feature_name = ['two_year_recid', 'c_charge_degree_F', 'c_charge_degree_M',
'sex_Female', 'sex_Male', 'race', 'unrelated_column']
categorical_feature_indcs = [features.index(c) for c in categorical_feature_name]
explainer = NumpyRobustTabularExplainer(
training_data=X_train.values,
ctgan_sampler=None,
feature_names=features,
categorical_feature_idxes=categorical_feature_indcs,
ctgan_epochs=2
)
exp = explainer.explain_instance(
data_row=X_test.values[0],
predict_fn=clf.predict_proba,
num_samples=100
)
print(exp)
| [
11748,
299,
32152,
355,
45941,
198,
6738,
269,
83,
1030,
1330,
16356,
45028,
13940,
429,
956,
7509,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
30246,
1330,
269,
17080,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
20614,
198,
... | 2.406699 | 627 |
#!/usr/bin/env python
NUM_ROUNDS = {
# (block_size, key_size): num_rounds
(32, 64): 32,
(48, 96): 36,
(64, 128): 44,
}
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
628,
198,
41359,
62,
49,
19385,
5258,
796,
1391,
198,
220,
220,
220,
1303,
357,
9967,
62,
7857,
11,
1994,
62,
7857,
2599,
997,
62,
744,
82,
198,
220,
220,
220,
357,
2624,
11,
5598,
2599... | 1.989011 | 91 |
import mysql.connector
from decouple import config
TABLE = 'serversettings'
| [
11748,
48761,
13,
8443,
273,
198,
6738,
875,
43846,
1330,
4566,
628,
198,
38148,
796,
705,
2655,
690,
12374,
6,
628
] | 3.761905 | 21 |
from .unet import *
from .resnet import *
from .networks import * | [
6738,
764,
403,
316,
1330,
1635,
198,
6738,
764,
411,
3262,
1330,
1635,
198,
6738,
764,
3262,
5225,
1330,
1635
] | 3.25 | 20 |
import math
try:
testcase=int(input())
except:
print("Please enter a number")
testcase = 0
while testcase!=0:
gcd=0
try:
num=int(input())
except:
print("please enter a number")
num = 0
while gcd==0:
suma=0
for i in str(num):
suma=suma+int(i)
gc=math.gcd(num,suma)
if gc<=1:
num=num+1
elif gc>1:
print(num)
gcd=1
testcase=testcase-1
| [
11748,
10688,
628,
198,
28311,
25,
198,
220,
220,
1332,
7442,
28,
600,
7,
15414,
28955,
198,
16341,
25,
198,
220,
220,
3601,
7203,
5492,
3802,
257,
1271,
4943,
198,
220,
220,
1332,
7442,
796,
657,
198,
4514,
1332,
7442,
0,
28,
15,
... | 1.674497 | 298 |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
requires = ['']
setup(name='Savoir',
version='1.0.5',
description='A python wrapper for Multichain Json-RPC API ',
long_description=read('README.mkdn'),
license="BSD",
author='Federico Cardoso',
author_email='federico.cardoso@dxmarkets.com',
url='https://github.com/DXMarkets/Savoir',
keywords='multichain python blockchain jsonrpc',
packages=find_packages(),
install_requires=[
'requests',
],
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
2,
34030,
2163,
284,
1100,
262,
20832,
11682,
2393,
13,
198,
2,
16718,
329,
262,
890,
62,
11213,
... | 2.85283 | 265 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 14 15:50:17 2015
@author: closedloop
"""
from setuptools import setup, find_packages
from setuptools import setup
import datetime
cur_year = datetime.datetime.today().year
setup(name='financial_planner',
version=open('VERSION').read(),
description='Financial Planning Calculations by Astrocyte Research',
url='https://github.com/AstrocyteResearch/financial-planner',
author='Sean Kruzel - Astrocyte Research',
author_email='support@astrocyte.io',
license='Astrocyte Research - Copyright 2015-{} - All rights reserved'.format(cur_year),
# packages=[
# 'financial_planner'
# ],
packages=find_packages(),
install_requires=['pyliferisk'],
include_package_data=True,
long_description=open('README.md').read(),
zip_safe=False)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
3300,
2556,
1478,
1315,
25,
1120,
25,
1558,
1853,
201,
198,
201,
198,
31,
9800,
25,
4838,
26268,
201,
198,
37811,
201,
198,
6738,
900... | 2.59292 | 339 |
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
from pypykatz.alsadecryptor.win_datatypes import ULONG, PVOID, POINTER
from pypykatz.commons.common import KatzSystemArchitecture, WindowsMinBuild, WindowsBuild
from pypykatz.alsadecryptor.package_commons import PackageTemplate
templates = {
'nt6' : {
'x64' : {
'1' : LSA_x64_1(),
'2' : LSA_x64_2(),
'3' : LSA_x64_3(),
'4' : LSA_x64_4(),
'5' : LSA_x64_5(),
'6' : LSA_x64_6(),
},
'x86': {
'1' : LSA_x86_1(),
'2' : LSA_x86_2(),
'3' : LSA_x86_3(),
'4' : LSA_x86_4(),
'5' : LSA_x86_5(),
'6' : LSA_x86_6(),
}
}
} | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
198,
2,
6434,
25,
198,
2,
220,
11552,
292,
22568,
4275,
82,
7750,
2363,
8,
198,
2,
628,
198,
6738,
279,
4464,
48361,
27906,
13,
874,
671,
29609,
273,
13,
5404,
62,
19608,
... | 1.830409 | 342 |
from collections import deque
from aocd import data
from intcode import Computer
droid = Droid(data)
droid.execute()
| [
6738,
17268,
1330,
390,
4188,
198,
198,
6738,
257,
420,
67,
1330,
1366,
198,
198,
6738,
493,
8189,
1330,
13851,
628,
198,
198,
67,
3882,
796,
46546,
7,
7890,
8,
198,
67,
3882,
13,
41049,
3419,
198
] | 3.297297 | 37 |
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class SiteIdentity(object):
"""Implementation of the 'SiteIdentity' model.
O365 Sharepoint online Site Identity. These may be obtained by Graph/REST
or PnP cmdlets. All fields are case insensitive.
Attributes:
id (string): Unique guid for the site in SPO. This is a unqiue
identifier that can be used to compare sites.
server_relativeurl (string): Optional ServerRelativeUrl. Not required.
title (string): Optional Title of site for display and logging
purpose. Not mandatory.
url (string): Full Url of the site. Its of the form
https://yourtenant.sharepoint.com/sites/yoursite or
https://yourtenant.sharepoint.com/yoursite
This parameter is required for all PnP operations.
webid (string): Unique guid for the site root web.
"""
# Create a mapping from Model property names to API property names
_names = {
"id":'id',
"server_relativeurl":'serverRelativeurl',
"title":'title',
"url":'url',
"webid":'webid'
}
def __init__(self,
id=None,
server_relativeurl=None,
title=None,
url=None,
webid=None):
"""Constructor for the SiteIdentity class"""
# Initialize members of the class
self.id = id
self.server_relativeurl = server_relativeurl
self.title = title
self.url = url
self.webid = webid
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
id = dictionary.get('id')
server_relativeurl = dictionary.get('serverRelativeurl')
title = dictionary.get('title')
webid = dictionary.get('webid')
url = dictionary.get('url')
# Return an object of this model
return cls(id,
server_relativeurl,
title,
url,
webid)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
33448,
1766,
956,
414,
3457,
13,
198,
198,
4871,
14413,
7390,
26858,
7,
15252,
2599,
628,
220,
220,
220,
37227,
3546,
32851,
286,
262,
705,
29123,
7390,
26858,... | 2.321168 | 1,096 |
import typing
import attr
from dbnd._core.constants import MetricSource
from dbnd._core.tracking.log_data_request import LogDataRequest
from dbnd._core.tracking.schemas.metrics import Metric
from dbnd._core.utils.timezone import utcnow
if typing.TYPE_CHECKING:
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
# keep it below VALUE_PREVIEW_MAX_LEN at web
_DEFAULT_VALUE_PREVIEW_MAX_LEN = 10000
@attr.s(slots=True)
@attr.s
| [
11748,
19720,
198,
198,
11748,
708,
81,
198,
198,
6738,
20613,
358,
13557,
7295,
13,
9979,
1187,
1330,
3395,
1173,
7416,
198,
6738,
20613,
358,
13557,
7295,
13,
36280,
13,
6404,
62,
7890,
62,
25927,
1330,
5972,
6601,
18453,
198,
6738,
... | 2.904459 | 157 |
"""Home routing
/login/ and /logout/ routes are automatically added by flask-CAS extension
"""
# Third party imports
from flask import Blueprint
from flask import render_template
bp = Blueprint('main', __name__)
@bp.route('/')
@bp.route('/about')
@bp.route('/help')
| [
37811,
16060,
28166,
198,
14,
38235,
14,
290,
1220,
6404,
448,
14,
11926,
389,
6338,
2087,
416,
42903,
12,
34,
1921,
7552,
198,
37811,
198,
198,
2,
10467,
2151,
17944,
198,
6738,
42903,
1330,
39932,
198,
6738,
42903,
1330,
8543,
62,
2... | 3.261905 | 84 |
from sklearn.neighbors import KNeighborsClassifier
class KNN(object):
"""This class implements the K Nearest Neighborhood Algorithm.
"""
def fit(self, chips):
"""Primary execution point where a trained model set is formed from chip
parameters and their gnd values.
Parameters
----------
chip_list: list
Contains a stored array of Chip objects
"""
X = [chip.LCT.values() for chip in chips]
y = [chip.gnd for chip in chips]
self.knnmodel.fit(X, y)
def predict(self, chip):
"""Here each chip is tested on the basis of the trained model and
predicts the outcome of the chip using KNN implementation predict
method.
Parameters:
----------
chip: chip model object
Contains a chip's test parameters
"""
return self.knnmodel.predict(chip.LCT.values())
| [
6738,
1341,
35720,
13,
710,
394,
32289,
1330,
509,
46445,
32289,
9487,
7483,
198,
198,
4871,
509,
6144,
7,
15252,
2599,
198,
220,
220,
220,
37227,
1212,
1398,
23986,
262,
509,
3169,
12423,
37914,
978,
42289,
13,
198,
220,
220,
220,
37... | 2.286364 | 440 |
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from apps.authorities.models import AccreditationAuthority, CertificationAuthority
from apps.user_profile.models import Certifier, Recipient
from apps.api.helpers import CustomAPIException
from apps.api.helpers.permission import (
IsAccreditationAuthority,
IsCertificationAuthority
)
from apps.api.serializers import (
CertificationAuthorityTokenSerializer,
CertifierTokenSerializer,
RecipientTokenSerializer
)
| [
6738,
1334,
62,
30604,
13,
18439,
30001,
13,
33571,
1330,
1835,
3153,
30515,
30642,
198,
6738,
1334,
62,
30604,
13,
18439,
30001,
13,
27530,
1330,
29130,
198,
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
6738,
6725,
13,
9800,
871... | 3.727848 | 158 |
from accent_analyser.InputWord import InputWord
from accent_analyser.rules.EngRule import EngRule
from accent_analyser.rules.IpaRule import IpaRule
from accent_analyser.rules.SentenceRule import SentenceRule
| [
6738,
18702,
62,
272,
26266,
263,
13,
20560,
26449,
1330,
23412,
26449,
198,
6738,
18702,
62,
272,
26266,
263,
13,
38785,
13,
7936,
31929,
1330,
1985,
31929,
198,
6738,
18702,
62,
272,
26266,
263,
13,
38785,
13,
40,
8957,
31929,
1330,
... | 3.333333 | 63 |
from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_sources, get_articles, search_source
from ..models import Source, Article
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
#Getting news Sources
general_sources = get_sources('general')
entertainment_sources = get_sources('entertainment')
business_sources = get_sources('business')
sports_sources = get_sources('sports')
technology_sources = get_sources('technology')
health_sources = get_sources('health')
science_sources = get_sources('science')
title = 'Get the latest news!'
search_source = request.args.get('source_query')
if search_source:
return redirect(url_for('.search',source_name=search_source))
else:
return render_template('index.html', title = title, general = general_sources,entertainment = entertainment_sources, business = business_sources, sports = sports_sources, technology = technology_sources, health = health_sources, science = science_sources)
@main.route('/articles/<id>')
def articles(id):
'''
View Source page function that returns news source details and its data
'''
source = get_articles(id)
return render_template('articles.html',id = id, source = source)
@main.route('/search/<source_name>')
def search(source_name):
'''
View function to display the search results
'''
source_name_list = source_name.split(" ")
source_name_format = "+".join(source_name_list)
searched_sources = search_source(source_name_format)
title = f'search results for {source_name}'
return render_template('search.html',sources = searched_sources) | [
6738,
42903,
1330,
8543,
62,
28243,
11,
25927,
11,
445,
1060,
11,
6371,
62,
1640,
198,
6738,
764,
1330,
1388,
198,
6738,
11485,
25927,
1330,
651,
62,
82,
2203,
11,
651,
62,
26845,
11,
2989,
62,
10459,
198,
6738,
11485,
27530,
1330,
... | 3.031088 | 579 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pickle
d = dict(name='bob', age=20, score=80)
print(pickle.dumps(d))
f = open('dump.txt','wb')
pickle.dump(d,f)
f.close()
f = open('dump.txt', 'rb')
d = pickle.load(f)
f.close()
print(d) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
2298,
293,
198,
67,
796,
8633,
7,
3672,
11639,
65,
672,
3256,
2479,
28,
1238,
11,
4776,
28,
1795,
8... | 2.076923 | 117 |
# TODO
'''
class LiveGuildMembersAudited(LiveGuildMembers):
def __init__(self):
self.audit_kick_stack = AuditStackKicks(self.client_bind, self.guild_id)
self.audit_ban_add_stack = AuditStackBanAdd(self.client_bind, self.guild_id)
self.invite_stack = InviteStack(self.client_bind, self.guild_id)
self.queue_dispenser = QueueDispenser(('ADD', 'UPDATE', 'SELF-LEAVE', 'BAN', 'KICK'))
async def on_guild_member_self_leave(self) -> typing.AsyncGenerator[GuildMember, None]:
queue = asyncio.Queue()
self.queue_dispenser.queue_add_single_slot(queue, 'SELF-LEAVE')
while True:
yield (await queue.get())[0]
async def on_guild_member_update(self) -> typing.AsyncGenerator[GuildMember, None]:
queue = asyncio.Queue()
self.queue_dispenser.queue_add_single_slot(queue, 'UPDATE')
while True:
yield (await queue.get())[0]
async def on_guild_member_kicked(self) -> typing.AsyncGenerator[typing.Tuple[GuildMember, AuditKick], None]:
queue = asyncio.Queue()
self.queue_dispenser.queue_add_single_slot(queue, 'KICK')
while True:
yield (await queue.get())[0]
async def on_guild_member_banned(self) -> typing.AsyncGenerator[typing.Tuple[GuildMember, AuditBanAdd], None]:
queue = asyncio.Queue()
self.queue_dispenser.queue_add_single_slot(queue, 'BAN')
while True:
yield (await queue.get())[0]
''' | [
2,
16926,
46,
198,
198,
7061,
6,
198,
4871,
7547,
38,
3547,
25341,
16353,
863,
7,
18947,
38,
3547,
25341,
2599,
628,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
2599,
198,
220,
220,
220,
220,
220,
220,
220,
2116,
13,
3885,
270,... | 2.284389 | 647 |
from time import sleep
from wsps import WSPSClient
if __name__ == "__main__":
subscribe_key = "subscribe-key"
publish_key = "publish-key"
wsps = WSPSClient("ws://127.0.0.1:52525", on_close)
wsps.connect()
wsps.subscribe("some-channel", on_msg, subscribe_key)
sleep(0.25)
wsps.publish("some-channel", {"msg": "Hello, WSPS!"}, publish_key)
sleep(0.25)
wsps.disconnect()
| [
6738,
640,
1330,
3993,
198,
6738,
266,
82,
862,
1330,
25290,
3705,
11792,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
12383,
62,
2539,
796,
366,
7266,
12522,
12,
2539,
1,
198,
220,
220,
220... | 2.306818 | 176 |
from typing import Optional, List
"""
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order, and each of their nodes contains a single digit. Add the two numbers and return the sum as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Example 1:
Input: l1 = [2,4,3], l2 = [5,6,4]
Output: [7,0,8]
Explanation: 342 + 465 = 807.
Example 2:
Input: l1 = [0], l2 = [0]
Output: [0]
Example 3:
Input: l1 = [9,9,9,9,9,9,9], l2 = [9,9,9,9]
Output: [8,9,9,9,0,0,0,1]
"""
"""
Given the head of a linked list, remove the nth node from the end of the list and return its head.
Example 1:
Input: head = [1,2,3,4,5], n = 2
Output: [1,2,3,5]
Example 2:
Input: head = [1], n = 1
Output: []
Example 3:
Input: head = [1,2], n = 1
Output: [1]
"""
"""
You are given the heads of two sorted linked lists list1 and list2.
Merge the two lists in a one sorted list. The list should be made by splicing together the nodes of the first two lists.
Return the head of the merged linked list.
Example 1:
Input: list1 = [1,2,4], list2 = [1,3,4]
Output: [1,1,2,3,4,4]
Example 2:
Input: list1 = [], list2 = []
Output: []
Example 3:
Input: list1 = [], list2 = [0]
Output: [0]
"""
"""
Given the head of a sorted linked list, delete all duplicates such that each element appears only once. Return the linked list sorted as well.
Example 1:
Input: head = [1,1,2]
Output: [1,2]
Example 2:
Input: head = [1,1,2,3,3]
Output: [1,2,3]
"""
"""
Given head, the head of a linked list, determine if the linked list has a cycle in it.
There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
Return true if there is a cycle in the linked list. Otherwise, return false.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: true
Explanation: There is a cycle in the linked list, where the tail connects to the 1st node (0-indexed).
Example 2:
Input: head = [1,2], pos = 0
Output: true
Explanation: There is a cycle in the linked list, where the tail connects to the 0th node.
Example 3:
Input: head = [1], pos = -1
Output: false
Explanation: There is no cycle in the linked list.
"""
"""
Design a data structure that follows the constraints of a Least Recently Used (LRU) cache.
Implement the LRUCache class:
LRUCache(int capacity) Initialize the LRU cache with positive size capacity.
int get(int key) Return the value of the key if the key exists, otherwise return -1.
void put(int key, int value) Update the value of the key if the key exists. Otherwise, add the key-value pair to the cache. If the number of keys exceeds the capacity from this operation, evict the least recently used key.
The functions get and put must each run in O(1) average time complexity.
Example 1:
Input
["LRUCache", "put", "put", "get", "put", "get", "put", "get", "get", "get"]
[[2], [1, 1], [2, 2], [1], [3, 3], [2], [4, 4], [1], [3], [4]]
Output
[null, null, null, 1, null, -1, null, -1, 3, 4]
Explanation
LRUCache lRUCache = new LRUCache(2);
lRUCache.put(1, 1); // cache is {1=1}
lRUCache.put(2, 2); // cache is {1=1, 2=2}
lRUCache.get(1); // return 1
lRUCache.put(3, 3); // LRU key was 2, evicts key 2, cache is {1=1, 3=3}
lRUCache.get(2); // returns -1 (not found)
lRUCache.put(4, 4); // LRU key was 1, evicts key 1, cache is {4=4, 3=3}
lRUCache.get(1); // return -1 (not found)
lRUCache.get(3); // return 3
lRUCache.get(4); // return 4
"""
"""
Given the heads of two singly linked-lists headA and headB, return the node at which the two lists intersect. If the two linked lists have no intersection at all, return null.
For example, the following two linked lists begin to intersect at node c1:
The test cases are generated such that there are no cycles anywhere in the entire linked structure.
Note that the linked lists must retain their original structure after the function returns.
Custom Judge:
The inputs to the judge are given as follows (your program is not given these inputs):
intersectVal - The value of the node where the intersection occurs. This is 0 if there is no intersected node.
listA - The first linked list.
listB - The second linked list.
skipA - The number of nodes to skip ahead in listA (starting from the head) to get to the intersected node.
skipB - The number of nodes to skip ahead in listB (starting from the head) to get to the intersected node.
The judge will then create the linked structure based on these inputs and pass the two heads, headA and headB to your program. If you correctly return the intersected node, then your solution will be accepted.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,6,1,8,4,5], skipA = 2, skipB = 3
Output: Intersected at '8'
Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect).
From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,6,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [1,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Intersected at '2'
Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect).
From the head of A, it reads as [1,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: No intersection
Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
"""
"""
Given the head of a linked list and an integer val, remove all the nodes of the linked list that has Node.val == val, and return the new head.
Example 1:
Input: head = [1,2,6,3,4,5,6], val = 6
Output: [1,2,3,4,5]
Example 2:
Input: head = [], val = 1
Output: []
Example 3:
Input: head = [7,7,7,7], val = 7
Output: []
"""
"""
Given the head of a singly linked list, reverse the list, and return the reversed list.
Example 1:
Input: head = [1,2,3,4,5]
Output: [5,4,3,2,1]
Example 2:
Input: head = [1,2]
Output: [2,1]
Example 3:
Input: head = []
Output: []
"""
"""
Write a function to delete a node in a singly-linked list. You will not be given access to the head of the list, instead you will be given access to the node to be deleted directly.
It is guaranteed that the node to be deleted is not a tail node in the list.
Example 1:
Input: head = [4,5,1,9], node = 5
Output: [4,1,9]
Explanation: You are given the second node with value 5, the linked list should become 4 -> 1 -> 9 after calling your function.
Example 2:
Input: head = [4,5,1,9], node = 1
Output: [4,5,9]
Explanation: You are given the third node with value 1, the linked list should become 4 -> 5 -> 9 after calling your function.
Example 3:
Input: head = [1,2,3,4], node = 3
Output: [1,2,4]
Example 4:
Input: head = [0,1], node = 0
Output: [1]
Example 5:
Input: head = [-3,5,-99], node = -3
Output: [5,-99]
"""
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
pass
"""
Given the head of a singly linked list, group all the nodes with odd indices together followed by the nodes with even indices, and return the reordered list.
The first node is considered odd, and the second node is even, and so on.
Note that the relative order inside both the even and odd groups should remain as it was in the input.
You must solve the problem in O(1) extra space complexity and O(n) time complexity.
Example 1:
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
Example 2:
Input: head = [2,1,3,5,6,4,7]
Output: [2,3,6,7,1,5,4]
"""
"""
You are given the head of a linked list containing unique integer values and an integer array nums that is a subset of the linked list values.
Return the number of connected components in nums where two values are connected if they appear consecutively in the linked list.
Example 1:
Input: head = [0,1,2,3], nums = [0,1,3]
Output: 2
Explanation: 0 and 1 are connected, so [0, 1] and [3] are the two connected components.
Example 2:
Input: head = [0,1,2,3,4], nums = [0,3,1,4]
Output: 2
Explanation: 0 and 1 are connected, 3 and 4 are connected, so [0, 1] and [3, 4] are the two connected components.
"""
"""
Design your implementation of the circular double-ended queue (deque).
Implement the MyCircularDeque class:
MyCircularDeque(int k) Initializes the deque with a maximum size of k.
boolean insertFront() Adds an item at the front of Deque. Returns true if the operation is successful, or false otherwise.
boolean insertLast() Adds an item at the rear of Deque. Returns true if the operation is successful, or false otherwise.
boolean deleteFront() Deletes an item from the front of Deque. Returns true if the operation is successful, or false otherwise.
boolean deleteLast() Deletes an item from the rear of Deque. Returns true if the operation is successful, or false otherwise.
int getFront() Returns the front item from the Deque. Returns -1 if the deque is empty.
int getRear() Returns the last item from Deque. Returns -1 if the deque is empty.
boolean isEmpty() Returns true if the deque is empty, or false otherwise.
boolean isFull() Returns true if the deque is full, or false otherwise.
Example 1:
Input
["MyCircularDeque", "insertLast", "insertLast", "insertFront", "insertFront", "getRear", "isFull", "deleteLast", "insertFront", "getFront"]
[[3], [1], [2], [3], [4], [], [], [], [4], []]
Output
[null, true, true, true, false, 2, true, true, true, 4]
Explanation
MyCircularDeque myCircularDeque = new MyCircularDeque(3);
myCircularDeque.insertLast(1); // return True
myCircularDeque.insertLast(2); // return True
myCircularDeque.insertFront(3); // return True
myCircularDeque.insertFront(4); // return False, the queue is full.
myCircularDeque.getRear(); // return 2
myCircularDeque.isFull(); // return True
myCircularDeque.deleteLast(); // return True
myCircularDeque.insertFront(4); // return True
myCircularDeque.getFront(); // return 4
"""
# Your MyCircularDeque object will be instantiated and called as such:
# obj = MyCircularDeque(k)
# param_1 = obj.insertFront(value)
# param_2 = obj.insertLast(value)
# param_3 = obj.deleteFront()
# param_4 = obj.deleteLast()
# param_5 = obj.getFront()
# param_6 = obj.getRear()
# param_7 = obj.isEmpty()
# param_8 = obj.isFull()
"""
Given the head of a singly linked list, return the middle node of the linked list.
If there are two middle nodes, return the second middle node.
Example 1:
Input: head = [1,2,3,4,5]
Output: [3,4,5]
Explanation: The middle node of the list is node 3.
Example 2:
Input: head = [1,2,3,4,5,6]
Output: [4,5,6]
Explanation: Since the list has two middle nodes with values 3 and 4, we return the second one.
"""
"""
Given the head of a linked list, reverse the nodes of the list k at a time, and return the modified list.
k is a positive integer and is less than or equal to the length of the linked list. If the number of nodes is not a multiple of k then left-out nodes, in the end, should remain as it is.
You may not alter the values in the list's nodes, only nodes themselves may be changed.
Example 1:
Input: head = [1,2,3,4,5], k = 2
Output: [2,1,4,3,5]
Example 2:
Input: head = [1,2,3,4,5], k = 3
Output: [3,2,1,4,5]
"""
"""
You are given the head of a linked list with n nodes.
For each node in the list, find the value of the next greater node. That is, for each node, find the value of the first node that is next to it and has a strictly larger value than it.
Return an integer array answer where answer[i] is the value of the next greater node of the ith node (1-indexed). If the ith node does not have a next greater node, set answer[i] = 0.
Example 1:
Input: head = [2,1,5]
Output: [5,5,0]
Example 2:
Input: head = [2,7,4,3,5]
Output: [7,0,5,5,0]
"""
| [
198,
198,
6738,
19720,
1330,
32233,
11,
7343,
198,
198,
37811,
198,
1639,
389,
1813,
734,
1729,
12,
28920,
6692,
8341,
10200,
734,
1729,
12,
31591,
37014,
13,
383,
19561,
389,
8574,
287,
9575,
1502,
11,
290,
1123,
286,
511,
13760,
490... | 3.01401 | 4,140 |
from cfoundation import Service
from pydash import _
from os import path
import gdapi
import os
import requests
import json
| [
6738,
269,
42526,
1330,
4809,
198,
6738,
279,
5173,
1077,
1330,
4808,
198,
6738,
28686,
1330,
3108,
198,
11748,
308,
67,
15042,
198,
11748,
28686,
198,
11748,
7007,
198,
11748,
33918,
198
] | 3.875 | 32 |
# Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
import logging
from django.apps import AppConfig
from django.db.models.signals import post_migrate
import warnings
logger = logging.getLogger(__name__)
| [
2,
15069,
357,
66,
8,
2177,
48920,
10501,
198,
2,
2312,
4237,
389,
2716,
739,
262,
2846,
286,
262,
17168,
5964,
25,
766,
38559,
24290,
198,
198,
11748,
18931,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
4262... | 3.605263 | 76 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 07:51:51 2021
@author: sblair
"""
from mpi4py import MPI

# Report this process's rank within the world communicator.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# The original used a Python-2 print *statement* (a SyntaxError under
# Python 3); the call form below is valid under both Python 2 and 3.
print('Hello from process %d of %d!!' % (rank, size))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
2892,
7653,
1478,
8753,
25,
4349,
25,
4349,
33448,
198,
198,
31,
9800,
25,
264,
2436,
958,
19... | 2.401869 | 107 |
#!/usr/bin/python3
import dh.data
import dh.image
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this chunk — presumably
    # defined elsewhere in the file; as shown, running this raises NameError.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
34590,
13,
7890,
198,
11748,
34590,
13,
9060,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.358974 | 39 |
#!/bin/python3
import sys
# Read two single-space-separated integer triples from stdin, run the
# solver, and print the result values space-joined on one line.
a0, a1, a2 = (int(tok) for tok in input().strip().split(' '))
b0, b1, b2 = (int(tok) for tok in input().strip().split(' '))
result = solve(a0, a1, a2, b0, b1, b2)
print(" ".join(str(v) for v in result))
| [
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
25064,
198,
198,
64,
15,
11,
257,
16,
11,
257,
17,
796,
5128,
22446,
36311,
22446,
35312,
10786,
705,
8,
198,
64,
15,
11,
257,
16,
11,
257,
17,
796,
685,
600,
7,
64,
15,
828,
493... | 1.955882 | 136 |
from parking_permits_app.constants import VAT_PERCENTAGE
from parking_permits_app.pricing.value_added_tax import calculate_price_without_vat
| [
6738,
7647,
62,
16321,
896,
62,
1324,
13,
9979,
1187,
1330,
32313,
62,
18973,
43960,
11879,
198,
6738,
7647,
62,
16321,
896,
62,
1324,
13,
1050,
6345,
13,
8367,
62,
29373,
62,
19290,
1330,
15284,
62,
20888,
62,
19419,
62,
85,
265,
6... | 3.222222 | 45 |
from typing import Any, Dict, Generic, List, Optional, Tuple, Type, Union
from blacksmith.domain.exceptions import (
NoContractException,
UnregisteredRouteException,
WrongRequestTypeException,
)
from blacksmith.domain.model import (
CollectionIterator,
HTTPRequest,
HTTPResponse,
HTTPTimeout,
Request,
Response,
ResponseBox,
)
from blacksmith.domain.model.params import (
AbstractCollectionParser,
TCollectionResponse,
TResponse,
)
from blacksmith.domain.registry import ApiRoutes, HttpCollection, HttpResource
from blacksmith.domain.typing import AsyncMiddleware
from blacksmith.middleware._async.base import AsyncHTTPMiddleware
from blacksmith.typing import ClientName, HTTPMethod, Path, ResourceName, Url
from .base import AsyncAbstractTransport
ClientTimeout = Union[HTTPTimeout, float, Tuple[float, float]]
HTTPAuthentication = AsyncHTTPMiddleware
def build_timeout(timeout: ClientTimeout) -> HTTPTimeout:
    """Normalize the convenient timeout forms to an ``HTTPTimeout``.

    Accepts an ``HTTPTimeout`` (returned unchanged), a bare number of
    seconds, or a ``(read, connect)`` tuple.

    :param timeout: HTTPTimeout, seconds, or (read, connect) pair.
    :return: an HTTPTimeout instance.
    """
    if isinstance(timeout, (int, float)):
        # Fix: a plain int (e.g. ``timeout=30``) previously matched neither
        # branch and was returned unconverted; accept ints as seconds too.
        timeout = HTTPTimeout(timeout)
    elif isinstance(timeout, tuple):
        timeout = HTTPTimeout(*timeout)
    return timeout
class AsyncRouteProxy(Generic[TCollectionResponse, TResponse]):
    """Proxy from a resource to its associated routes.

    NOTE(review): only the class attributes are visible in this chunk;
    the methods presumably follow elsewhere in the file.
    """
    # identity of the client and the resource this proxy serves
    client_name: ClientName
    name: ResourceName
    # base URL the routes are resolved against
    endpoint: Url
    routes: ApiRoutes
    # transport actually performing the HTTP calls
    transport: AsyncAbstractTransport
    timeout: HTTPTimeout
    # parser class used to deserialize collection responses
    collection_parser: Type[AbstractCollectionParser]
    middlewares: List[AsyncHTTPMiddleware]
| [
6738,
19720,
1330,
4377,
11,
360,
713,
11,
42044,
11,
7343,
11,
32233,
11,
309,
29291,
11,
5994,
11,
4479,
198,
198,
6738,
2042,
21453,
13,
27830,
13,
1069,
11755,
1330,
357,
198,
220,
220,
220,
1400,
45845,
16922,
11,
198,
220,
220... | 3.15493 | 497 |
import os
import re
import numpy as np
import math
from PIL import Image, ImageDraw, ImageOps, ImageEnhance, ImageFont
import argparse
import gpxpy
import utils
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class Tiles:
    """Manager for a local cache of slippy-map (z/x/y) PNG tiles.

    NOTE(review): __init__ is not visible in this chunk; the methods below
    rely on self.path, self.zoom, self.tiles, self.tile_px, self.tile_range,
    self.tile_array and self.downloader being set elsewhere — confirm.
    """
    def get_tiles(self, path):
        """Scan *path* recursively and return a set of (x, y) tile coords."""
        tiles = set({})
        for files in os.walk(path):
            if len(files[2]) == 0:
                continue
            # directory basename is the x coordinate: .../{zoom}/{x}/{y}.png
            x = re.search(r'/([0-9]+)$', files[0])[1]
            for f in files[2]:
                # leading digits of the file name are the y coordinate
                y = re.search('[0-9]+', f)[0]
                tiles.add((int(x), int(y)))
        print("Found %s tiles." % len(tiles))
        return tiles
    def update(self):
        """Re-scan the tile directory for the current zoom level."""
        self.tiles = self.get_tiles(os.path.join(self.path, str(self.zoom)))
    def update_array(self, pt1, pt2):
        """Rebuild self.tile_range / self.tile_array for the extent spanned by two points."""
        x = [xy[0] for xy in [pt1, pt2]]
        y = [xy[1] for xy in [pt1, pt2]]
        self.tile_range = {
            'x': [min(x), max(x)],
            'y': [min(y), max(y)],
            'dx': max(x) - min(x),
            'dy': max(y) - min(y)
        }
        # boolean occupancy grid: 1 where a cached tile exists inside the extent
        self.tile_array = np.zeros((self.tile_range['dy'], self.tile_range['dx']))
        for c in self.tiles:
            if c[1] - self.tile_range['y'][0] < 0 or c[0] - self.tile_range['x'][0] < 0:
                continue
            if c[1] >= self.tile_range['y'][1] or c[0] >= self.tile_range['x'][1]:
                continue
            self.tile_array[c[1] - self.tile_range['y'][0], c[0] - self.tile_range['x'][0]] = 1
    def gen_tile_array(self, tiles):
        """Return (occupancy array, extent dict) for *tiles*, for the legend plot."""
        if len(tiles) == 0:
            # degenerate 1x1 array for an empty tile set
            tile_array = np.zeros((1,1))
            tile_range = {'x': [0,0], 'y':[0,0], 'dx': 0, 'dy': 0}
            return tile_array, tile_range
        x = [xy[0] for xy in tiles]
        y = [xy[1] for xy in tiles]
        tile_range = {
            'x': [min(x), max(x)],
            'y': [min(y), max(y)],
            'dx': max(x) - min(x),
            'dy': max(y) - min(y)
        }
        print("X: %s - %s" % (tile_range['x'][0], tile_range['x'][1]))
        print("Y: %s - %s" % (tile_range['y'][0], tile_range['y'][1]))
        tile_array = np.zeros((tile_range['y'][1] - tile_range['y'][0] + 1, tile_range['x'][1] - tile_range['x'][0] + 1))
        for c in tiles:
            tile_array[c[1] - tile_range['y'][0], c[0] - tile_range['x'][0]] = 1
        return tile_array, tile_range
    def import_gpx(self, gpx):
        """Parse a GPX file and return the track as (lon, lat) lists in tile coords."""
        def add_middle_points():
            """Insert interpolated points when a segment jumps over whole tiles."""
            dx = x - lon[-1]
            dy = y - lat[-1]
            if abs(dx) >= 1 or abs(dy) >= 1:
                nx = int(abs(dx))
                ny = int(abs(dy))
                n = max(nx, ny)
                for _ in range(n):
                    lon.append(lon[-1] + dx / (n+1))
                    lat.append(lat[-1] + dy / (n+1))
        lat = []
        lon = []
        gpx_track = gpxpy.parse(open(gpx, 'r'))
        for track in gpx_track.tracks:
            for segment in track.segments:
                for point in segment.points:
                    # convert WGS84 degrees to fractional tile coordinates
                    x, y = utils.deg2num(point.latitude, point.longitude, zoom=self.zoom)
                    # Add middle points in case abs diff >= 1
                    if len(lat) > 1:
                        add_middle_points()
                    lon.append(x)
                    lat.append(y)
        return (lon, lat)
    def generate_maps(self, rectangles, path, track=None, prefix='stitch', water=[None, None, None], gray=False):
        """Render one PNG chart per Box in *rectangles* into directory *path*.

        NOTE(review): ``water=[None, None, None]`` is a mutable default
        argument; it is never mutated here, so it is harmless, but it is
        an idiom worth cleaning up.
        """
        # Import track
        lat = []
        lon = []
        if track is not None:
            (lon, lat) = track
        # save Image for each Box
        for idx, r in enumerate(rectangles):
            img_file = os.path.join(path, '{}_{}.png'.format(prefix, idx))
            print("Saving %s/%s: %s" % (idx+1, len(rectangles), img_file))
            # Create canvas
            img = r.stitch_array(self)
            # Add page number
            font = ImageFont.truetype("DejaVuSans.ttf", 48)
            ImageDraw.Draw(img).text((10, 10), str(idx), (0, 0, 0), font=font)
            # Add scale
            r.add_scale(img, self)
            # Add track
            if track is not None:
                cut_track = r.cut_path(lon, lat)
                if len(cut_track) > 1:
                    print("Track length: %s segments" % len(cut_track))
                    draw = ImageDraw.Draw(img)
                    draw.line(cut_track, fill=(0, 0, 0), width=2)
                    del draw
            # Remove water; color as white
            if water[0] is not None:
                # Replace water with white colour
                data = np.array(img)  # "data" is a height x width x 3 numpy array
                red, green, blue = data.T  # Temporarily unpack the bands for readability
                # Replace colors
                water_px = (red == water[0]) & (green == water[1]) & (blue == water[2])
                data[:, :][water_px.T] = (255, 255, 255)  # Transpose back needed
                img = Image.fromarray(data)
            # Enhance contrast and convert to grayscale
            if gray:
                enhancer = ImageEnhance.Contrast(img)
                img = enhancer.enhance(1.25).convert('L')
            # Save as PNG
            img.save(img_file, 'PNG')
class Box:
    """A boundary box in tile coordinates.

    NOTE(review): __init__ is not visible in this chunk; methods rely on
    self.x, self.y (top-left), self.dx, self.dy (size) and self.x1,
    self.y1 (bottom-right) being set elsewhere — confirm.
    """
    def add_plot(self, ax, tiles):
        """Draw this box as a red rectangle on the legend Axes."""
        rectangle = Rectangle(
            (self.x - tiles.tile_range['x'][0], self.y - tiles.tile_range['y'][0]),
            self.dx,
            self.dy,
            linewidth=1,
            edgecolor='r',
            facecolor='none'
        )
        # Add the patch to the Axes
        ax.add_patch(rectangle)
    def stitch_array(self, tiles):
        """Paste all tiles covered by this box into one PIL image and return it."""
        px_size = tiles.tile_px
        x_size = px_size[0] * math.ceil(self.dx)
        y_size = px_size[1] * math.ceil(self.dy)
        # Create an empty image
        result = Image.new('RGB', (x_size, y_size), color=(255, 255, 255))
        # Create tiles for box
        range_x = range(int(self.x), int(self.x1))
        range_y = range(int(self.y), int(self.y1))
        box_tiles = set([(x, y) for x in range_x for y in range_y])
        missing_tiles = box_tiles - tiles.tiles
        if len(missing_tiles) > 0:
            print("Missing tiles: ", len(missing_tiles))
            # fetch missing tiles on demand if a downloader was configured
            if tiles.downloader is not None:
                print("Downloading missing tiles...")
                tiles.downloader.download_tiles(missing_tiles, tiles.zoom)
                tiles.update()
                missing_tiles = box_tiles - tiles.tiles
        # Read tiles if present and paste into canvas
        for tile in box_tiles:
            if tile in missing_tiles:
                continue
            tile_file = os.path.join(tiles.path, str(tiles.zoom), str(tile[0]), str(tile[1]) + '.png')
            tile_image = Image.open(tile_file)
            result.paste(im=tile_image, box=((tile[0]-self.x) * px_size[0], (tile[1]-self.y) * px_size[1]))
        # FUTURE: crop if box coords are float
        crop = False
        for dim in [self.x, self.y, self.dx, self.dy]:
            crop |= (int(dim) - dim) != 0
        if crop:
            x0 = int((self.x - int(self.x)) * px_size[0])
            y0 = int((self.y - int(self.y)) * px_size[1])
            x1 = int((self.x1 - int(self.x1)) * px_size[0])
            y1 = int((self.y1 - int(self.y1)) * px_size[1])
            result = result.crop((x0, y0, x1, y1))
        return result
    def _is_in_range(self, pt, pt1, pt2):
        """Return True if *pt* lies inside the box spanned by pt1 and pt2."""
        max_x = max(pt1[0], pt2[0])
        min_x = min(pt1[0], pt2[0])
        max_y = max(pt1[1], pt2[1])
        min_y = min(pt1[1], pt2[1])
        within_x = (pt[0] >= min_x) and (pt[0] <= max_x)
        within_y = (pt[1] >= min_y) and (pt[1] <= max_y)
        return within_x and within_y
    def cut_path(self, lon, lat, size='256x256'):
        """Clip the (lon, lat) track to this box; return pixel coords inside it."""
        def add_intersection():
            """Append the intersection of segment (prev_pt, pt) with the box border."""
            # Generate line equations Ax + By = C from two points and edges
            line = utils.line(prev_pt, pt)
            top = {'a': 0, 'b': 1, 'c': self.y1}
            bottom = {'a': 0, 'b': 1, 'c': self.y}
            left = {'a': 1, 'b': 0, 'c': self.x}
            right = {'a': 1, 'b': 0, 'c': self.x1}
            # Check intersection of segment with edges
            pt_begin = None
            for edge in [top, bottom, left, right]:
                intersection = utils.line_intersection(line, edge)
                if intersection is None:
                    continue
                if self._is_in_range(intersection, pt, prev_pt):
                    pt_begin = intersection
            if pt_begin is not None:
                result.append(((pt_begin[0] - self.x) * px_size[0], (pt_begin[1] - self.y) * px_size[1]))
        px_size = [int(px) for px in size.split('x')]
        result = []
        path_len = len(lon)
        prev_within = False
        for pt_idx, pt in enumerate(zip(lon, lat)):
            within_box = pt[0] >= self.x and pt[0] <= self.x1 and pt[1] >= self.y and pt[1] <= self.y1
            if within_box:
                # Add previous point if idx != 0
                if len(result) == 0 and pt_idx > 0:
                    prev_pt = (lon[pt_idx - 1], lat[pt_idx - 1])
                    add_intersection()
                # Add current point
                result.append(((pt[0] - self.x) * px_size[0], (pt[1] - self.y) * px_size[1]))
            # Add new point if previous was within and current is not and is not the last point
            elif prev_within and pt_idx < path_len:
                prev_pt = (lon[pt_idx - 1], lat[pt_idx - 1])
                add_intersection()
            prev_within = within_box
        return result
    def add_scale(self, img, tiles, pos=(0.03, 0.97)):
        """Draw a 0/1/5/10 km distance scale onto *img* at fractional position *pos*."""
        px_size = tiles.tile_px
        x = pos[0] * self.dx * px_size[0]
        y = pos[1] * self.dy * px_size[1]
        # Get lat for px y coordinate
        # From: https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Tile_numbers_to_lon..2Flat._2
        n = 2.0 ** tiles.zoom
        lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * (tiles.tile_range['y'][0] + self.y + pos[1] * self.dy) / n)))
        # Get m / px
        # https://wiki.openstreetmap.org/wiki/Zoom_levels
        C = 2 * math.pi * 6378137.0
        m_per_px = C * math.cos(lat_rad) / 2 ** (tiles.zoom + math.log2(px_size[0]))
        km1 = 1000 / m_per_px
        km5 = 5000 / m_per_px
        km10 = 10000 / m_per_px
        # Draw lines
        draw = ImageDraw.Draw(img)
        # Horizontal 10 km line
        draw.line((x, y) + (x + km10, y), fill=(0, 0, 0), width=2)
        # Add ticks
        draw.line((x + km5, y) + (x + km5, y-5), fill=(0, 0, 0), width=2)
        draw.line((x + km1, y-5) + (x + km1, y+5), fill=(0, 0, 0), width=2)
        draw.line((x, y-7) + (x, y+7), fill=(0, 0, 0), width=2)
        # Add annotations
        font = ImageFont.truetype("DejaVuSans.ttf", 12)
        draw.text(((x, y + 10) ), "0 km", (0, 0, 0), font=font)
        draw.text(((x + km1, y + 10) ), "1 km", (0, 0, 0), font=font)
        draw.text(((x + km5, y + 10) ), "5 km", (0, 0, 0), font=font)
        draw.text(((x + km10, y + 10) ), "10 km", (0, 0, 0), font=font)
        del draw
def gen_boxes(tiles, dx=10, dy=10, minpx=0):
    """Split the tile canvas into dx-by-dy chart boxes.

    Only boxes whose window of the occupancy array contains more than
    *minpx* available tiles are kept.
    """
    boxes = []
    size_y, size_x = tiles.tile_array.shape
    x_origin = tiles.tile_range['x'][0]
    y_origin = tiles.tile_range['y'][0]
    n_cols = int(np.ceil(size_x / dx))
    n_rows = int(np.ceil(size_y / dy))
    for col in range(n_cols):
        for row in range(n_rows):
            # clamp the last column/row so boxes never exceed the canvas
            width = min((col + 1) * dx, size_x) - col * dx
            height = min((row + 1) * dy, size_y) - row * dy
            candidate = Box(x_origin + col * dx, y_origin + row * dy, width, height)
            window = tiles.tile_array[
                candidate.y - y_origin:candidate.y1 - y_origin,
                candidate.x - x_origin:candidate.x1 - x_origin
            ]
            if window.sum((0, 1)) > minpx:
                boxes.append(candidate)
    return boxes
def gen_boxes_from_track(track, dx=10, dy=10, border=1):
    """Cover the (lon, lat) *track* with Boxes of size dx x dy (or rotated).

    Grows a running extent point by point; when neither portrait nor
    landscape orientation fits any more, a Box is emitted for the previous
    fitting extent and the extent is restarted.

    NOTE(review): depends on a ``Range`` helper class (reset / get_extent)
    that is not visible in this chunk — confirm its contract.
    """
    # Range calculating class
    # Initialize Range with empty range
    Range.reset()
    number_points = len(track[0])
    boxes = []
    prev_P = False
    prev_extent = {}
    # Loop over each point of track
    for idx, point in enumerate(zip(track[0], track[1])):
        extent = Range.get_extent(point)
        # TODO: Allow for float coords in boxes
        # P: extent fits in portrait orientation; L: fits in landscape
        P = (extent['dx'] <= (dx - border)) and (extent['dy'] <= (dy - border))
        L = (extent['dx'] <= (dy - border)) and (extent['dy'] <= (dx - border))
        # If neither Portrait nor Landscape view fits, create end the rectangle
        # from the previous fitting view
        if len(prev_extent) > 0 and (((P or L) == False) or idx == number_points - 1):
            if prev_P:
                rot_dx = dx
                rot_dy = dy
                # center the box horizontally around the extent
                x = prev_extent['x'][0] - (rot_dx - prev_extent['dx']) / 2
                y = prev_extent['y'][0]
            else:
                rot_dx = dy
                rot_dy = dx
                x = prev_extent['x'][0]
                y = prev_extent['y'][0] - (rot_dy - prev_extent['dy']) / 2
            boxes.append(Box(int(x), int(y), rot_dx, rot_dy))
            prev_P = False
            prev_extent = {}
            Range.reset()
            continue
        prev_extent = extent
        prev_P = P
    return boxes
if __name__ == "__main__":
    # CLI entry point: stitch printable charts from cached OSM tiles along
    # a GPX track, plus a legend image showing chart placement.
    # Parse command line
    parser = argparse.ArgumentParser(description='Create printable maps from downloaded OSM tiles.')
    parser.add_argument("tile_path", help=r"Directory with OSM PNG tiles: /{zoom}/{x}/{y}.png")
    parser.add_argument(
        '-z',
        action="store",
        dest="zoom",
        type=int,
        default=13,
        help="OSM zoom level"
    )
    parser.add_argument(
        '--gpx',
        action="store",
        dest="gpx",
        type=str,
        help="GPX trace to produce map",
        required=True
    )
    parser.add_argument(
        '-o',
        action="store",
        dest="output_dir",
        type=str,
        default=".",
        help="output directory"
    )
    parser.add_argument(
        '-p',
        action="store",
        dest="map_prefix",
        type=str,
        default="stitch",
        help="output map prefix, 'stitch' by default"
    )
    parser.add_argument(
        '--gray',
        action="store_true",
        dest="gray",
        default=False,
        help="output as grayscale"
    )
    parser.add_argument(
        '--water',
        action="store",
        dest="water",
        type=int,
        nargs=3,
        default=[None, None, None],
        help="removes water color given as R G B; 170 211 223 for OSM"
    )
    parser.add_argument(
        '-x',
        action="store",
        dest="nx",
        type=int,
        default=8,
        help="number of tiles in X dimension to load per chart; 8 by default"
    )
    parser.add_argument(
        '-y',
        action="store",
        dest="ny",
        type=int,
        default=11,
        help="number of tiles in Y dimension to load per chart; 11 by default"
    )
    parser.add_argument(
        '--dl',
        action="store",
        dest="tile_dl",
        type=str,
        default='',
        help=r"URL for downloading missing tiles, e.g.: https://a.tile.openstreetmap.org/{z}/{x}/{y}.png for OSM"
    )
    args = parser.parse_args()
    # Start plotting
    # Generate tile canvas
    tiles = Tiles(args.tile_path, zoom=args.zoom, web_source=args.tile_dl)
    # Load GPX trace
    gpx_trace = tiles.import_gpx(args.gpx)
    # TODO: Create maps with all tiles if no GPX is given
    # boxes = gen_boxes(tiles, dx=11, dy=15, minpx=1)
    boxes = gen_boxes_from_track(gpx_trace, dx=args.nx, dy=args.ny)
    print("Number of charts: ", len(boxes))
    # Update Tiles with new extent from boxes
    # NOTE(review): old_extent is assigned but never used — candidate for removal.
    old_extent = tiles.tile_range
    p1 = [None, None]
    p2 = [None, None]
    # accumulate the union extent of all boxes into (p1, p2)
    for idx, box in enumerate(boxes):
        if idx == 0:
            p1 = [box.x, box.y]
            p2 = [box.x1, box.y1]
        else:
            min_x = int(min(p1[0], box.x))
            min_y = int(min(p1[1], box.y))
            max_x = int(max(p2[0], box.x1))
            max_y = int(max(p2[1], box.y1))
            p1 = [min_x, min_y]
            p2 = [max_x, max_y]
    tiles.update_array(p1, p2)
    # Plot legend with rectangles and track
    # Import track
    lat = []
    lon = []
    if gpx_trace is not None:
        (lon, lat) = gpx_trace
    # Get the current reference
    plt.figure(figsize=(10,10))
    plt.matshow(tiles.tile_array)
    ax = plt.gca()
    # Create a Rectangle patch for each rectangle
    for idx, r in enumerate(boxes):
        r.add_plot(ax, tiles)
        # Add rectangle number
        plt.text(
            r.x + r.dx * 0.25 - tiles.tile_range['x'][0],
            r.y + r.dy * 0.75 - tiles.tile_range['y'][0],
            str(idx), fontsize=9, color='white')
    plt.plot([x - tiles.tile_range['x'][0] for x in lon], [y - tiles.tile_range['y'][0] for y in lat])
    plt.savefig(os.path.join(args.output_dir, 'legend.png'))
    # Save separate maps
    tiles.generate_maps(boxes, track=gpx_trace, path=args.output_dir, gray=args.gray, water=args.water)
| [
11748,
28686,
198,
11748,
302,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
25302,
11,
7412,
41472,
11,
7412,
35476,
590,
11,
7412,
23252,
198,
11748,
1822,
29572,
198,
11748,
308,
8... | 1.924502 | 9,338 |
import numpy as np
from BDSpace.Coordinates import Cartesian
from BDSpace import Space
from BDSpace.Curve.Parametric import Helix
from BDSpace.Field import HyperbolicPotentialCurveConservativeField
import BDSpaceVis as Visual
from mayavi import mlab
# Demo: build a charged helix in a rotated coordinate system and render it
# with mayavi, together with its electrostatic field sampled on a grid.
space = Space('Charged helix Space')
coordinate_system = Cartesian()
# rotate 45 degrees around the (1, 1, 1) axis
coordinate_system.rotate_axis_angle(np.ones(3, dtype=np.double), np.deg2rad(45))
coordinate_system.origin = np.array([0.0, 0.0, 0.0]) + -2.0
left_helix = Helix(name='Left Helix', coordinate_system=coordinate_system,
                   radius=5, pitch=10, start=0, stop=np.pi * 4, right=False)
helix_r = 1.5
print('Helix length:', left_helix.length())
pos_electrostatic_field = HyperbolicPotentialCurveConservativeField(name='Pos Charged Helix field',
                                                                   field_type='electrostatic',
                                                                   curve=left_helix, r=helix_r)
# field strength coefficient — presumably the charge scale; see BDSpace docs
pos_electrostatic_field.a = 1.0
space.add_element(left_helix)
fig = mlab.figure('CS demo', bgcolor=(0.0, 0.0, 0.0))  # Create the mayavi figure
space_view = Visual.SpaceView(fig, space)
space_view.set_cs_visible(True)
space_view.draw()
left_helix_view = Visual.CurveView(fig=fig, curve=left_helix)
left_helix_view.set_color((1.0, 0.0, 0.0), 0.9)
left_helix_view.set_thickness(helix_r)
left_helix_view.set_cs_visible(True)
left_helix_view.draw()
pos_field_vis = Visual.FieldView(fig, pos_electrostatic_field)
# 11x11x11 sampling grid over [-10, 10]^3
grid = np.mgrid[-10:10:11j, -10:10:11j, -10:10:11j]
pos_field_vis.set_grid(grid)
pos_field_vis.set_cs_visible(True)
pos_field_vis.draw()
mlab.show()
| [
11748,
299,
32152,
355,
45941,
198,
198,
6738,
28023,
14106,
13,
7222,
585,
17540,
1330,
13690,
35610,
198,
6738,
28023,
14106,
1330,
4687,
198,
6738,
28023,
14106,
13,
26628,
303,
13,
22973,
19482,
1330,
5053,
844,
198,
6738,
28023,
1410... | 2.247934 | 726 |
from uiza.api_resources.base import *
| [
6738,
334,
23638,
13,
15042,
62,
37540,
13,
8692,
1330,
1635,
198
] | 3.166667 | 12 |
import sys
import operator
from enum import Enum, IntEnum, auto
from typing import Any, List, Type, Union, Tuple, Generic, TypeVar, Optional, Callable
from .core import OPCODE
# CT: the set of concrete value types a stack slot may hold
CT = TypeVar("CT", int, float, bool, str)
# F: an opcode handler — accepts arbitrary arguments, returns nothing
F = Callable[..., None]
# operation class
| [
11748,
25064,
198,
11748,
10088,
198,
6738,
33829,
1330,
2039,
388,
11,
2558,
4834,
388,
11,
8295,
198,
6738,
19720,
1330,
4377,
11,
7343,
11,
5994,
11,
4479,
11,
309,
29291,
11,
42044,
11,
5994,
19852,
11,
32233,
11,
4889,
540,
198,
... | 3.113402 | 97 |
import streamlit as st
from pdf2image import convert_from_path
import os
from PIL import Image
from main import predict
import numpy as np
import cv2
import pandas as pd
import base64
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:/Program Files (x86)/Tesseract-OCR/tesseract.exe'
tessdata_dir_config = r'--tessdata-dir "C:/Program Files (x86)/Tesseract-OCR/tessdata"'
# Streamlit UI: upload a PDF (split into page images) or a single image,
# then extract tables either with classic image processing or the TableNet
# model, offering each resulting table as a downloadable CSV.
# NOTE(review): table_detection() is not defined in this chunk — presumably
# imported/defined elsewhere; predict() comes from main.
st.title("TableNet with OCR Detection")
st.markdown("Hello There")
method = st.selectbox("Image or PDF", ['PDF', 'Image'])
if method == "PDF":
    uploaded_file = st.file_uploader("Choose a file", type=['pdf'])
    if uploaded_file is not None:
        # persist the upload, then clear previously extracted page images
        with open("selected.pdf", "wb") as f:
            f.write(uploaded_file.getbuffer())
        for _ in os.listdir("extracted_images"):
            os.remove(os.path.join("extracted_images", _))
        images = convert_from_path("selected.pdf")
        for i in range(len(images)):
            images[i].save('extracted_images/page'+'_'+ str(i+1) +'.jpg', 'JPEG')
        # one preview column per page (st.beta_columns is a deprecated API —
        # newer Streamlit uses st.columns; left as-is to preserve behavior)
        img_cols = st.beta_columns(len(images))
        for i in range(len(img_cols)):
            img_cols[i].subheader("page"+str(i+1))
            img_cols[i].image(Image.open("extracted_images/page_"+str(i+1)+".jpg"), use_column_width=True)
        selected_page = st.selectbox("Select the page", os.listdir("extracted_images"))
        image = Image.open('extracted_images/'+selected_page)
        st.image(image)
        selected_approach = st.selectbox("select approach",['Image Processing', 'TableNet approach'])
        if selected_approach == 'Image Processing':
            df, img = table_detection('extracted_images/'+selected_page)
            st.image(img, "processed image")
            st.dataframe(df)
            if not df.empty:
                # embed the CSV as a base64 data-URI download link
                csv = df.to_csv().encode()
                b64 = base64.b64encode(csv).decode()
                href = f'<a href="data:file/csv;base64,{b64}" download="out.csv" target="_blank">Download csv file</a>'
                st.markdown(href, unsafe_allow_html=True)
        if selected_approach == 'TableNet approach':
            out, tb, cl = predict('extracted_images/'+selected_page, 'best_model.ckpt')
            st.image(tb, "Table Mask")
            st.image(cl, "Column Mask")
            for i in range(len(out)):
                st.dataframe(out[i])
                csv = out[i].to_csv().encode()
                b64 = base64.b64encode(csv).decode()
                href = f'<a href="data:file/csv;base64,{b64}" download="out.csv" target="_blank">Download csv file</a>'
                st.markdown(href, unsafe_allow_html=True)
if method == "Image":
    st.write(method)
    uploaded_file = st.file_uploader("Choose an Image", type=['jpg','jpeg','png','bmp'])
    if uploaded_file is not None:
        with open("selected_img.jpg", "wb") as f:
            f.write(uploaded_file.getbuffer())
        st.image(Image.open('selected_img.jpg'), width=200)
        selected_approach = st.selectbox("select approach",['Image Processing', 'TableNet approach'])
        if selected_approach == 'Image Processing':
            df, img = table_detection('selected_img.jpg')
            st.image(img, "processed image")
            st.dataframe(df)
            if not df.empty:
                csv = df.to_csv().encode()
                b64 = base64.b64encode(csv).decode()
                href = f'<a href="data:file/csv;base64,{b64}" download="out.csv" target="_blank">Download csv file</a>'
                st.markdown(href, unsafe_allow_html=True)
        if selected_approach == 'TableNet approach':
            out, tb, cl = predict('selected_img.jpg', 'best_model.ckpt')
            st.image(tb, "Table Mask")
            st.image(cl, "Column Mask")
            for i in range(len(out)):
                st.dataframe(out[i])
                csv = out[i].to_csv().encode()
                b64 = base64.b64encode(csv).decode()
                href = f'<a href="data:file/csv;base64,{b64}" download="out.csv" target="_blank">Download csv file</a>'
                st.markdown(href, unsafe_allow_html=True)
11748,
4269,
18250,
355,
336,
198,
6738,
37124,
17,
9060,
1330,
10385,
62,
6738,
62,
6978,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
1388,
1330,
4331,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
... | 2.130005 | 1,923 |
import random
from Personagem import Personagem
| [
11748,
4738,
198,
198,
6738,
7755,
363,
368,
1330,
7755,
363,
368,
198
] | 3.769231 | 13 |
#!/usr/bin/python
import os
import struct
##This payload is for vuln1.exe
payload = "A"*0x6b
payload += "BBBB" #old sp
#return to not_called function
#payload +=\x64\x01\x01
#payload+=struct.pack("I",0x0002dd24)
payload+=struct.pack("I",0x0002dd24)
print "\"%s\""%payload
#(python -c 'print "A"*0x6c + "BBBB" + "\x1c\xdd\x02"
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
28686,
198,
11748,
2878,
198,
198,
2235,
1212,
21437,
318,
329,
24477,
77,
16,
13,
13499,
220,
198,
15577,
2220,
796,
366,
32,
1,
9,
15,
87,
21,
65,
198,
15577,
2220,
15853,
3... | 2.178808 | 151 |
# Prompt for a percentile score (0–100) and print the matching grade.
# NOTE(review): lower scores get better grades here (<= 30 -> A) — confirm
# the thresholds are intentional before changing them.
score = float(input("백분위(0~100)점수를 입력해 주세요 >>"))
for limit, grade in ((30, "A"), (70, "B")):
    if score <= limit:
        print("당신의 학점은 " + grade + "입니다.")
        break
else:
    print("당신의 학점은 C입니다.")
26675,
796,
12178,
7,
15414,
7203,
167,
108,
109,
167,
114,
226,
168,
250,
226,
7,
15,
93,
3064,
8,
168,
254,
238,
168,
230,
246,
167,
98,
120,
23821,
252,
227,
167,
254,
98,
47991,
112,
23821,
96,
120,
168,
226,
116,
168,
248,
... | 1.096774 | 155 |
#! usr/bin/env python3
# -*- coding:utf-8 -*-
import tensorflow as tf
# 1x2 row vector times 2x1 column vector -> 1x1 product (TensorFlow 1.x API;
# tf.Session was removed in TF 2.x)
matrix1 = tf.constant([[2,3]])
matrix2 = tf.constant([[2],
                       [3]])
product = tf.matmul(matrix1,matrix2)  # matrix multiply, same as np.dot(matrix1, matrix2)
# method 1: explicit Session, closed manually
# sess = tf.Session()
# result = sess.run(product)
# print(result)
# sess.close()
# method 2: context manager closes the session automatically
with tf.Session() as sess:
    result2 = sess.run(product)
    print(result2)
| [
2,
0,
514,
81,
14,
8800,
14,
24330,
21015,
18,
201,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
201,
198,
201,
198,
11748,
11192,
273,
11125,
355,
48700,
201,
198,
201,
198,
201,
198,
6759,
8609,
16,
796,
48700,
... | 1.9 | 230 |
""" Pages to be migrated to a wiki-like system """
from flask import Blueprint, request, redirect, url_for, jsonify
from ..misc import engine
from ..forms import LoginForm
bp = Blueprint('errors', __name__)
@bp.app_errorhandler(401)
def unauthorized(error):
    """401 Unauthorized: redirect the visitor to the login page."""
    return redirect(url_for('auth.login'))
@bp.app_errorhandler(403)
def forbidden_error(error):
    """403 Forbidden: render the 403 page with a login form."""
    return engine.get_template('errors/403.html').render({'loginform': LoginForm()}), 403
@bp.app_errorhandler(404)
def not_found(error):
    """404 Not found: JSON body for API paths, rendered page otherwise."""
    req_path = request.path
    if req_path.startswith('/api'):
        # v3 uses a plain ``msg`` field; older API versions wrap it
        if req_path.startswith('/api/v3'):
            body = jsonify(msg="Method not found or not implemented")
        else:
            body = jsonify(status='error', error='Method not found or not implemented')
        return body, 404
    return engine.get_template('errors/404.html').render({}), 404
@bp.app_errorhandler(417)
def im_a_teapot(error):
    """418 I'm a teapot.

    Renamed from ``forbidden_error``: the original duplicated the 403
    handler's name, shadowing it at module level. Flask registers the
    handler at decoration time, so behavior is unchanged.
    NOTE(review): registered for 417 but renders errors/417.html with
    status 418 — confirm whether the decorator should be
    ``app_errorhandler(418)``.
    """
    return engine.get_template('errors/417.html').render({}), 418
@bp.app_errorhandler(500)
def server_error(error):
    """500 Internal server error: JSON body for API paths, rendered page otherwise."""
    req_path = request.path
    if req_path.startswith('/api'):
        if req_path.startswith('/api/v3'):
            body = jsonify(msg="Internal error")
        else:
            body = jsonify(status='error', error='Internal error')
        return body, 500
    return engine.get_template('errors/500.html').render({}), 500
| [
37811,
28221,
284,
307,
40227,
284,
257,
22719,
12,
2339,
1080,
37227,
198,
6738,
42903,
1330,
39932,
11,
2581,
11,
18941,
11,
19016,
62,
1640,
11,
33918,
1958,
198,
6738,
11485,
44374,
1330,
3113,
198,
6738,
11485,
23914,
1330,
23093,
... | 2.89613 | 491 |
import sys
n = 875714 # number of vertices (875,714) in the input graph
t = 0 # Number of nodes explored at this point. (1st pass)
s = None # Current source vertex (2nd pass)
# small_graph = 6,3,2,1,1
if __name__ == "__main__":
    # NOTE(review): main() is not defined in this chunk — presumably defined
    # elsewhere in the file; running this as shown raises NameError.
    main()
| [
11748,
25064,
198,
198,
77,
796,
807,
39251,
1415,
1303,
1271,
286,
9421,
291,
444,
807,
2425,
13,
45722,
198,
83,
796,
657,
1303,
7913,
286,
13760,
18782,
379,
428,
966,
13,
357,
16,
301,
1208,
8,
198,
82,
796,
6045,
1303,
9236,
... | 2.616279 | 86 |
#!/usr/bin/env python
"""
@author Jesse Haviland
"""
import os
from subprocess import call, Popen
from ropy.backend.Connector import Connector
import zerorpc
import ropy as rp
import numpy as np
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
31,
9800,
18033,
23284,
40855,
198,
37811,
198,
198,
11748,
28686,
198,
6738,
850,
14681,
1330,
869,
11,
8099,
268,
198,
6738,
686,
9078,
13,
1891,
437,
13,
34525,
1330,
81... | 3.126984 | 63 |
from __future__ import absolute_import
import ast
from collections import deque
from typing import Iterable, Tuple
import asttokens
from raincoat.constants import ELEMENT_NOT_FOUND
Line = str
Lines = Iterable[Line]
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
6468,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
19720,
1330,
40806,
540,
11,
309,
29291,
198,
198,
11748,
6468,
83,
482,
641,
198,
198,
6738,
6290,
31434,
13,
9979,
... | 3.348485 | 66 |
"""
Problem Statement
Welcome to Day 5! Check out the video review of loops here, or just jump
right into the problem.
In this problem you will test your knowledge of loops. Given three integers
a, b, and N, output the following series:
a+2^0b,a+2^0b+2^1b....a+2^0b+2^1b+...+2^N-1b
Input Format
The first line will contain the number of testcases T. Each of the next T
lines will have three integers, a, b, and N.
Constraints
0<=T<=500
0<=a,b<=50
1<=N<=15
Output Format
Print the answer to each test case in a separate line.
Sample Input
2
5 3 5
0 2 10
Sample Output
8 14 26 50 98
2 6 14 30 62 126 254 510 1022 2046
Explanation
There are two test cases.
In the first case: a=5, b=3 ,N=5
1st term = 5+(2^0*3)=8
2nd term = 5+(2^0*3)+(2^1*3)=14
3rd term = 5+(2^0*3)+(2^1*3)+(2^2*3)=26
4th term = 5+(2^0*3)+(2^1*3)+(2^2*3)+(2^3*3)=50
5th term = 5+(2^0*3)+(2^1*3)+(2^2*3)+(2^3*3)+(2^4*3)=98
"""
for _ in range(int(raw_input())):
a, b, N = map(int, raw_input().split())
series = 0
for i in range(N):
series = a
for j in range(i+1):
series += (pow(2, j) * b)
print series,
print | [
37811,
198,
40781,
21983,
198,
198,
14618,
284,
3596,
642,
0,
6822,
503,
262,
2008,
2423,
286,
23607,
994,
11,
393,
655,
4391,
198,
3506,
656,
262,
1917,
13,
198,
198,
818,
428,
1917,
345,
481,
1332,
534,
3725,
286,
23607,
13,
11259... | 2.210526 | 513 |
import subprocess
import urllib2, json
from datetime import datetime
import psutil
import transmissionrpc
if __name__ == '__main__':
nc = NotificationCenter()
print '\n'.join(nc.random_quotes())
print '----------------'
print '\n'.join(nc.status_date())
print '----------------'
print '\n'.join(nc.status_torrents())
print '----------------'
print '\n'.join(nc.status_weather())
print '----------------'
print '\n'.join(nc.status_disk())
print '----------------'
| [
11748,
850,
14681,
198,
11748,
2956,
297,
571,
17,
11,
33918,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
26692,
22602,
198,
11748,
11478,
81,
14751,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,... | 2.853933 | 178 |
import pygame
from settings import Settings
| [
11748,
12972,
6057,
198,
198,
6738,
6460,
1330,
16163,
628
] | 4.6 | 10 |
from burrolib.agents.refill_agent import RefillAgent
from burrolib.games.beer_game import BeerGame
from burrolib.run import run
| [
6738,
4356,
3225,
571,
13,
49638,
13,
5420,
359,
62,
25781,
1330,
6524,
359,
36772,
198,
6738,
4356,
3225,
571,
13,
19966,
13,
42428,
62,
6057,
1330,
16971,
8777,
198,
6738,
4356,
3225,
571,
13,
5143,
1330,
1057,
628
] | 3.307692 | 39 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: toddler
# date: 20171026
# 数据库使用sqlite, ORM使用sqlalchemy
import sqlite3
import sys
import os
sys.path.append("..")
from Lib.my_lib import WriteLog, re_joint_dir_by_os
from Conf.analyzecoding import db_path
db_path = os.path.join(os.path.abspath(".."), re_joint_dir_by_os(db_path))
if __name__ == '__main__':
create_coding_all_user()
create_coding_user_info()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
1772,
25,
30773,
198,
2,
3128,
25,
2177,
940,
2075,
198,
198,
2,
10545,
243,
108,
162,
235,
106,
41753,
241,
45635,
... | 2.257895 | 190 |