content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import math
from functools import cmp_to_key
locations = """#.#................#..............#......#......
.......##..#..#....#.#.....##...#.........#.#...
.#...............#....#.##......................
......#..####.........#....#.......#..#.....#...
.....#............#......#................#.#...
....##...#.#.#.#.............#..#.#.......#.....
..#.#.........#....#..#.#.........####..........
....#...#.#...####..#..#..#.....#...............
.............#......#..........#...........#....
......#.#.........#...............#.............
..#......#..#.....##...##.....#....#.#......#...
...#.......##.........#.#..#......#........#.#..
#.............#..........#....#.#.....#.........
#......#.#................#.......#..#.#........
#..#.#.....#.....###..#.................#..#....
...............................#..........#.....
###.#.....#.....#.............#.......#....#....
.#.....#.........#.....#....#...................
........#....................#..#...............
.....#...#.##......#............#......#.....#..
..#..#..............#..#..#.##........#.........
..#.#...#.......#....##...#........#...#.#....#.
.....#.#..####...........#.##....#....#......#..
.....#..#..##...............................#...
.#....#..#......#.#............#........##...#..
.......#.....................#..#....#.....#....
#......#..###...........#.#....#......#.........
..............#..#.#...#.......#..#.#...#......#
.......#...........#.....#...#.............#.#..
..##..##.............#........#........#........
......#.............##..#.........#...#.#.#.....
#........#.........#...#.....#................#.
...#.#...........#.....#.........#......##......
..#..#...........#..........#...................
.........#..#.......................#.#.........
......#.#.#.....#...........#...............#...
......#.##...........#....#............#........
#...........##.#.#........##...........##.......
......#....#..#.......#.....#.#.......#.##......
.#....#......#..............#.......#...........
......##.#..........#..................#........
......##.##...#..#........#............#........
..#.....#.................###...#.....###.#..#..
....##...............#....#..................#..
.....#................#.#.#.......#..........#..
#........................#.##..........#....##..
.#.........#.#.#...#...#....#........#..#.......
...#..#.#......................#...............#"""
locations = list(map(list, locations.splitlines()))
asteroids = []
for i in range(len(locations)):
for j in range(len(locations[i])):
if locations[i][j] == '#':
asteroids.append((j, i))
best = find_best_asteroid(asteroids)[0]
asteroids.remove(best)
print(find_nth_removed(asteroids, best, 200))
| [
11748,
10688,
198,
6738,
1257,
310,
10141,
1330,
269,
3149,
62,
1462,
62,
2539,
628,
628,
628,
628,
198,
198,
17946,
602,
796,
37227,
2,
32535,
4181,
2,
2109,
16317,
2,
16317,
2,
16317,
198,
25780,
2235,
492,
2,
492,
2,
1106,
2,
3... | 3.300481 | 832 |
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# hillshade.py
# Created on: 2020-01-19 18:50:09.00000
# (generated by ArcGIS/ModelBuilder)
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
# Local variables:
yose_valley_tiles_rst05 = ""
yose_valley_rst05_hillshade = ""
# Set Geoprocessing environments
arcpy.env.scratchWorkspace = "C:\\GIS\\scratch.gdb"
arcpy.env.workspace = "C:\\GIS\\scratch.gdb"
# Process: Hillshade
arcpy.gp.HillShade_sa(yose_valley_tiles_rst05, yose_valley_rst05_hillshade, "315", "45", "NO_SHADOWS", "1")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
32284,
198,
2,
12788,
1477,
671,
13,
9078,
198,
2,
15622,
319,
25,
12131,
12,
486,
12,
1129,
1248,
25,
1120,
25,
2931,
13,
20483,
198,
2,
220,
220,
357,
... | 2.892241 | 232 |
import ast
import dataclasses
import json
import re
import sys
import typing
from sbdata.repo import find_item_by_name, Item
from sbdata.task import register_task, Arguments
from sbdata.wiki import get_wiki_sources_by_title
@dataclasses.dataclass
@register_task("Fetch Dungeon Loot")
| [
11748,
6468,
198,
11748,
4818,
330,
28958,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
25064,
198,
11748,
19720,
198,
198,
6738,
264,
65,
7890,
13,
260,
7501,
1330,
1064,
62,
9186,
62,
1525,
62,
3672,
11,
9097,
198,
6738,
264,
65,... | 3.107527 | 93 |
import os
# Load environmental variables from .env in development stage
basedir = os.path.abspath(os.path.dirname(__file__))
config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'staging': StagingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig
} | [
11748,
28686,
198,
198,
2,
8778,
6142,
9633,
422,
764,
24330,
287,
2478,
3800,
198,
3106,
343,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
628,
628,
628,
198,
198,
11250,
7... | 3.263736 | 91 |
"""
===========================
Calculate SHORE scalar maps
===========================
We show how to calculate two SHORE-based scalar maps: return to origin
probability (rtop) [Descoteaux2011]_ and mean square displacement (msd)
[Wu2007]_, [Wu2008]_ on your data. SHORE can be used with any multiple b-value
dataset like multi-shell or DSI.
First import the necessary modules:
"""
import nibabel as nib
import numpy as np
import matplotlib.pyplot as plt
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.data import get_data, dsi_voxels
from dipy.reconst.shore import ShoreModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable
object (gradient information e.g. b-values). For example, to read the b-values
it is possible to write print(gtab.bvals).
Load the raw diffusion data and the affine.
"""
data = img.get_data()
affine = img.affine
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
Instantiate the Model.
"""
asm = ShoreModel(gtab)
"""
Lets just use only one slice only from the data.
"""
dataslice = data[30:70, 20:80, data.shape[2] / 2]
"""
Fit the signal with the model and calculate the SHORE coefficients.
"""
asmfit = asm.fit(dataslice)
"""
Calculate the analytical rtop on the signal
that corresponds to the integral of the signal.
"""
print('Calculating... rtop_signal')
rtop_signal = asmfit.rtop_signal()
"""
Now we calculate the analytical rtop on the propagator,
that corresponds to its central value.
"""
print('Calculating... rtop_pdf')
rtop_pdf = asmfit.rtop_pdf()
"""
In theory, these two measures must be equal,
to show that we calculate the mean square error on this two measures.
"""
mse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size
print("mse = %f" % mse)
"""
mse = 0.000000
Let's calculate the analytical mean square displacement on the propagator.
"""
print('Calculating... msd')
msd = asmfit.msd()
"""
Show the maps and save them in SHORE_maps.png.
"""
fig = plt.figure(figsize=(6, 6))
ax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')
ax1.set_axis_off()
ind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf')
ax2.set_axis_off()
ind = ax2.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')
plt.colorbar(ind)
ax3 = fig.add_subplot(2, 2, 3, title='msd')
ax3.set_axis_off()
ind = ax3.imshow(msd.T, interpolation='nearest', origin='lower', vmin=0)
plt.colorbar(ind)
plt.savefig('SHORE_maps.png')
"""
.. figure:: SHORE_maps.png
:align: center
**rtop and msd calculated using the SHORE model**.
.. [Descoteaux2011] Descoteaux M. et. al , "Multiple q-shell diffusion
propagator imaging", Medical Image Analysis, vol 15,
No. 4, p. 603-621, 2011.
.. [Wu2007] Wu Y. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
.. [Wu2008] Wu Y. et. al, "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,
2008.
.. include:: ../links_names.inc
"""
| [
37811,
198,
4770,
2559,
18604,
198,
9771,
3129,
378,
6006,
6965,
16578,
283,
8739,
198,
4770,
2559,
18604,
198,
198,
1135,
905,
703,
284,
15284,
734,
6006,
6965,
12,
3106,
16578,
283,
8739,
25,
1441,
284,
8159,
198,
1676,
65,
1799,
35... | 2.730994 | 1,197 |
from tensorflow.python.keras import backend as K
def f1_score(y_true, y_pred):
""" F1 score metric """
def recall_metric(labels, predictions):
""" Recall metric """
true_positives = K.sum(K.round(K.clip(labels * predictions, 0, 1)))
possible_positives = K.sum(K.round(K.clip(labels, 0, 1)))
recall_score = true_positives / (possible_positives + K.epsilon())
return recall_score
def precision_metric(labels, predictions):
""" Precision metric """
true_positives = K.sum(K.round(K.clip(labels * predictions, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(predictions, 0, 1)))
precision_score = true_positives / (predicted_positives + K.epsilon())
return precision_score
precision = precision_metric(y_true, y_pred)
recall = recall_metric(y_true, y_pred)
# K.epsilon() is a small number used to prevent division by zero
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
| [
6738,
11192,
273,
11125,
13,
29412,
13,
6122,
292,
1330,
30203,
355,
509,
628,
198,
4299,
277,
16,
62,
26675,
7,
88,
62,
7942,
11,
331,
62,
28764,
2599,
198,
220,
220,
220,
37227,
376,
16,
4776,
18663,
37227,
628,
220,
220,
220,
8... | 2.540404 | 396 |
from veros_extra_setups.setups.wave_propagation.wave_propagation import WavePropagationSetup # noqa: F401
| [
6738,
3326,
418,
62,
26086,
62,
2617,
4739,
13,
2617,
4739,
13,
19204,
62,
22930,
363,
341,
13,
19204,
62,
22930,
363,
341,
1330,
17084,
24331,
363,
341,
40786,
220,
1303,
645,
20402,
25,
376,
21844,
198
] | 2.891892 | 37 |
"""Goal: Create a program where users can input two locations and return a restaurant as equidistant as possible between them.
This will be done by connecting with the GoogleMaps Directions and Places API.
Once a suitable establishment has been found, the program will return the establishment's name, address, phone number, and hours of operation.
If a restaurant can not be found within 500 meters of the midpoint, Middle Ground will automatically expand the radius to 5000 meters."""
#!/usr/bin/env python3
import requests, json, pprint
#The requests and json modules allow me to access the JSON response from the Google APIs
#pprint allows me to return the data to the user in a format that is easier to read
DIRECTIONS_API_KEY = 'AIzaSyDmLGPIUErCNSmM-FPFSUGS9LIPFv9cbRI' #GoogleMaps Directions API Key
PLACES_API_KEY = 'AIzaSyDR_0dNH_A30KWnjz3s7GG7PZw6Vo3WkDQ' #Google Places API Key
#I do not expect a lot of people to be accessing this program on GitHub currently, so I feel comfortable leaving in the API Keys and am not worried about going over the request limit.
print('Welcome to Middle Ground.\n\nPlease use the following guidelines for the best results:')
print('\n1. Make sure the addresses are spelled completely and correctly.\nAddresses are not case-sensitive and standard postal abbreviations are acceptable.')
print('\n2. Restart the program if you have made an error while typing either address.')
print('\n3. Since Middle Ground aims to find restaurants within 500 meters of the midpoint, it is best suited for use in densely populated areas.\n')
print('*****************************************************\n\n')
restart = 1
#This code gives users the option to restart the program or exit when finished
while restart != "X" and restart != "x":
street_address_a = input('Please enter the street address only of Person A. \nDo not include city or state: ')
city_a = input('Enter the city of Person A: ')
state_a = input('Enter the state of Person A: ')
updated_address_a = str(street_address_a) + ' ' + str(city_a) + ' ' + str(state_a)
street_address_b = input('\nPlease enter the street address only of Person B. \nDo not include city or state: ')
city_b = input('Enter the city of Person B: ')
state_b = input('Enter the state of Person B: ')
updated_address_b = str(street_address_b) + ' ' + str(city_b) + ' ' + str(state_b)
#Should add way for user to confirm the address is correct, with options to revise if there is a typo.
#Should add exception handling to confirm that the user's input is valid (e.g. not entering numbers or special characters instead of letters for city and state)
print('\nLet\'s find a restaurant at the midpoint of those locations!\nPlease wait... this may take a few moments.')
#I have gathered all the necessary information from the user, now I need to connect with the GoogleMaps Directions API
api_response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(updated_address_a, DIRECTIONS_API_KEY))
api_response_dict = api_response.json()
if api_response_dict['status'] == 'OK':
latitude_a = api_response_dict['results'][0]['geometry']['location']['lat']
longitude_a = api_response_dict['results'][0]['geometry']['location']['lng']
api_response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}'.format(updated_address_b, DIRECTIONS_API_KEY))
api_response_dict = api_response.json()
if api_response_dict['status'] == 'OK':
latitude_b = api_response_dict['results'][0]['geometry']['location']['lat']
longitude_b = api_response_dict['results'][0]['geometry']['location']['lng']
#Now that the latitude and longitude of both addresses has been pulled from the GoogleMaps Directions API, I am going to average them together to find a midpoint
average_latitude = find_average(latitude_a, latitude_b)
average_longitude = find_average(longitude_a, longitude_b)
list_places = requests.get('https://maps.googleapis.com/maps/api/place/radarsearch/json?location=' + str(average_latitude) + ',' + str(average_longitude) + '&radius=500&type=restaurant&key=' + str(PLACES_API_KEY))
list_places_dict = list_places.json()
if list_places_dict['status'] == 'OK':
#Checking to make sure there an establishment is found within 500 meters of the midpoint
place_id = list_places_dict['results'][0]['place_id']
#This pulls the Place ID of the first result on the list of bars and restaurants within 500 meters of the middle point
place_details = requests.get('https://maps.googleapis.com/maps/api/place/details/json?placeid=' + str(place_id) + '&key=' + str(PLACES_API_KEY))
place_details = place_details.json()
if place_details['status'] == 'OK':
place_name = place_details['result']['name']
place_address = place_details['result']['formatted_address']
place_phone = place_details['result']['formatted_phone_number']
place_hours = place_details['result']['opening_hours']['weekday_text']
print('\nYou should meet at ' + place_name)
#This is the name of the restaurant closest to the midpoint.
print(place_address)
print(place_phone)
pprint.pprint(place_hours)
#Using pprint module to print days and hours on separate lines
restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
else:
print('\nI\'m sorry, I could not find a restaurant within 500 meters of the midpoint. \nI am now checking for a restaurant within 5000 meters.')
#This addition allows for more flexibility in suburban areas or if both addresses are not located in the same city.
list_places = requests.get('https://maps.googleapis.com/maps/api/place/radarsearch/json?location=' + str(average_latitude) + ',' + str(average_longitude) + '&radius=5000&type=restaurant&key=' + str(PLACES_API_KEY))
list_places_dict = list_places.json()
if list_places_dict['status'] == 'OK':
#Checking to make sure there an establishment is found within 5000 meters of the midpoint
place_id = list_places_dict['results'][0]['place_id']
#This pulls the Place ID of the first result on the list of bars and restaurants within 5000 meters of the middle point
place_details = requests.get('https://maps.googleapis.com/maps/api/place/details/json?placeid=' + str(place_id) + '&key=' + str(PLACES_API_KEY))
place_details = place_details.json()
if place_details['status'] == 'OK':
place_name = place_details['result']['name']
place_address = place_details['result']['formatted_address']
place_phone = place_details['result']['formatted_phone_number']
place_hours = place_details['result']['opening_hours']['weekday_text']
print('\nYou should meet at ' + place_name)
#This is the name of the restaurant closest to the midpoint.
print(place_address)
print(place_phone)
pprint.pprint(place_hours)
#Using pprint module to print days and hours on separate lines
restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
else:
print('\nI\'m sorry, there does not appear to be a restaurant within 5000 meters of the midpoint. \nMiddle Ground is working on expanding functionality to less densely populated areas, so stay tuned for future updates!')
restart = input('\nPress ENTER to input new addresses or type X to exit.\n')
"""FUTURE IMPROVEMENTS:
- Give users the choice to input restaurant, bar, cafe, museum, or any of the other supported "types" on the Google Places API
- Return a list of establishment choices, with options to rank by distance from midpoint or Yelp rating
- Make the acceptable radius of the midpoint establishment a percentage of the total distance between locations. Users traveling longer distances may be more willing to drive an extra few miles out of the way to visit a higher-rated establishment than users who plan on only walking a few city blocks to meet their friend.
- Take into consideration whether users will be driving, walking, public transportation as that can affect commuting time
- Explore whether I can connect Middle Ground with OpenTable to allow users to make a reservation
- Some results (McDonald's, Cozi, etc), while accurate, could be disappointing to users who are looking for a more elegant dining experience. Add a way for users to receive a second restaurant result if they are not happy with the first one. In some locations, chain restaurants may make up the bulk of available establishments, so I don't want to screen them out completely."""
| [
37811,
49045,
25,
13610,
257,
1430,
810,
2985,
460,
5128,
734,
7064,
290,
1441,
257,
7072,
355,
1602,
312,
10167,
355,
1744,
1022,
606,
13,
198,
1212,
481,
307,
1760,
416,
14320,
351,
262,
3012,
47010,
47426,
290,
37291,
7824,
13,
198... | 3.05565 | 2,947 |
#secure password generator
#written by boostOnTheBrain
import random
characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*-=+_?"
length = int(input('password length?'))
password = ''
for c in range(length):
password += random.choice(characters)
print(password)
length = int(input('password length?'))
password = ''
for c in range(length):
password += random.choice(characters)
print(password)
length = int(input('password length?'))
password = ''
for c in range(length):
password += random.choice(characters)
print(password)
input("PRESS ENTER TO EXIT") | [
2,
22390,
9206,
17301,
201,
198,
2,
15266,
416,
5750,
2202,
464,
44687,
201,
198,
11748,
4738,
201,
198,
201,
198,
10641,
19858,
796,
366,
39305,
4299,
456,
2926,
41582,
10295,
404,
80,
81,
301,
14795,
86,
5431,
89,
24694,
32988,
1751... | 2.800905 | 221 |
import json, subprocess
from ..... pyaz_utils import get_cli_name, get_params
| [
11748,
33918,
11,
850,
14681,
198,
6738,
11485,
986,
12972,
1031,
62,
26791,
1330,
651,
62,
44506,
62,
3672,
11,
651,
62,
37266,
628
] | 3.291667 | 24 |
import asyncio
import multiprocessing
from gabriel_protocol import gabriel_pb2
from gabriel_client.websocket_client import ProducerWrapper
| [
11748,
30351,
952,
198,
11748,
18540,
305,
919,
278,
198,
6738,
308,
397,
11719,
62,
11235,
4668,
1330,
308,
397,
11719,
62,
40842,
17,
198,
6738,
308,
397,
11719,
62,
16366,
13,
732,
1443,
5459,
62,
16366,
1330,
30436,
36918,
2848,
6... | 3.27907 | 43 |
nota = float(input('Insira a pontuação: '))
computar_notas(nota) | [
201,
198,
1662,
64,
796,
12178,
7,
15414,
10786,
20376,
8704,
257,
45443,
6413,
16175,
28749,
25,
705,
4008,
220,
201,
198,
785,
1996,
283,
62,
1662,
292,
7,
1662,
64,
8,
220,
220,
220,
220,
220,
220,
220
] | 1.923077 | 39 |
from subprocess import run
| [
6738,
850,
14681,
1330,
1057,
628
] | 4.666667 | 6 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action affecter."""
from primaires.format.fonctions import supprimer_accents
from primaires.perso.exceptions.action import ExceptionAction
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Donne une affection à un personnage ou une salle."""
@classmethod
@staticmethod
def affecter_personnage(personnage, affection, duree, force):
"""Donne l'affection au personnage.
Les paramètres à préciser sont :
* personnage : le personnage à affecter
* affection : la clé de l'affection sous la forme d'une chaîne
* duree : la durée de l'affection
* force : la force de l'affection
Si le personnage est déjà affecté par la même affection, les
nouvelles valeurs sont modulées (le résultat final dépend de
l'affection choisie).
Généralement, la durée et la force sont ajoutées aux anciennes
valeurs.
"""
# Essaye de trouver l'affection
cle = affection.lower()
try:
affection = importeur.affection.get_affection("personnage", cle)
except KeyError:
raise ErreurExecution("l'affection {} n'existe pas".format(repr(
cle)))
personnage.affecte(cle, int(duree), int(force))
@staticmethod
def affecter_salle(salle, affection, duree, force):
"""Donne l'affection à la salle.
Les paramètres à préciser sont :
* salle : la salle à affecter
* affection : la clé de l'affection sous la forme d'une chaîne
* duree : la durée de l'affection
* force : la force de l'affection
Si la salle est déjà affectée par la même affection, les
nouvelles valeurs sont modulées (le résultat final dépend de
l'affection choisie).
Généralement, la durée et la force sont ajoutées aux anciennes
valeurs.
"""
# Essaye de trouver l'affection
cle = affection.lower()
try:
affection = importeur.affection.get_affection("salle", cle)
except KeyError:
raise ErreurExecution("l'affection {} n'existe pas".format(repr(
cle)))
salle.affecte(cle, int(duree), int(force))
| [
2,
532,
9,
12,
66,
7656,
25,
18274,
69,
12,
23,
532,
9,
198,
198,
2,
15069,
357,
66,
8,
2321,
12509,
10351,
5777,
18653,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11... | 2.651731 | 1,473 |
#!/usr/bin/env python
from Bio import Entrez
import os
import argparse
import time
# =====================================================
# Takes a list of species names and queries GenBank for
# that species. If any data are in GenBank, a file is
# written that has the GenBank IDs for that species.
#
# Matt Gitzendanner
# University of Florida
# 3/07/16
#
# =====================================================
#####################
# Options
#
# -i input file with list of species.
# -e email address used for Entrez
# -o Output folder
#####################
parser = argparse.ArgumentParser()
parser.add_argument("-i", help="input file with GenBank accession IDs")
parser.add_argument("-e", help="email address")
parser.add_argument("-o", help="output folder name")
args = parser.parse_args()
infile = args.i
Entrez.email = args.e #sets the email for Entrez.
OutputFolder= args.o
#Function to test for non-zero file size.
try:
IN=open(infile, 'r')
except IOError:
print "Can't open file", infile
for Line in IN:
Line=Line.strip('\n')
Organism= Line + "[Orgn]"
OutFile=os.path.join(OutputFolder, Line.replace(" ", "_"))
#Check if we've already done this species--look for non-zero output file.
#This allows rerunning to catch failed runs without redoing all.
if is_non_zero_file(OutFile):
pass
else:
for i in range(3, 0, -1):
try:
GBSeq = Entrez.esearch(db="nucleotide", term=Organism ) #Get the sequence
except:
if i == 1:
raise
print('Failed to connect. Retrying')
time.sleep(5) #Wait 5 seconds and try again.
else:
break
Record= Entrez.read(GBSeq)
if int(Record["Count"]) > 0:
print ("%s had %d records in GenBank" %(Line, int(Record["Count"])))
try:
OUT=open(OutFile, 'w')
except:
print ("Can't open file: %s" %(OutFile))
for id in Record["IdList"]:
OUT.write(id + "\n")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
16024,
1330,
7232,
21107,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
11748,
640,
198,
198,
2,
46111,
4770,
1421,
198,
2,
220,
33687,
257,
1351,
286,
4693,
3891,
290,
2... | 2.737143 | 700 |
import unittest
import puremvc.patterns.observer
class ObserverTest(unittest.TestCase):
"""ObserverTest: Test Observer Pattern"""
__observerTestVar = None
def testObserverAccessors(self):
"""ObserverTest: Test Observer Accessors"""
obsrvr = puremvc.patterns.observer.Observer(None,None)
obsrvr.setNotifyContext(self)
obsrvr.setNotifyMethod(self.__observerTestMethod)
note = puremvc.patterns.observer.Notification('ObserverTestNote',10)
obsrvr.notifyObserver(note)
self.assertEqual(True, self.__observerTestVar == 10)
def testObserverConstructor(self):
"""ObserverTest: Test Observer Constructor"""
obsrvr = puremvc.patterns.observer.Observer(self.__observerTestMethod,self)
note = puremvc.patterns.observer.Notification('ObserverTestNote',5)
obsrvr.notifyObserver(note)
self.assertEqual(True, self.__observerTestVar == 5)
def testCompareNotifyContext(self):
"""ObserverTest: Test compareNotifyContext()"""
obsrvr = puremvc.patterns.observer.Observer(self.__observerTestMethod, self)
negTestObj = object()
self.assertEqual(False, obsrvr.compareNotifyContext(negTestObj))
self.assertEqual(True, obsrvr.compareNotifyContext(self))
def testNameAccessors(self):
"""NotificationTest: Test Name Accessors"""
note = puremvc.patterns.observer.Notification('TestNote')
self.assertEqual(True, note.getName() == 'TestNote')
def testBodyAccessors(self):
"""NotificationTest: Test Body Accessors"""
note = puremvc.patterns.observer.Notification(None)
note.setBody(5)
self.assertEqual(True, note.getBody() == 5)
def testConstructor(self):
"""NotificationTest: Test Constructor"""
note = puremvc.patterns.observer.Notification('TestNote',5,'TestNoteType')
self.assertEqual(True, note.getName() == 'TestNote')
self.assertEqual(True, note.getBody() == 5)
self.assertEqual(True, note.getType() == 'TestNoteType')
| [
11748,
555,
715,
395,
198,
198,
11748,
5899,
76,
28435,
13,
33279,
82,
13,
672,
15388,
198,
198,
4871,
27058,
14402,
7,
403,
715,
395,
13,
14402,
20448,
2599,
198,
220,
220,
220,
37227,
31310,
18497,
14402,
25,
6208,
27058,
23939,
378... | 2.518742 | 827 |
#!/usr/bin/env python
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import pytest
from scribd_dl import ScribdDL
# def pytest_addoption(parser):
# parser.addoption("--driver", action="store", default="chrome", help="Type in browser type")
# parser.addoption("--url", action="store", default="https://.../", help="url")
@pytest.fixture(scope='session') # Can be module, session, function, class
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
27... | 2.90566 | 159 |
import random | [
11748,
4738
] | 6.5 | 2 |
"""Test twotuple_metric."""
# pylint: disable=invalid-name
from pathlib import Path
from itertools import zip_longest
import pandas as pd
from align_benchmark.twotuple_metric import twotuple_metric
file_loc = "data/para-wh-ch2-benchmark1.xlsx"
if not Path(file_loc).exists():
raise SystemExit(f"File [{file_loc}] does not exist.")
_ = pd.read_excel(file_loc, header=0)[["Unnamed: 1", "Unnamed: 3"]]
_.columns = ["list1", "list2"]
bm1 = _.to_numpy().tolist()
lst = [*zip_longest(range(33), range(36), fillvalue=32)]
def test_twotuple_metric55():
"""Test twotuple_metric 5 5."""
assert twotuple_metric(bm1[5], lst[5]) == 0.5
def test_twotuple_metric_nonnumerical_entry():
"""Test entry that cannot be converted to integer."""
assert twotuple_metric([0, 1], [0, ""]) == 0.0
| [
37811,
14402,
665,
313,
29291,
62,
4164,
1173,
526,
15931,
198,
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
198,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
340,
861,
10141,
1330,
19974,
62,
6511,
395,
198,
198,
11748,
... | 2.445122 | 328 |
from collections import namedtuple
from inspect import signature
activation_func = node('ActivationFunc', activation_func=None, inplace=True)
add = node('Add', inplace=True)
add_relu = node('AddRelu', inplace=False)
bn = node('BatchNorm2d', ['num_features'], eps=1e-5, momentum=0.1, affine=True, track_running_stats=True)
clip = node('Clip', min_val=-1, max_val=1, inplace=False, min_value=None, max_value=None)
concat_pool_2d = node('ConcatPool2d')
conv = node('Conv2d', ['in_channels', 'out_channels', 'kernel_size'], stride=1, padding=0, dilation=1, groups=1, bias=True)
correct = node('Correct')
dropout = node('Dropout', p=0.5, inplace=False)
global_avg_pool = node('GlobalAvgPool2d')
identity = node('Identity')
linear = node('Linear', ['in_features', 'out_features'], bias=True)
max_pool = node('MaxPool2d', ['kernel_size'], stride=None, padding=0, dilation=1, return_indices=False, ceil_mode=False)
relu = node('ReLU', inplace=True)
relu6 = node('ReLU6', inplace=True)
x_entropy = node('CrossEntropyLoss', weight=None, size_average=True, ignore_index=-100, reduce=True)
shortcut = node_def(Shortcut)#, ['in_channels', 'out_channels', 'stride'], identity=False)
| [
6738,
17268,
1330,
3706,
83,
29291,
198,
6738,
10104,
1330,
9877,
198,
220,
220,
220,
220,
220,
220,
220,
198,
198,
48545,
62,
20786,
796,
10139,
10786,
25526,
341,
37,
19524,
3256,
14916,
62,
20786,
28,
14202,
11,
287,
5372,
28,
1782... | 2.678005 | 441 |
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import stack
import stack.commands
from collections import defaultdict
from collections import OrderedDict
from pathlib import Path
import json
class Command(stack.commands.dump.command):
"""
Dump the contents of the stacki database as json.
This command dumps specifically the virtual machine data.
For each host added as a virtual machine, it will dump the
vm specific data including the hypervisor, storage, memory,
and cpu cores
<example cmd='dump vm'>
Dump json data for virtual machines in the stacki database
</example>
<related>load</related>
"""
| [
2,
2488,
22163,
4766,
31,
198,
2,
15069,
357,
66,
8,
4793,
532,
13130,
3813,
14706,
198,
2,
1439,
2489,
10395,
13,
23881,
72,
7,
81,
8,
410,
20,
13,
87,
8931,
72,
13,
785,
198,
2,
3740,
1378,
12567,
13,
785,
14,
15156,
14706,
... | 3.474419 | 215 |
from math import isnan, sqrt, pow, floor, ceil, trunc
from math_tools import clamp | [
6738,
10688,
1330,
2125,
272,
11,
19862,
17034,
11,
7182,
11,
4314,
11,
2906,
346,
11,
40122,
198,
6738,
10688,
62,
31391,
1330,
29405
] | 3.416667 | 24 |
#!/usr/bin/env python3
import os
import requests
from selfdrive.test.process_replay.test_processes import segments
from selfdrive.test.process_replay.process_replay import CONFIGS
BASE_URL = "https://github.com/martinl/openpilot-ci/raw/master/process_replay/"
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
ref_commit = open(os.path.join(process_replay_dir, "ref_commit")).read().strip()
for car_brand, segment in segments:
for cfg in CONFIGS:
cmp_log_url = (BASE_URL + "%s/%s_%s_%s.bz2" % (ref_commit, segment.replace("|", "_"), cfg.proc_name, ref_commit))
cmp_log_fn = os.path.join(process_replay_dir, "%s_%s_%s.bz2" % (segment, cfg.proc_name, ref_commit))
r = requests.get(cmp_log_url)
if r.status_code == 200:
with open(cmp_log_fn, 'wb') as f:
f.write(r.content)
else:
print("Failed to download: " + cmp_log_url)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
28686,
198,
11748,
7007,
198,
198,
6738,
2116,
19472,
13,
9288,
13,
14681,
62,
260,
1759,
13,
9288,
62,
14681,
274,
1330,
17894,
198,
6738,
2116,
19472,
13,
9288,
13,
14681,... | 2.378378 | 370 |
from example.dto.contact_dto import ContactCreateDto, ContactUpdateDto, ContactDetailsDto
from example.model.contact import Contact
from pf_sqlalchemy.crud.pfs_rest_helper_service import PfsRestHelperService
pfs_rest_helper_service = PfsRestHelperService(Contact)
| [
6738,
1672,
13,
67,
1462,
13,
32057,
62,
67,
1462,
1330,
14039,
16447,
35,
1462,
11,
14039,
10260,
35,
1462,
11,
14039,
24259,
35,
1462,
198,
6738,
1672,
13,
19849,
13,
32057,
1330,
14039,
198,
6738,
279,
69,
62,
25410,
282,
26599,
... | 3.256098 | 82 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'A program does that is a DJ by using feedback provided by the dancers.',
'author': 'Thomas Schaper',
'url': 'https://gitlab.com/SilentDiscoAsAService/DJFeet',
'download_url': 'https://gitlab.com/SilentDiscoAsAService/DJFeet',
'author_email': 'thomas@libremail.nl',
'version': '0.0',
'install_requires': ['nose'],
'packages': ['dj_feet'],
'scripts': [],
'entry_points': {
'console_scripts': [
'server = dj_feet.cli:main'
]
},
'name': 'dj_feet'
}
setup(**config)
| [
28311,
25,
198,
220,
220,
220,
422,
900,
37623,
10141,
1330,
9058,
198,
16341,
17267,
12331,
25,
198,
220,
220,
220,
422,
1233,
26791,
13,
7295,
1330,
9058,
198,
198,
11250,
796,
1391,
198,
220,
220,
220,
705,
11213,
10354,
705,
32,
... | 2.386282 | 277 |
# -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.analyzers.awssessiontokenanalyzer import AWSSessionTokenAnalyzer
if __name__ == '__main__':
unittest.main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
555,
715,
395,
198,
6738,
555,
715,
395,
1330,
15290,
198,
198,
6738,
1613,
538,
675,
13,
38200,
47031,
13,
707,
824,
2521,
30001,
38200,
9107,
1330,
14356,
5432,... | 2.722222 | 72 |
import frappe
import json
from datetime import timedelta, date
| [
11748,
5306,
27768,
198,
11748,
33918,
198,
6738,
4818,
8079,
1330,
28805,
12514,
11,
3128,
198
] | 3.9375 | 16 |
"""Data acquisition system.
"""
import datetime
import os
import sys
import time
import stockdaq.data.downloader_dict
from stockdaq.logger import logger
class Acquisiter:
"""Data Acquisiter
Parameters
----------
stocklist: list of str
List of symbols of stocks of interest.
apikey_dict: dict
A dictionary of API keys in {"api":"key"} pairs.
api_list: list of str, optional
List of APIs, in preferred order.
Options are ["Alpha Vantage",]
root_dir: str, optional
The root directory of the database.
frequency: str, optional
"intraday", "daily", "weekly", "monthly"
Options are ["intraday"].
file_structure: list of str, optional
How to set up parent/child folders.
Defaults to ["symbol", "frequency", "data"].
Attributes
----------
stocklist: list of str
List of symbols of stocks of interest.
apikey_dict: dict
A dictionary of API keys in {"api":"key"} pairs.
api_list: list of str, optional
List of APIs, in preferred order.
Options are ["Alpha Vantage",]
root_dir: str, optional
The root directory of the database.
frequency: str, optional
"intraday", "daily", "weekly", "monthly"
Options are ["intraday"].
file_structure: list of str, optional
How to set up parent/child folders.
Defaults to ["symbol", "frequency", "data"].
"""
def __init__(self, stocklist, api_config_path, apikey_dict,
api_list=["Alpha Vantage",],frequency="intraday", root_dir="./",
file_structure=["symbol", "frequency", "data"],
rolling=False, api_call_interval=12,
database_update_interval=86400):
"""Constructor
Parameters
----------
stocklist: list of str
List of symbols of stocks of interest.
api_config_path: str
Path to the API configuration file.
apikey_dict: dict
A dictionary of API keys in {"api":"key"} pairs.
api_list: list of str, optional
List of APIs, in preferred order.
Options are ["Alpha Vantage",]
root_dir: str, optional
The root directory of the database.
frequency: str, optional
"intraday", "daily", "weekly", "monthly"
Options are ["intraday"].
file_structure: list of str, optional
How to set up parent/child folders.
Defaults to ["symbol", "frequency", "data"].
rolling: boolean, optional
Rolling update. Rate limited by API requestion limitations
set in the API configuration file.
Defaults to be False.
api_call_interval: int, optional
Minimal delay (seconds) between API calls.
Use your API's limitation to derive this value.
Defaults to 12.
database_update_interval: int, optional
Interval (seconds) between each database update.
Defaults to 86400 (1 day).
"""
self.stocklist = stocklist
self.root_dir = root_dir
self.api_config_path = api_config_path
self.api_list = api_list
self.apikey_dict = apikey_dict
self.frequency = frequency
self.file_structure = file_structure
self.rolling = rolling
self.api_call_interval = datetime.timedelta(seconds=api_call_interval)
self.database_update_interval = datetime.timedelta(
seconds=database_update_interval)
def update_database(self, download_kwargs={}, export_kwargs={}):
"""Get stock data from API and update datebase.
Parameters
----------
downloader_kwargs: dict
Keyword arguments passed to
stockdaq.data.downloader.YourDownloader.download() method.
export_kwargs: dict
Keyword arguments passed to
stockdaq.data.downloader.Downloader.export() method.
"""
# api = self.api_list[0]
# apikey = self.apikey_dict[api]
# downloader = stockdaq.data.downloader_dict.downloader_dict[api](
# apikey=apikey
# )
last_update_datetime = datetime.datetime.now()
for symbol in self.stocklist:
for api in self.api_list:
try:
apikey = self.apikey_dict[api]
downloader = (
stockdaq.data.downloader_dict.downloader_dict[api](
apikey=apikey
)
)
downloader.download(
symbol=symbol, frequency=self.frequency,
**download_kwargs
)
last_api_call_datetime = datetime.datetime.now()
# Now prefix is the dir.
prefix = self.get_prefix(symbol=symbol)
if not os.path.isdir(prefix):
os.makedirs(prefix)
# Now add the customized prefix
try:
export_kwargs["prefix"]
except KeyError:
export_kwargs["prefix"] = None
if export_kwargs["prefix"] is not None:
prefix += export_kwargs["prefix"]
new_export_kwargs = dict(export_kwargs)
new_export_kwargs["prefix"] = prefix
downloader.export(**new_export_kwargs)
while (datetime.datetime.now() - last_api_call_datetime <
self.api_call_interval):
time.sleep(1)
break # Break out of the api loop when success
except ValueError as err:
logger.error("Error encountered when trying to acquisite "
"symbol: {} data from API: {}\nError message:"
"\n{}"
"".format(symbol, api, err))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
logger.info("Database update finished.")
if self.rolling:
next_update_datetime = (last_update_datetime
+ self.database_update_interval)
logger.info("Rolling update enabled, "
"next update is scheduled at "
"{}.".format(str(next_update_datetime)))
while datetime.datetime.now() < next_update_datetime:
time.sleep(1)
self.update_database(
download_kwargs=download_kwargs, export_kwargs=export_kwargs
)
def get_prefix(self, symbol):
"""Get path prefix for a specific data.
Parameters
----------
symbol: str
The stock symbol.
Returns
-------
prefix: str
The prefix of the file path.
"""
prefix = self.root_dir+""
for folder in self.file_structure:
if folder == "data":
break
elif folder == "frequency":
prefix += self.frequency+"/"
elif folder == "symbol":
prefix += symbol+"/"
else:
raise ValueError("{} structure not available.".format(folder))
return prefix
| [
37811,
6601,
12673,
1080,
13,
198,
37811,
198,
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
198,
11748,
4283,
48539,
13,
7890,
13,
15002,
263,
62,
11600,
198,
6738,
4283,
48539,
13,
6404,
1362,
1330,
49... | 2.054705 | 3,656 |
# -*- encoding: utf-8 -*-
"""
hio.help.timing module
"""
import time
from .. import hioing
class TimerError(hioing.HioError):
"""
Generic Timer Errors
Usage:
raise TimerError("error message")
"""
class RetroTimerError(TimerError):
"""
Error due to real time being retrograded before start time of timer
Usage:
raise RetroTimerError("error message")
"""
class Timer(hioing.Mixin):
"""
Class to manage real elaspsed time using time module.
Attributes:
._start is start tyme in seconds
._stop is stop tyme in seconds
Properties:
.duration is float time duration in seconds of timer from ._start to ._stop
.elaspsed is float time elasped in seconds since ._start
.remaining is float time remaining in seconds until ._stop
.expired is boolean, True if expired, False otherwise, i.e. time >= ._stop
methods:
.start() start timer at current time
.restart() = restart timer at last ._stop so no time lost
"""
def __init__(self, duration=0.0, start=None, **kwa):
"""
Initialization method for instance.
Parameters:
duration is float duration of timer in seconds (fractional)
start is float optional start time in seconds allows starting before
or after current time
"""
super(Timer, self).__init__(**kwa) # Mixin for Mult-inheritance MRO
self._start = float(start) if start is not None else time.time()
self._stop = self._start + float(duration) # need for default duration
self.start(duration=duration, start=start)
@property
def duration(self):
"""
duration property getter, .duration = ._stop - ._start
.duration is float duration tyme
"""
return (self._stop - self._start)
@property
def elapsed(self):
"""
elapsed time property getter,
Returns elapsed time in seconds (fractional) since ._start.
"""
return (time.time() - self._start)
@property
def remaining(self):
"""
remaining time property getter,
Returns remaining time in seconds (fractional) before ._stop.
"""
return (self._stop - time.time())
@property
def expired(self):
"""
Returns True if timer has expired, False otherwise.
time.time() >= ._stop,
"""
return (time.time() >= self._stop)
def start(self, duration=None, start=None):
"""
Starts Timer of duration secs at start time start secs.
If duration not provided then uses current duration
If start not provided then starts at current time.time()
"""
# remember current duration when duration not provided
duration = float(duration) if duration is not None else self.duration
self._start = float(start) if start is not None else time.time()
self._stop = self._start + duration
return self._start
def restart(self, duration=None):
"""
Lossless restart of Timer at start = ._stop for duration if provided,
Otherwise current duration.
No time lost. Useful to extend Timer so no time lost
"""
return self.start(duration=duration, start=self._stop)
class MonoTimer(Timer):
"""
Class to manage real elaspsed time using time module but with monotonically
increating time guarantee in spite of system time being retrograded.
If the system clock is retrograded (moved back in time) while the timer is
running then time.time() could move to before the start time.
MonoTimer detects this retrograde and if retro is True then
retrogrades the start and stop times back Otherwise it raises a TimerRetroError.
MonoTimer is not able to detect a prograded clock (moved forward in time)
Attributes:
._start is start time in seconds
._stop is stop time in seconds
._last is last measured time in seconds with retrograde handling
.retro is boolean If True retrograde ._start and ._stop when time is retrograded.
Properties:
.duration is float time duration in seconds of timer from ._start to ._stop
.elaspsed is float time elasped in seconds since ._start
.remaining is float time remaining in seconds until ._stop
.expired is boolean True if expired, False otherwise, i.e. time >= ._stop
.latest is float latest measured time in seconds with retrograte handling
methods:
.start() = start timer at current time returns start time
.restart() = restart timer at last ._stop so no time lost, returns start time
"""
def __init__(self, duration=0.0, start=None, retro=True):
"""
Initialization method for instance.
Parameters:
duration in seconds (fractional)
start is float optional start time in seconds allows starting before
or after current time
retro is boolean IF True automaticall shift timer whenever
retrograded clock detected Otherwise ignore
"""
self._start = float(start) if start is not None else time.time()
self._stop = self._start + float(duration) # need for default duration
self._last = self._start
self.retro = True if retro else False # ensure boolean
self.start(duration=duration, start=start)
@property
def elapsed(self):
"""
elapsed time property getter,
Returns elapsed time in seconds (fractional) since ._start.
"""
return (self.latest - self._start)
@property
def remaining(self):
"""
remaining time property getter,
Returns remaining time in seconds (fractional) before ._stop.
"""
return (self._stop - self.latest)
@property
def expired(self):
"""
Returns True if timer has expired, False otherwise.
.latest >= ._stop,
"""
return (self.latest >= self._stop)
@property
def latest(self):
"""
latest measured time property getter,
Returns latest measured time in seconds adjusted for retrograded system time.
"""
delta = time.time() - self._last # current time - last time checked
if delta < 0: # system clock has retrograded
if self.retro:
self._start += delta
self._stop += delta
else:
raise RetroTimerError("System time retrograded by {0} seconds"
" while timer running.".format(delta))
self._last += delta
return self._last
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
71,
952,
13,
16794,
13,
16514,
278,
8265,
198,
198,
37811,
198,
11748,
640,
198,
198,
6738,
11485,
1330,
289,
952,
278,
628,
198,
4871,
5045,
263,
12331,
7... | 2.66811 | 2,540 |
# coding: utf-8
import pytest
from requests import HTTPError, Response
from postmarker.core import USER_AGENT, PostmarkClient
from postmarker.models.messages import MessageManager, OutboundMessageManager
from postmarker.models.triggers import TriggersManager
@pytest.mark.parametrize("klass", (PostmarkClient, OutboundMessageManager, MessageManager, TriggersManager))
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
12972,
9288,
198,
6738,
7007,
1330,
14626,
12331,
11,
18261,
198,
198,
6738,
1281,
4102,
263,
13,
7295,
1330,
1294,
1137,
62,
4760,
3525,
11,
2947,
4102,
11792,
198,
6738,
1281,
4102,
263,
... | 3.485981 | 107 |
"""Implementation of the simple 50-line version of NOTEARS algorithm.
Defines the h function, the augmented Lagrangian, and its gradient.
Each augmented Lagrangian subproblem is minimized by L-BFGS-B from scipy.
Note: this version implements NOTEARS without l1 regularization,
i.e. lambda = 0, hence it requires n >> d.
"""
import numpy as np
import scipy.linalg as slin
import scipy.optimize as sopt
def notears(X: np.ndarray,
max_iter: int = 100,
h_tol: float = 1e-8,
w_threshold: float = 1e-1) -> np.ndarray:
"""Solve min_W ell(W; X) s.t. h(W) = 0 using augmented Lagrangian.
Args:
X: [n,d] sample matrix
max_iter: max number of dual ascent steps
h_tol: exit if |h(w)| <= h_tol
w_threshold: fixed threshold for edge weights
Returns:
W_est: [d,d] estimate
"""
n, d = X.shape
# X = X + np.random.normal(scale=1.0, size=(n, d))
w_est, w_new = np.zeros(d * d), np.zeros(d * d)
rho, alpha, h, h_new = 1.0, 0.0, np.inf, np.inf
mask = lambda x, y: True if x < 4 or y < 4 or x == y else False
bnds = [(0, 0) if mask(i, j) else (None, None) for i in range(d) for j in range(d)]
for _ in range(max_iter):
while rho < 1e+20:
sol = sopt.minimize(_func, w_est, method='L-BFGS-B', jac=_grad, bounds=bnds)
w_new = sol.x
h_new = _h(w_new)
if h_new > 0.25 * h:
rho *= 10
else:
break
w_est, h = w_new, h_new
alpha += rho * h
if h <= h_tol:
break
print(w_est.reshape([d, d]))
w_est[np.abs(w_est) < w_threshold] = 0
return w_est.reshape([d, d])
if __name__ == '__main__':
import glog as log
import networkx as nx
import utils
# configurations
n, d = 1000, 10
graph_type, degree, sem_type = 'erdos-renyi', 4, 'linear-gauss'
log.info('Graph: %d node, avg degree %d, %s graph', d, degree, graph_type)
log.info('Data: %d samples, %s SEM', n, sem_type)
# graph
log.info('Simulating graph ...')
G = utils.simulate_random_dag(d, degree, graph_type)
log.info('Simulating graph ... Done')
# data
log.info('Simulating data ...')
X = utils.simulate_sem(G, n, sem_type)
log.info('Simulating data ... Done')
# solve optimization problem
log.info('Solving equality constrained problem ...')
W_est = notears(X)
G_est = nx.DiGraph(W_est)
log.info('Solving equality constrained problem ... Done')
# evaluate
fdr, tpr, fpr, shd, nnz = utils.count_accuracy(G, G_est)
log.info('Accuracy: fdr %f, tpr %f, fpr %f, shd %d, nnz %d',
fdr, tpr, fpr, shd, nnz)
| [
37811,
3546,
32851,
286,
262,
2829,
2026,
12,
1370,
2196,
286,
5626,
17133,
50,
11862,
13,
198,
198,
7469,
1127,
262,
289,
2163,
11,
262,
30259,
21003,
36985,
666,
11,
290,
663,
31312,
13,
198,
10871,
30259,
21003,
36985,
666,
850,
45... | 2.152623 | 1,258 |
from __future__ import absolute_import
import inspect
import os
from collections import deque
from typing import Any, Optional, Callable
import torch
from laia.common.logging import get_logger
from laia.common.random import get_rng_state
_logger = get_logger(__name__)
class RollingSaver(Saver):
"""Saver wrapper that keeps a maximum number of files"""
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
11748,
10104,
198,
11748,
28686,
198,
6738,
17268,
1330,
390,
4188,
198,
6738,
19720,
1330,
4377,
11,
32233,
11,
4889,
540,
198,
198,
11748,
28034,
198,
198,
6738,
8591,
544,
13,... | 3.333333 | 111 |
"""
Module: 'flowlib.modules._lego' on M5 FlowUI v1.4.0-beta
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.11.0', version='v1.11-284-g5d8e1c867 on 2019-08-30', machine='ESP32 module with ESP32')
# Stubber: 1.3.1 - updated
from typing import Any
ENCODER_ADDR = 4
class Lego:
""""""
class Lego_Motor:
""""""
M5GO_WHEEL_ADDR = 86
MOTOR_CTRL_ADDR = 0
i2c_bus = None
machine = None
module = None
motor1_pwm = 0
motor2_pwm = 0
os = None
time = None
ustruct = None
| [
37811,
198,
26796,
25,
705,
2704,
4883,
571,
13,
18170,
13557,
1455,
78,
6,
319,
337,
20,
27782,
10080,
410,
16,
13,
19,
13,
15,
12,
31361,
198,
37811,
198,
2,
13122,
52,
25,
357,
17597,
3672,
11639,
9774,
2624,
3256,
18666,
12453,
... | 2.227273 | 220 |
from ptrlib import *
import os
HOST = os.getenv('SECCON_HOST', "localhost")
PORT = os.getenv('SECCON_PORT', "10042")
sock = Socket(HOST, int(PORT))
sock.sendlineafter("value 2: ", "help()")
sock.sendlineafter("help> ", "+")
sock.sendlineafter("--More--", "!/bin/cat /proc/self/environ")
print(sock.recvregex("SECCON\{.+\}"))
sock.close()
| [
6738,
50116,
8019,
1330,
1635,
198,
11748,
28686,
198,
198,
39,
10892,
796,
28686,
13,
1136,
24330,
10786,
23683,
10943,
62,
39,
10892,
3256,
366,
36750,
4943,
198,
15490,
796,
28686,
13,
1136,
24330,
10786,
23683,
10943,
62,
15490,
3256,... | 2.442857 | 140 |
import maya.cmds as cmds
import maya.mel as mel
| [
11748,
743,
64,
13,
28758,
82,
355,
23991,
82,
198,
11748,
743,
64,
13,
17694,
355,
7758,
628
] | 2.722222 | 18 |
""" Remove double instances of cells (and the unused vertices) """
from larlib import evalStruct
""" Generation of Struct object and transform to LAR model pair """
cubes = larCuboids([10,10,10],True)
V = cubes[0]
FV = cubes[1][-2]
CV = cubes[1][-1]
bcells = boundaryCells(CV,FV)
BV = [FV[f] for f in bcells]
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((V,BV))))
block = Model((V,BV))
struct = Struct(10*[block, t(10,0,0)])
struct = Struct(10*[struct, t(0,10,0)])
struct = Struct(3*[struct, t(0,0,10)])
W,FW = struct2lar(struct)
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
""" Remove the double instances of cells """
cellDict = defaultdict(list)
for k,cell in enumerate(FW):
cellDict[tuple(cell)] += [k]
FW = [list(key) for key in cellDict.keys() if len(cellDict[key])==1]
VIEW(EXPLODE(1.2,1.2,1.2)(MKPOLS((W,FW))))
""" Remove the unused vertices """
print "len(W) =",len(W)
V,FV = larRemoveVertices(W,FW)
print "len(V) =",len(V)
| [
37811,
17220,
4274,
10245,
286,
4778,
357,
392,
262,
21958,
9421,
1063,
8,
37227,
198,
6738,
300,
7063,
571,
1330,
5418,
44909,
198,
198,
37811,
16588,
286,
32112,
2134,
290,
6121,
284,
47211,
2746,
5166,
37227,
198,
66,
29080,
796,
263... | 2.328321 | 399 |
# Copyright 2014 by Kevin Wu.
# Copyright 2014 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for online functionality of the KEGG module."""
# Builtins
import unittest
from Bio.KEGG.KGML import KGML_parser
from Bio.KEGG.REST import kegg_conv, kegg_find, kegg_get
from Bio.KEGG.REST import kegg_info, kegg_link, kegg_list
from Bio import SeqIO
import requires_internet
requires_internet.check()
# TODO - revert to using with statements once we drop
# Python 2.6 and 2.7, see http://bugs.python.org/issue12487
class KEGGTests(unittest.TestCase):
"""Tests for KEGG REST API."""
class KGMLPathwayTests(unittest.TestCase):
"""Tests with metabolic maps."""
def test_parse_remote_pathway(self):
"""Download a KEGG pathway from the KEGG server and parse KGML."""
h = kegg_get("ko03070", "kgml")
pathway = KGML_parser.read(h)
self.assertEqual(pathway.name, "path:ko03070")
h.close()
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| [
2,
15069,
1946,
416,
7939,
18027,
13,
198,
2,
15069,
1946,
416,
5613,
23769,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
770,
2438,
318,
636,
286,
262,
8436,
404,
7535,
6082,
290,
21825,
416,
663,
198,
2,
5964,
13,
220,
4222,
766,
... | 2.746067 | 445 |
#
# verify.py: routines that handle comparison and display of expected
# vs. actual output
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
import re, sys
from difflib import unified_diff, ndiff
import pprint
import logging
import svntest
logger = logging.getLogger()
######################################################################
# Exception types
class SVNUnexpectedOutput(svntest.Failure):
"""Exception raised if an invocation of svn results in unexpected
output of any kind."""
pass
class SVNUnexpectedStdout(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in unexpected
output on STDOUT."""
pass
class SVNUnexpectedStderr(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in unexpected
output on STDERR."""
pass
class SVNExpectedStdout(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in no output on
STDOUT when output was expected."""
pass
class SVNExpectedStderr(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn results in no output on
STDERR when output was expected."""
pass
class SVNUnexpectedExitCode(SVNUnexpectedOutput):
"""Exception raised if an invocation of svn exits with a value other
than what was expected."""
pass
class SVNIncorrectDatatype(SVNUnexpectedOutput):
"""Exception raised if invalid input is passed to the
run_and_verify_* API"""
pass
class SVNDumpParseError(svntest.Failure):
"""Exception raised if parsing a dump file fails"""
pass
######################################################################
# Comparison of expected vs. actual output
def createExpectedOutput(expected, output_type, match_all=True):
"""Return EXPECTED, promoted to an ExpectedOutput instance if not
None. Raise SVNIncorrectDatatype if the data type of EXPECTED is
not handled."""
if isinstance(expected, list):
expected = ExpectedOutput(expected)
elif isinstance(expected, str):
expected = RegexOutput(expected, match_all)
elif isinstance(expected, int):
expected = RegexOutput(".*: E%d:.*" % expected, False)
elif expected is AnyOutput:
expected = AnyOutput()
elif expected is not None and not isinstance(expected, ExpectedOutput):
raise SVNIncorrectDatatype("Unexpected type for '%s' data" % output_type)
return expected
class ExpectedOutput(object):
"""Matches an ordered list of lines.
If MATCH_ALL is True, the expected lines must match all the actual
lines, one-to-one, in the same order. If MATCH_ALL is False, the
expected lines must match a subset of the actual lines, one-to-one,
in the same order, ignoring any other actual lines among the
matching ones.
"""
def __init__(self, expected, match_all=True):
"""Initialize the expected output to EXPECTED which is a string, or
a list of strings.
"""
assert expected is not None
self.expected = expected
self.match_all = match_all
def matches(self, actual):
"""Return whether SELF matches ACTUAL (which may be a list
of newline-terminated lines, or a single string).
"""
assert actual is not None
expected = self.expected
if not isinstance(expected, list):
expected = [expected]
if not isinstance(actual, list):
actual = [actual]
if self.match_all:
return expected == actual
i_expected = 0
for actual_line in actual:
if expected[i_expected] == actual_line:
i_expected += 1
if i_expected == len(expected):
return True
return False
def display_differences(self, message, label, actual):
"""Show the differences between the expected and ACTUAL lines. Print
MESSAGE unless it is None, the expected lines, the ACTUAL lines,
and a diff, all labeled with LABEL.
"""
display_lines(message, self.expected, actual, label, label)
display_lines_diff(self.expected, actual, label, label)
class AnyOutput(ExpectedOutput):
"""Matches any non-empty output.
"""
class RegexOutput(ExpectedOutput):
"""Matches a single regular expression.
If MATCH_ALL is true, every actual line must match the RE. If
MATCH_ALL is false, at least one actual line must match the RE. In
any case, there must be at least one line of actual output.
"""
def __init__(self, expected, match_all=True):
"EXPECTED is a regular expression string."
assert isinstance(expected, str)
ExpectedOutput.__init__(self, expected, match_all)
self.expected_re = re.compile(expected)
class RegexListOutput(ExpectedOutput):
"""Matches an ordered list of regular expressions.
If MATCH_ALL is True, the expressions must match all the actual
lines, one-to-one, in the same order. If MATCH_ALL is False, the
expressions must match a subset of the actual lines, one-to-one, in
the same order, ignoring any other actual lines among the matching
ones.
In any case, there must be at least one line of actual output.
"""
def __init__(self, expected, match_all=True):
"EXPECTED is a list of regular expression strings."
assert isinstance(expected, list) and expected != []
ExpectedOutput.__init__(self, expected, match_all)
self.expected_res = [re.compile(e) for e in expected]
class UnorderedOutput(ExpectedOutput):
"""Matches an unordered list of lines.
The expected lines must match all the actual lines, one-to-one, in
any order.
"""
class UnorderedRegexListOutput(ExpectedOutput):
"""Matches an unordered list of regular expressions.
The expressions must match all the actual lines, one-to-one, in any
order.
Note: This can give a false negative result (no match) when there is
an actual line that matches multiple expressions and a different
actual line that matches some but not all of those same
expressions. The implementation matches each expression in turn to
the first unmatched actual line that it can match, and does not try
all the permutations when there are multiple possible matches.
"""
class AlternateOutput(ExpectedOutput):
"""Matches any one of a list of ExpectedOutput instances.
"""
def __init__(self, expected, match_all=True):
"EXPECTED is a list of ExpectedOutput instances."
assert isinstance(expected, list) and expected != []
assert all(isinstance(e, ExpectedOutput) for e in expected)
ExpectedOutput.__init__(self, expected)
######################################################################
# Displaying expected and actual output
def display_trees(message, label, expected, actual):
'Print two trees, expected and actual.'
if message is not None:
logger.warn(message)
if expected is not None:
logger.warn('EXPECTED %s:', label)
svntest.tree.dump_tree(expected)
if actual is not None:
logger.warn('ACTUAL %s:', label)
svntest.tree.dump_tree(actual)
def display_lines_diff(expected, actual, expected_label, actual_label):
"""Print a unified diff between EXPECTED (labeled with EXPECTED_LABEL)
and ACTUAL (labeled with ACTUAL_LABEL).
Each of EXPECTED and ACTUAL is a string or a list of strings.
"""
if not isinstance(expected, list):
expected = [expected]
if not isinstance(actual, list):
actual = [actual]
logger.warn('DIFF ' + expected_label + ':')
for x in unified_diff(expected, actual,
fromfile='EXPECTED ' + expected_label,
tofile='ACTUAL ' + actual_label):
logger.warn('| ' + x.rstrip())
def display_lines(message, expected, actual,
expected_label, actual_label=None):
"""Print MESSAGE, unless it is None, then print EXPECTED (labeled
with EXPECTED_LABEL) followed by ACTUAL (labeled with ACTUAL_LABEL).
Each of EXPECTED and ACTUAL is a string or a list of strings.
"""
if message is not None:
logger.warn(message)
if type(expected) is str:
expected = [expected]
if type(actual) is str:
actual = [actual]
if actual_label is None:
actual_label = expected_label
if expected is not None:
logger.warn('EXPECTED %s:', expected_label)
for x in expected:
logger.warn('| ' + x.rstrip())
if actual is not None:
logger.warn('ACTUAL %s:', actual_label)
for x in actual:
logger.warn('| ' + x.rstrip())
def compare_and_display_lines(message, label, expected, actual,
                              raisable=None):
  """Compare two sets of output lines, and print them if they differ,
  preceded by MESSAGE iff not None.  EXPECTED may be an instance of
  ExpectedOutput (and if not, it is wrapped as such).  ACTUAL may be a
  list of newline-terminated lines, or a single string.  RAISABLE is an
  exception class, an instance of which is thrown if ACTUAL doesn't
  match EXPECTED."""
  assert expected is not None
  assert actual is not None
  if raisable is None:
    raisable = svntest.main.SVNLineUnequal
  ### It'd be nicer to use createExpectedOutput() here, but its
  ### semantics don't match all current consumers of this function.
  expected_output = (expected if isinstance(expected, ExpectedOutput)
                     else ExpectedOutput(expected))
  actual_lines = [actual] if isinstance(actual, str) else actual
  actual_lines = svntest.main.filter_dbg(actual_lines)
  if expected_output.matches(actual_lines):
    return
  expected_output.display_differences(message, label, actual_lines)
  raise raisable
def verify_outputs(message, actual_stdout, actual_stderr,
                   expected_stdout, expected_stderr, all_stdout=True):
  """Compare and display expected vs. actual stderr and stdout lines:
  if they don't match, print the difference (preceded by MESSAGE iff
  not None) and raise an exception.
  If EXPECTED_STDERR or EXPECTED_STDOUT is a string the string is
  interpreted as a regular expression.  For EXPECTED_STDOUT and
  ACTUAL_STDOUT to match, every line in ACTUAL_STDOUT must match the
  EXPECTED_STDOUT regex, unless ALL_STDOUT is false.  For
  EXPECTED_STDERR regexes only one line in ACTUAL_STDERR need match."""
  expected_stderr = createExpectedOutput(expected_stderr, 'stderr', False)
  expected_stdout = createExpectedOutput(expected_stdout, 'stdout', all_stdout)

  checks = ((actual_stderr, expected_stderr, 'STDERR', SVNExpectedStderr),
            (actual_stdout, expected_stdout, 'STDOUT', SVNExpectedStdout))
  for actual, expected, label, default_raisable in checks:
    if expected is None:
      continue
    # Regex expectations raise SVNUnmatchedError; plain line expectations
    # raise SVNLineUnequal; AnyOutput keeps the per-stream default.
    if isinstance(expected, RegexOutput):
      raisable = svntest.main.SVNUnmatchedError
    elif not isinstance(expected, AnyOutput):
      raisable = svntest.main.SVNLineUnequal
    else:
      raisable = default_raisable
    compare_and_display_lines(message, label, expected, actual, raisable)
def verify_exit_code(message, actual, expected,
                     raisable=SVNUnexpectedExitCode):
  """Compare and display expected vs. actual exit codes:
  if they don't match, print the difference (preceded by MESSAGE iff
  not None) and raise an exception."""
  if expected == actual:
    return
  display_lines(message, str(expected), str(actual), "Exit Code")
  raise raisable
# A simple dump file parser.  While sufficient for the current
# testsuite it doesn't cope with all valid dump files.
# One day we may need to parse individual property name/values into a map
def compare_dump_files(message, label, expected, actual):
  """Parse two dump files EXPECTED and ACTUAL, both of which are lists
  of lines as returned by run_and_verify_dump, and check that the same
  revisions, nodes, properties, etc. are present in both dumps.
  """
  parsed_expected = DumpParser(expected).parse()
  parsed_actual = DumpParser(actual).parse()
  if parsed_expected == parsed_actual:
    return
  # Report the structural differences as an ndiff of the pretty-printed
  # parse results.
  diff_lines = ndiff(pprint.pformat(parsed_expected).splitlines(),
                     pprint.pformat(parsed_actual).splitlines())
  raise svntest.Failure('\n' + '\n'.join(diff_lines))
| [
2,
198,
2,
220,
11767,
13,
9078,
25,
220,
31878,
326,
5412,
7208,
290,
3359,
286,
2938,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
3691,
13,
4036,
5072,
198,
2,
198,
2,
220,
3834,
9641,
318,
257,
... | 3.150823 | 4,071 |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension
# Build the CPU variant of the synchronized-batch-norm extension:
# compiles the two C++ sources into an importable module 'syncbn_cpu'
# using PyTorch's C++-extension machinery.
setup(
    name='syncbn_cpu',
    ext_modules=[
        CppExtension('syncbn_cpu', [
            'operator.cpp',
            'syncbn_cpu.cpp',
        ]),
    ],
    cmdclass={
        # BuildExtension supplies the compiler/linker flags needed to
        # build against the installed torch headers and libraries.
        'build_ext': BuildExtension
    })
| [
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
28034,
13,
26791,
13,
20322,
62,
2302,
3004,
1330,
10934,
11627,
3004,
11,
327,
381,
11627,
3004,
198,
198,
40406,
7,
198,
220,
220,
220,
1438,
11639,
27261,
9374,
62,
36166,
3256,
198,
... | 2.111842 | 152 |
# -*- coding: utf-8 -*-
from typing import List
from overrides import overrides
from recc.log.logging import recc_database_logger as logger
from recc.database.struct.group_member import GroupMember
from recc.database.struct.group_join_member import (
GroupJoinGroupMember,
ProjectJoinGroupMember,
)
from recc.database.interfaces.db_group_member import DbGroupMember
from recc.database.postgresql.mixin.pg_base import PgBase
from recc.database.postgresql.query.group_member import (
INSERT_GROUP_MEMBER,
UPDATE_GROUP_MEMBER_PERMISSION,
DELETE_GROUP_MEMBER,
SELECT_GROUP_MEMBER_BY_GROUP_UID_AND_USER_UID,
SELECT_GROUP_MEMBER_BY_GROUP_UID,
SELECT_GROUP_MEMBER_BY_USER_UID,
SELECT_GROUP_MEMBER_ALL,
SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID,
SELECT_GROUP_MEMBER_JOIN_GROUP_BY_USER_UID_AND_GROUP_UID,
SELECT_GROUP_MEMBER_JOIN_PROJECT_BY_USER_UID,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
19720,
1330,
7343,
198,
6738,
23170,
1460,
1330,
23170,
1460,
198,
6738,
302,
535,
13,
6404,
13,
6404,
2667,
1330,
302,
535,
62,
48806,
62,
6404,
1362,
355,
... | 2.624633 | 341 |
# Command prefix that triggers the bot (messages starting with "!?").
PREFIX = '!?'
# API token -- intentionally blank here; presumably injected at deploy
# time (do not commit real tokens).  TODO confirm.
TOKEN = ''
# Bot identity/image name.
IMAGE = 'qiskitbot'
# Identifiers to ignore; a single empty entry means nothing is
# blacklisted yet.
BLACKLIST = ['']
| [
47,
31688,
10426,
796,
705,
0,
8348,
198,
10468,
43959,
796,
10148,
198,
3955,
11879,
796,
705,
80,
1984,
270,
13645,
6,
198,
9148,
8120,
45849,
796,
685,
7061,
60,
198
] | 2 | 31 |
#!/usr/bin/env python3
import openpmd_api as io
# Sanity-check the openPMD output of the LaserAccelerationRZ test:
# open the series read-only and verify iteration/mesh counts and shapes.
series = io.Series("LaserAccelerationRZ_opmd_plt/openpmd_%T.h5", io.Access.read_only)
assert len(series.iterations) == 3, 'improper number of iterations stored'
ii = series.iterations[20]
assert len(ii.meshes) == 7, 'improper number of meshes'
# select j_t
jt = ii.meshes['j']['t']
# this is in C (Python) order; r is the fastest varying index
(Nm, Nz, Nr) = jt.shape
# Expected dimensions: 3 azimuthal modes, 512 z points, 64 radial points.
assert Nm == 3, 'Wrong number of angular modes stored or possible incorrect ordering when flushed'
assert Nr == 64, 'Wrong number of radial points stored or possible incorrect ordering when flushed'
assert Nz == 512, 'Wrong number of z points stored or possible incorrect ordering when flushed'
# Scalar records: part_per_grid is 2-D (z, r); rho_electrons keeps the
# mode axis as well (modes, z, r).
assert ii.meshes['part_per_grid'][io.Mesh_Record_Component.SCALAR].shape == [512,64], 'problem with part_per_grid'
assert ii.meshes['rho_electrons'][io.Mesh_Record_Component.SCALAR].shape == [3, 512, 64], 'problem with rho_electrons'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1280,
4426,
67,
62,
15042,
355,
33245,
198,
198,
25076,
796,
33245,
13,
27996,
7203,
43,
6005,
12832,
7015,
341,
49,
57,
62,
404,
9132,
62,
489,
83,
14,
9654,
4426,
... | 2.962733 | 322 |
# Project metadata constants: package name, release version, and author
# contact.
name = 'Muria'
version = '0.5.3'
author = 'Ahmad Ghulam Zakiy <https://twitter.com/xakiy>'
| [
3672,
796,
705,
23830,
544,
6,
198,
9641,
796,
705,
15,
13,
20,
13,
18,
6,
198,
9800,
796,
705,
10910,
9937,
11972,
377,
321,
32605,
7745,
220,
1279,
5450,
1378,
6956,
13,
785,
14,
87,
461,
7745,
29,
6,
198
] | 2.243902 | 41 |
from sys import stdin, setrecursionlimit
# Deep recursion limit for a recursive backtracking solver.
# NOTE(review): stdin is imported but input() is used below -- confirm
# which input channel is intended.
setrecursionlimit(10**7)
# Function to check all the valid way to solve the sudoku.
# Read a 9x9 board: nine lines of nine whitespace-separated integers.
board = [[ int(ele) for ele in input().split() ]for i in range(9)]
# NOTE(review): solveSudoku is not defined in this excerpt -- presumably
# a backtracking solver returning True/False; confirm.
ans = solveSudoku(board)
if ans is True:
    print('true')
else:
    print('false')
| [
6738,
25064,
1330,
14367,
259,
11,
900,
8344,
24197,
32374,
198,
2617,
8344,
24197,
32374,
7,
940,
1174,
22,
8,
628,
198,
198,
2,
15553,
284,
2198,
477,
262,
4938,
835,
284,
8494,
262,
424,
67,
11601,
13,
628,
198,
198,
3526,
796,
... | 2.848485 | 99 |
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import math
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dictionary_corpus import Corpus, Dictionary, tokenize
from utils import batchify
import lm_argparser
# Command-line interface: inherit the shared LM options and add the
# flags specific to this train/test driver.
parser = argparse.ArgumentParser(parents=[lm_argparser.lm_parser],
                                 description="Training and testing ngram LSTM model")
parser.add_argument('--train', action='store_true', default=False,
                    help='enable training regime')
parser.add_argument('--test', action='store_true', default=False,
                    help='enable testing regime')
parser.add_argument('--test_path', type=str,
                    help='path to test file, gold file and vocab file output')
parser.add_argument('--suffix', type=str,
                    help='suffix for generated output files which will be saved as path.output_suffix')
args = parser.parse_args()
# Log to both the console and a file.
# NOTE(review): args.log is not added above -- presumably declared by
# lm_argparser.lm_parser; confirm.
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(),
                                                  logging.FileHandler(args.log)])
logging.info(args)
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder.
    ntoken: vocab size
    nip: embedding size
    """
    # NOTE(review): only the docstring of this class is present in this
    # excerpt; the constructor/forward implementation is not visible here.
###############################################################################
# Training code
###############################################################################

# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    if not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    else:
        torch.cuda.manual_seed(args.seed)

###############################################################################
# Load data
###############################################################################

if args.train:
    logging.info("Loading data")
    corpus = Corpus(args.data)
    ntokens = len(corpus.dictionary)
    logging.info("Vocab size %d", ntokens)

    logging.info("Batchying..")
    eval_batch_size = 256
    train_data = batchify(corpus.train, args.batch_size, args.cuda)
    val_data = batchify(corpus.valid, eval_batch_size, args.cuda)
    test_data = batchify(corpus.test, eval_batch_size, args.cuda)

    logging.info("Building the model")
    model = RNNModel(args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied)
    if args.cuda:
        model.cuda()

    # Loop over epochs.  Ctrl+C breaks out of training early; the best
    # model so far has already been checkpointed to args.save.
    lr = args.lr
    best_val_loss = None
    try:
        for epoch in range(1, args.epochs+1):
            epoch_start_time = time.time()
            train()
            val_loss = evaluate_perplexity(val_data)
            logging.info('-' * 89)
            logging.info('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                         'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                                    val_loss, math.exp(val_loss)))
            logging.info('-' * 89)
            # Save the model if the validation loss is the best we've seen so
            # far.  BUGFIX: compare against None explicitly so a legitimate
            # best loss of 0.0 is not treated as "no best yet".
            if best_val_loss is None or val_loss < best_val_loss:
                with open(args.save, 'wb') as f:
                    torch.save(model, f)
                best_val_loss = val_loss
            else:
                # Anneal the learning rate if no improvement has been seen in
                # the validation dataset.
                lr /= 4.0
    except KeyboardInterrupt:
        logging.info('-' * 89)
        logging.info('Exiting from training early')

    # Load the best saved model.
    # BUGFIX: open() in binary mode ('rb') must not be given an encoding
    # argument -- it raises "ValueError: binary mode doesn't take an
    # encoding argument" on Python 3.  torch.load() needs a binary stream,
    # so the encoding keyword is dropped.
    with open(args.save, 'rb') as f:
        model = torch.load(f)

    # Run on valid data with OOV excluded
    test_loss = evaluate_perplexity(val_data, exclude_oov=True)
    logging.info('=' * 89)
    logging.info('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(test_loss, math.exp(test_loss)))
    logging.info('=' * 89)
#####################################
#             Testing               #
#####################################

if args.test:
    dictionary = Dictionary(args.data)

    # Load the trained model.  BUGFIX: open() in binary mode ('rb') must
    # not be given an encoding argument -- it raises ValueError on
    # Python 3 -- so the encoding keyword is dropped.
    with open(args.save, 'rb') as f:
        print("Loading the model")
        if args.cuda:
            model = torch.load(f)
            model.cuda()
        else:
            # to convert model trained on cuda to cpu model
            model = torch.load(f, map_location=lambda storage, loc: storage)
            model.cpu()
    model.eval()

    eval_batch_size = 1
    ntokens = len(dictionary)

    # depends on generation script (constantly modified) - the column where
    # the target word index is written
    index_col = 3

    mask = create_target_mask(args.test_path + ".text", args.test_path + ".gold", index_col)
    mask_data = batchify(torch.LongTensor(mask), eval_batch_size, False)
    test_data = batchify(tokenize(dictionary, args.test_path + ".text"), eval_batch_size, args.cuda)

    f_output = open(args.test_path + ".output_" + args.suffix, 'w')
    evaluate_on_mask(test_data, mask_data)
    f_output.close()
| [
2,
15069,
357,
66,
8,
2864,
12,
25579,
11,
3203,
11,
3457,
13,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
770,
2723,
2438,
318,
11971,
739,
262,
5964,
1043,
287,
262,
198,
2,
38559,
24290,
2393,
287,
262,
6808,
8619,
286,
42... | 2.48114 | 2,280 |
import boto3

# Default AWS region and the matching Rekognition service endpoint URL.
defaultRegion = 'eu-west-1'
defaultUrl = 'https://rekognition.'+defaultRegion+'.amazonaws.com'
# Default S3 bucket for input data -- presumably publicly readable; confirm.
defaultBucket = "jsimon-public"
| [
11748,
275,
2069,
18,
198,
198,
12286,
47371,
796,
705,
12496,
12,
7038,
12,
16,
6,
198,
12286,
28165,
796,
705,
5450,
1378,
37818,
2360,
653,
2637,
10,
12286,
47371,
10,
4458,
33103,
8356,
13,
785,
6,
198,
12286,
33,
38811,
796,
36... | 2.877551 | 49 |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
# Extracts the version string from HAProxy's banner, matching either the
# community "HAProxy" or enterprise "hapee-lb" prefix and capturing
# everything after "version " up to the first comma.
VERSION_PATTERN = re.compile(r"(?:HAProxy|hapee-lb) version ([^,]+)")
| [
2,
357,
34,
8,
16092,
324,
519,
11,
3457,
13,
12131,
12,
25579,
198,
2,
1439,
2489,
10395,
198,
2,
49962,
739,
45157,
1431,
347,
10305,
13789,
357,
3826,
38559,
24290,
8,
198,
11748,
302,
198,
198,
43717,
62,
47,
1404,
31800,
796,
... | 2.782609 | 69 |
import os
import time
import hashlib
import hmac
import base64
import random
import sys
import binascii
import requests
import urllib.parse
from nmtwizard.cloud_translation_framework import CloudTranslationFramework
if __name__ == "__main__":
    # NOTE(review): TencentTranslateFramework is not defined in this
    # excerpt (only CloudTranslationFramework is imported above) --
    # presumably its definition sits between the imports and this guard;
    # confirm.
    TencentTranslateFramework().run()
| [
11748,
28686,
198,
11748,
640,
198,
11748,
12234,
8019,
198,
11748,
289,
20285,
198,
11748,
2779,
2414,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
9874,
292,
979,
72,
198,
11748,
7007,
198,
11748,
2956,
297,
571,
13,
29572,
198,
1... | 3.445783 | 83 |
# coding: utf-8
import pprint
import re
import six
class ListRecordSetsWithLineRequest:
    """Request model for listing DNS record sets with line information.

    Generated-SDK style request object (huaweicloud sdk): every query
    parameter is an optional string attribute exposed via a property pair.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'zone_type': 'str',
        'marker': 'str',
        'limit': 'str',
        'offset': 'str',
        'line_id': 'str',
        'tags': 'str',
        'status': 'str',
        'type': 'str',
        'name': 'str',
        'id': 'str',
        'records': 'str',
        'sort_key': 'str',
        'sort_dir': 'str',
        'health_check_id': 'str',
        'search_mode': 'str'
    }

    attribute_map = {
        'zone_type': 'zone_type',
        'marker': 'marker',
        'limit': 'limit',
        'offset': 'offset',
        'line_id': 'line_id',
        'tags': 'tags',
        'status': 'status',
        'type': 'type',
        'name': 'name',
        'id': 'id',
        'records': 'records',
        'sort_key': 'sort_key',
        'sort_dir': 'sort_dir',
        'health_check_id': 'health_check_id',
        'search_mode': 'search_mode'
    }

    def __init__(self, zone_type=None, marker=None, limit=None, offset=None, line_id=None, tags=None, status=None, type=None, name=None, id=None, records=None, sort_key=None, sort_dir=None, health_check_id=None, search_mode=None):
        """ListRecordSetsWithLineRequest - a model defined in huaweicloud sdk.

        All parameters are optional strings; see the matching property
        docstrings for the accepted values of each field.
        """
        self.discriminator = None
        # Collect the constructor arguments by attribute name so the 15
        # near-identical "self._x = None; if x is not None: self.x = x"
        # stanzas of the generated code collapse into one loop.  Assigning
        # non-None values via setattr() still routes through the property
        # setters, preserving the original assignment semantics.
        values = {
            'zone_type': zone_type, 'marker': marker, 'limit': limit,
            'offset': offset, 'line_id': line_id, 'tags': tags,
            'status': status, 'type': type, 'name': name, 'id': id,
            'records': records, 'sort_key': sort_key, 'sort_dir': sort_dir,
            'health_check_id': health_check_id, 'search_mode': search_mode,
        }
        for attr, value in values.items():
            setattr(self, '_' + attr, None)
            if value is not None:
                setattr(self, attr, value)

    @property
    def zone_type(self):
        """Domain type to query.  Range: 'public' (the default); fuzzy search by default."""
        return self._zone_type

    @zone_type.setter
    def zone_type(self, zone_type):
        self._zone_type = zone_type

    @property
    def marker(self):
        """Resource ID to start paging from; empty (the default) means the first page."""
        return self._marker

    @marker.setter
    def marker(self, marker):
        self._marker = marker

    @property
    def limit(self):
        """Number of resources per page.  Range: 0~500; typically 10/20/50; default 500."""
        return self._limit

    @limit.setter
    def limit(self, limit):
        self._limit = limit

    @property
    def offset(self):
        """Paging offset.  Range: 0~2147483647; default 0.  Ignored when marker is set."""
        return self._offset

    @offset.setter
    def offset(self, offset):
        self._offset = offset

    @property
    def line_id(self):
        """Resolution line ID."""
        return self._line_id

    @line_id.setter
    def line_id(self, line_id):
        self._line_id = line_id

    @property
    def tags(self):
        """Resource tags in the form key1,value1|key2,value2 ('|' between tags, ',' between key and value)."""
        return self._tags

    @tags.setter
    def tags(self, tags):
        self._tags = tags

    @property
    def status(self):
        """Record set status.  Range: ACTIVE, ERROR, DISABLE, FREEZE, PENDING_CREATE, PENDING_UPDATE, PENDING_DELETE."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def type(self):
        """Record set type.  Range: A, CNAME, MX, AAAA, TXT, SRV, NS, CAA."""
        return self._type

    @type.setter
    def type(self, type):
        self._type = type

    @property
    def name(self):
        """Substring of the domain name to match; fuzzy search by default; default empty."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def id(self):
        """Substring of the record set ID to match; fuzzy search by default; default empty."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def records(self):
        """Substring of the record set value to match; fuzzy search by default; default empty."""
        return self._records

    @records.setter
    def records(self, records):
        self._records = records

    @property
    def sort_key(self):
        """Sort field for the result list.  Range: name, type; default empty (unsorted)."""
        return self._sort_key

    @sort_key.setter
    def sort_key(self, sort_key):
        self._sort_key = sort_key

    @property
    def sort_dir(self):
        """Sort direction for the result list.  Range: desc, asc; default empty (unsorted)."""
        return self._sort_dir

    @sort_dir.setter
    def sort_dir(self, sort_dir):
        self._sort_dir = sort_dir

    @property
    def health_check_id(self):
        """Health check ID; fuzzy search by default; default empty."""
        return self._health_check_id

    @health_check_id.setter
    def health_check_id(self, health_check_id):
        self._health_check_id = health_check_id

    @property
    def search_mode(self):
        """Search mode for the query conditions.  Range: like (fuzzy), equal (exact); default like."""
        return self._search_mode

    @search_mode.setter
    def search_mode(self, search_mode):
        self._search_mode = search_mode

    def to_dict(self):
        """Returns the model properties as a dict.

        Uses plain Python 3 dict iteration instead of six.iteritems, which
        drops the six dependency without changing the produced mapping.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            else:
                # Mask attributes declared sensitive; pass others through.
                result[attr] = "****" if attr in self.sensitive_list else value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, ListRecordSetsWithLineRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
279,
4798,
198,
11748,
302,
198,
198,
11748,
2237,
628,
628,
198,
198,
4871,
7343,
23739,
50,
1039,
3152,
13949,
18453,
25,
628,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
49213,
... | 1.732683 | 7,998 |
"""
This script is responsible for disabling guest connection to shared folders
Commands used:
- defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server AllowGuestAccess false
"""
from common import CLITieIn
if __name__ == '__main__':
    from utils import get_argparser, get_args
    parser = get_argparser()
    args = get_args(parser)
    # NOTE(review): GuestConnectionToSharedFoldersDisabler is not defined
    # in this excerpt (only CLITieIn is imported above) -- presumably it
    # is defined between the imports and this guard; confirm.
    actor = GuestConnectionToSharedFoldersDisabler(
        sudo_password=args.sudo_password,
    )
    # Exit status mirrors success of the disable operation:
    # 0 on success, 1 on failure.
    result = actor.run()
    if not result:
        exit(1)
    exit(0)
| [
37811,
198,
198,
1212,
4226,
318,
4497,
329,
34909,
8319,
4637,
284,
4888,
24512,
198,
198,
6935,
1746,
973,
25,
198,
198,
12,
26235,
3551,
1220,
23377,
14,
36698,
4972,
14,
11964,
38149,
14,
785,
13,
18040,
13,
82,
2022,
13,
15388,
... | 2.868421 | 190 |
import numpy as np
# Two raw bytes that together encode one 16-bit sample.
a = np.array(([109,15]), dtype=np.uint8)
# Reinterpret the two uint8 values as a single uint16 without copying:
# b shares a's buffer and reads it in native byte order (little-endian
# on most platforms).
b = np.ndarray((1,), dtype=np.uint16, buffer=a)
# 0.0062 is presumably a physical-unit scale factor -- TODO confirm.
print(b*0.0062)
# byteswap() flips the byte order, i.e. decodes the same two bytes with
# the opposite endianness.
c = b.byteswap()
print(c)
print(c*0.0062)
11748,
299,
32152,
355,
45941,
198,
198,
64,
796,
45941,
13,
18747,
19510,
58,
14454,
11,
1314,
46570,
288,
4906,
28,
37659,
13,
28611,
23,
8,
198,
65,
796,
45941,
13,
358,
18747,
19510,
16,
11,
828,
288,
4906,
28,
37659,
13,
28611,... | 2 | 83 |
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Installs BeakerX into a Jupyter and Python environment.'''
import argparse
import json
import os
import pkg_resources
import shutil
import subprocess
import sys
import pathlib
import tempfile
from string import Template
from jupyter_client.kernelspecapp import KernelSpecManager
from jupyter_core import paths
from traitlets.config.manager import BaseJSONConfigManager
from distutils import log
if __name__ == "__main__":
    # NOTE(review): install() is not defined in this excerpt -- presumably
    # defined between the imports and this guard; confirm.
    install()
| [
2,
15069,
2177,
35288,
33993,
5673,
38303,
311,
31033,
11,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.591837 | 294 |
import struct
| [
11748,
2878,
198
] | 4.666667 | 3 |
import torch
import itertools
from typing import List, Tuple, Dict, Generator, Iterable
from tqdm import tqdm
from G2G.model.graph_wrapper import GraphWrapper
from G2G.utils import shortest_as_adj_from_graph_wrapper
| [
11748,
28034,
198,
11748,
340,
861,
10141,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
11,
360,
713,
11,
35986,
11,
40806,
540,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
402,
17,
38,
13,
19849,
13,
34960,
62,
485... | 3.205882 | 68 |
"""Smoother utility.
This file contains the smoothing utility functions. We have a number of
possible smoothers to choose from: windowed average, local weighted regression,
and a causal Savitzky-Golay filter.
Code is courtesy of Dmitry Shemetov, Maria Jahja, and Addison Hu.
These smoothers are all functions that take a 1D numpy array and return a smoothed
1D numpy array of the same length (with a few np.nans in the beginning). See the
docstrings for details.
"""
from typing import Union
import warnings
import numpy as np
import pandas as pd
class Smoother: # pylint: disable=too-many-instance-attributes
"""Smoother class.
This is the smoothing utility class. This class holds the parameter settings for its smoother
methods and provides reasonable defaults. Basic usage can be found in the examples below.
The smoother function takes numpy arrays or pandas Series as input, expecting the values to be
on a regularly-spaced time grid. NANs are ok, as long as the array does not begin with a NAN.
The rest of the NANs will be handled via imputation by default, though this can be turned off.
Parameters
----------
smoother_name: {'savgol', 'moving_average', 'identity', 'left_gauss_linear'}
This variable specifies the smoother. We have four smoothers, currently:
* 'savgol' or a Savtizky-Golay smoother (default)
* 'moving_average' or a moving window average smoother
* 'identity' or the trivial smoother (no smoothing)
* 'left_gauss_linear' or a Gaussian-weight linear regression smoother
Descriptions of the smoothers are available in the doc strings. Full mathematical
details are in: https://github.com/cmu-delphi/covidcast-modeling/ in the folder
'indicator_smoother'.
poly_fit_degree: int
A parameter for the 'savgol' smoother which sets the degree of the polynomial fit.
window_length: int
The length of the fitting window for 'savgol' and the averaging window 'moving_average'.
This value is in the units provided by the data, which are likely to be days for Delphi.
Note that if window_length is smaller than the length of the signal, then only the
imputation method is run on the signal.
gaussian_bandwidth: float or None
If float, all regression is done with Gaussian weights whose variance is
half the gaussian_bandwidth. If None, performs unweighted regression. (Applies
to 'left_gauss_linear' and 'savgol'.)
Here are some reference values (the given bandwidth produces a 95% weighting on
the data of length time window into the past):
time window | bandwidth
7 36
14 144
21 325
28 579
35 905
42 1303
impute: {'savgol', 'zeros', None}
If 'savgol' (default), will fill nan values with a savgol fit on the largest available time
window prior (up to window_length). If 'zeros', will fill nan values with zeros.
If None, leaves the nans in place.
minval: float or None
The smallest value to allow in a signal. If None, there is no smallest value.
Currently only implemented for 'left_gauss_linear'. This should probably not be in the scope
of the smoothing utility.
boundary_method: {'shortened_window', 'identity', 'nan'}
Determines how the 'savgol' method handles smoothing at the (left) boundary, where the past
data length is shorter than the window_length parameter. If 'shortened_window', it uses the
maximum window available; at the very edge (generally up to poly_fit_degree) it keeps the
same value as the raw signal. If 'identity', it just keeps the raw signal. If 'nan', it
writes nans. For the other smoothing methods, 'moving_average' writes nans and
'left_gauss_linear' uses a shortened window.
Methods
----------
smooth: np.ndarray or pd.Series
Takes a 1D signal and returns a smoothed version.
The input and the output have the same length and type.
Example Usage
-------------
Example 1. Apply a rolling average smoother with a window of length 10.
>>> smoother = Smoother(smoother_name='moving_average', window_length=10)
>>> smoothed_signal = smoother.smooth(signal)
Example 2. Smooth a dataframe column.
>>> smoother = Smoother(smoother_name='savgol')
>>> df[col] = df[col].transform(smoother.smooth)
Example 3. Apply a rolling weighted average smoother, with 95% weight on the recent 2 weeks and
a sharp cutoff after 4 weeks.
>>> smoother = Smoother(smoother_name='savgol', poly_fit_degree=0, window_length=28,
gaussian_bandwidth=144)
>>> smoothed_signal = smoother.smooth(signal)
Example 4. Apply a local linear regression smoother (essentially equivalent to
`left_gauss_linear`), with 95% weight on the recent week and a sharp
cutoff after 3 weeks.
>>> smoother = Smoother(smoother_name='savgol', poly_fit_degree=1, window_length=21,
gaussian_bandwidth=36)
>>> smoothed_signal = smoother.smooth(signal)
Example 5. Apply the identity function (simplifies code that iterates through smoothers _and_
expects a copy of the raw data).
>>> smoother = Smoother(smoother_name='identity')
>>> smoothed_signal = smoother.smooth(signal)
"""
def __init__(
    self,
    smoother_name="savgol",
    poly_fit_degree=2,
    window_length=28,
    gaussian_bandwidth=144,  # a ~2 week window
    impute_method=None,
    minval=None,
    boundary_method="shortened_window",
):
    """See class docstring."""
    self.smoother_name = smoother_name
    self.poly_fit_degree = poly_fit_degree
    self.window_length = window_length
    self.gaussian_bandwidth = gaussian_bandwidth
    self.impute_method = self._select_imputer(impute_method, self.smoother_name)
    self.minval = minval
    self.boundary_method = boundary_method

    # Validate the configuration in the same order the attributes were set.
    validations = (
        (self.smoother_name,
         {"savgol", "left_gauss_linear", "moving_average", "identity"},
         "Invalid smoother_name given."),
        (self.impute_method,
         {"savgol", "zeros", "identity"},
         "Invalid impute_method given."),
        (self.boundary_method,
         {"shortened_window", "identity", "nan"},
         "Invalid boundary_method given."),
    )
    for value, allowed, message in validations:
        if value not in allowed:
            raise ValueError(message)
    if self.window_length <= 1:
        raise ValueError("Window length is too short.")

    # Precompute the Savitzky-Golay filter over a causal (past-only) window
    # that includes the current day; other smoothers need no coefficients.
    if smoother_name == "savgol":
        self.coeffs = self.savgol_coeffs(
            -self.window_length + 1, 0, self.poly_fit_degree
        )
    else:
        self.coeffs = None
def smooth(
    self, signal: Union[np.ndarray, pd.Series], impute_order=2
) -> Union[np.ndarray, pd.Series]:
    """Impute and smooth a 1D signal.

    The main entry point: imputes the nans in the signal and then applies
    the configured smoother. Output has the same length and type as input.

    Parameters
    ----------
    signal: np.ndarray or pd.Series
        A 1D signal to be smoothed.
    impute_order: int
        The polynomial order of the fit used for imputation. By default, this is set to
        2.

    Returns
    ----------
    signal_smoothed: np.ndarray or pd.Series
        A smoothed 1D signal. Returns an array of the same type and length as
        the input.
    """
    # A signal with no data at all is returned untouched.
    if np.all(np.isnan(signal)):
        return signal

    # Remember pandas metadata so the result can be rebuilt as a Series.
    as_series = isinstance(signal, pd.Series)
    series_index = signal.index if as_series else None
    values = signal.to_numpy() if as_series else signal

    # Strip the leading run of nans; they are re-attached below.
    first_valid = np.where(~np.isnan(values))[0][0]
    values = values[first_valid:]

    # With too little data to fit, pass the raw values through unchanged.
    if len(values) < self.poly_fit_degree or len(values) == 1:
        signal_smoothed = values.copy()
    else:
        imputed = self.impute(values, impute_order=impute_order)
        signal_smoothed = self._select_smoother()(imputed)

    # Restore the original length by prepending the dropped nans.
    signal_smoothed = np.hstack([np.nan * np.ones(first_valid), signal_smoothed])
    if as_series:
        return pd.Series(signal_smoothed, index=series_index)
    return signal_smoothed
def _select_smoother(self):
    """Return the smoothing callable matching ``self.smoother_name``."""
    name = self.smoother_name
    if name == "identity":
        # The identity smoother passes the signal through untouched.
        return lambda x: x
    if name == "moving_average":
        return self.moving_average_smoother
    if name == "left_gauss_linear":
        return self.left_gauss_linear_smoother
    if name == "savgol":
        return self.savgol_smoother
    raise ValueError(f"invalid smoother {name}")
def impute(self, signal, impute_order=2):
    """Impute the nan values in the signal.

    See the class docstring for an explanation of the impute methods.

    Parameters
    ----------
    signal: np.ndarray
        1D signal to be imputed.
    impute_order: int
        The polynomial order of the fit used for imputation.

    Returns
    -------
    imputed_signal: np.ndarray
        Imputed signal.

    Raises
    ------
    ValueError
        If the savgol method is requested on a signal that begins with nan,
        or if ``self.impute_method`` is not a recognized method.
    """
    if self.impute_method == "savgol":
        # We cannot impute if the signal begins with a NaN (there is no information to go by).
        # To preserve input-output array lengths, this util will not drop NaNs for you.
        if np.isnan(signal[0]):
            raise ValueError("The signal should not begin with a nan value.")
        imputed_signal = self.savgol_impute(signal, impute_order)
    elif self.impute_method == "zeros":
        imputed_signal = np.nan_to_num(signal)
    elif self.impute_method == "identity":
        imputed_signal = np.copy(signal)
    else:
        # Previously an unrecognized method fell through to an
        # UnboundLocalError on the return; fail loudly instead.
        raise ValueError(f"Invalid impute_method {self.impute_method} given.")
    return imputed_signal
def moving_average_smoother(self, signal):
    """Compute a trailing moving average on the signal.

    The signal is left-padded with nans, so any window that touches the
    padding averages to nan; this marks the left boundary region.

    Parameters
    ----------
    signal: np.ndarray
        Input array.

    Returns
    -------
    signal_smoothed: np.ndarray
        An array with the same length as arr, but the first window_length-1
        entries are np.nan.

    Raises
    ------
    ValueError
        If ``self.window_length`` is not an int.
    """
    if not isinstance(self.window_length, int):
        # Fixed message: it used to reference a nonexistent "k" parameter.
        raise ValueError("window_length must be int.")
    # Pad on the left so the "valid" convolution yields one output per input.
    signal_padded = np.append(np.nan * np.ones(self.window_length - 1), signal)
    signal_smoothed = (
        np.convolve(
            signal_padded, np.ones(self.window_length, dtype=int), mode="valid"
        )
        / self.window_length
    )
    return signal_smoothed
def left_gauss_linear_smoother(self, signal):
    """Smooth the y-values using a local linear regression with Gaussian weights.

    DEPRECATED: This method is available to help sanity check the 'savgol' method.
    Use 'savgol' with poly_fit_degree=1 and the appropriate gaussian_bandwidth instead.

    At each time t, we use the data from times 1, ..., t-dt, weighted
    using the Gaussian kernel, to produce the estimate at time t.

    Parameters
    ----------
    signal: np.ndarray
        A 1D signal.

    Returns
    ----------
    signal_smoothed: np.ndarray
        A smoothed 1D signal.
    """
    warnings.warn(
        "Use the savgol smoother with poly_fit_degree=1 instead.",
        DeprecationWarning,
    )
    n = len(signal)
    signal_smoothed = np.zeros_like(signal)
    # A is the regression design matrix
    A = np.vstack([np.ones(n), np.arange(n)]).T  # pylint: disable=invalid-name
    for idx in range(n):
        # Gaussian weights on the current and all past points only, so
        # the filter is causal.
        weights = np.exp(
            -((np.arange(idx + 1) - idx) ** 2) / self.gaussian_bandwidth
        )
        # Weighted normal equations: (A^T W A) beta = A^T W y.
        AwA = np.dot(  # pylint: disable=invalid-name
            A[: (idx + 1), :].T * weights, A[: (idx + 1), :]
        )
        Awy = np.dot(  # pylint: disable=invalid-name
            A[: (idx + 1), :].T * weights, signal[: (idx + 1)].reshape(-1, 1)
        )
        try:
            beta = np.linalg.solve(AwA, Awy)
            # Evaluate the fitted line at the current (right-most) time.
            signal_smoothed[idx] = np.dot(A[: (idx + 1), :], beta)[-1]
        except np.linalg.LinAlgError:
            # NOTE(review): `self.impute` is a bound method, hence always
            # truthy, so the np.nan branch is unreachable — this looks
            # like it was meant to test the impute method *name*; confirm.
            signal_smoothed[idx] = (
                signal[idx]  # pylint: disable=using-constant-test
                if self.impute
                else np.nan
            )
    if self.minval is not None:
        # Clip from below at the configured floor value.
        signal_smoothed[signal_smoothed <= self.minval] = self.minval
    return signal_smoothed
def savgol_predict(self, signal, poly_fit_degree, nr):
    """Predict a single value using the savgol method.

    Fits a poly_fit_degree polynomial through the signal values and
    evaluates it at position nr relative to the final entry: nr=0 smooths
    the last value of the signal, while nr=-1 anticipates the value one
    step past the end of the signal.

    Parameters
    ----------
    signal: np.ndarray
        A 1D signal to smooth.
    poly_fit_degree: int
        The degree of the polynomial fit.
    nr: int
        An integer that determines the position of the predicted value relative to the signal.

    Returns
    ----------
    predicted_value: float
        The anticipated value that comes after the end of the signal based on a polynomial fit.
    """
    # The fit window spans [nr + 1 - len(signal), nr]; the filter
    # coefficients reduce the prediction to a single dot product.
    window_left = nr + 1 - len(signal)
    return signal @ self.savgol_coeffs(window_left, nr, poly_fit_degree)
def savgol_coeffs(self, nl, nr, poly_fit_degree):
    """Solve for the Savitzky-Golay coefficients.

    The coefficients c_i give a filter so that
    y = sum_{i=-{n_l}}^{n_r} c_i x_i
    is the value at 0 (thus the constant term) of the polynomial fit
    through the points {x_i}. With A the (optionally Gaussian-weighted)
    polynomial design matrix, the pseudo-inverse
    (A.T @ W @ A)^(-1) @ (A.T @ W) maps data to fitted polynomial
    coefficients, so the filter is exactly its first row. This is
    currently done via a full inversion, which can be optimized.

    Parameters
    ----------
    nl: int
        The left window bound for the polynomial fit, inclusive.
    nr: int
        The right window bound for the polynomial fit, inclusive.
    poly_fit_degree: int
        The degree of the polynomial to be fit.

    Returns
    ----------
    coeffs: np.ndarray
        A vector of coefficients of length nr - nl + 1 that determines the savgol
        convolution filter.
    """
    if nl >= nr:
        raise ValueError("The left window bound should be less than the right.")
    if nr > 0:
        # A window extending past position 0 uses "future" values.
        warnings.warn("The filter is no longer causal.")

    positions = np.arange(nl, nr + 1)
    A = np.vstack(  # pylint: disable=invalid-name
        [positions ** j for j in range(poly_fit_degree + 1)]
    ).T
    if self.gaussian_bandwidth is None:
        mat_inverse = np.linalg.inv(A.T @ A) @ A.T
    else:
        weights = np.exp(-(positions ** 2) / self.gaussian_bandwidth)
        mat_inverse = np.linalg.inv((A.T * weights) @ A) @ (A.T * weights)

    # The previous implementation recovered this row one basis vector at
    # a time; (mat_inverse @ e_i)[0] is simply mat_inverse[0, i].
    return np.array(mat_inverse[0, :])
def savgol_smoother(self, signal):  # pylint: disable=inconsistent-return-statements
    """Smooth signal with the savgol smoother.

    Convolves the 1D signal with the precomputed Savitzky-Golay filter,
    then patches up the (left) boundary, where fewer than window_length
    observations exist, according to self.boundary_method (see the class
    docstring for the boundary methods).

    Parameters
    ----------
    signal: np.ndarray
        A 1D signal.

    Returns
    ----------
    signal_smoothed: np.ndarray
        A smoothed 1D signal of same length as signal.
    """
    # np.convolve flips its second argument, so pre-reverse the filter.
    kernel = np.array(list(reversed(self.coeffs)))
    # Left-pad with nans so each input position yields one output; any
    # window touching the padding comes out nan (the boundary region).
    padded = np.append(np.nan * np.ones(len(self.coeffs) - 1), signal)
    signal_smoothed = np.convolve(padded, kernel, mode="valid")

    # Boundary handling:
    # - "nan": keep the nans produced by the padded convolution
    # - "identity": copy the raw signal over the boundary region
    # - "shortened_window" (default): refit savgol on the available prefix
    if self.boundary_method == "nan":
        return signal_smoothed
    for pos in range(min(len(self.coeffs), len(signal))):
        if pos == 0 or self.boundary_method == "identity":
            signal_smoothed[pos] = signal[pos]
        else:
            try:
                signal_smoothed[pos] = self.savgol_predict(
                    signal[: pos + 1], self.poly_fit_degree, 0
                )
            except np.linalg.LinAlgError:
                # Very near the edge the design matrix is often singular;
                # fall back to the raw value there.
                signal_smoothed[pos] = signal[pos]
    return signal_smoothed
def savgol_impute(self, signal, impute_order):
    """Impute the nan values in signal using savgol.

    Fills each nan with a polynomial extrapolation fit on a rolling
    window of up to window_length immediately-preceding points. Close to
    the left boundary the window (and, if necessary, the polynomial
    degree) shrinks to the available data. Runs of adjacent nans are
    filled left to right, so later fits may use previously imputed
    values.

    Parameters
    ----------
    signal: np.ndarray
        A 1D signal to be imputed.
    impute_order: int
        The polynomial order of the fit used for imputation.

    Returns
    ----------
    signal_imputed: np.ndarray
        An imputed 1D signal.
    """
    if impute_order > self.window_length:
        raise ValueError("Impute order must be smaller than window length.")

    signal_imputed = np.copy(signal)
    for pos in np.where(np.isnan(signal_imputed))[0]:
        if pos >= self.window_length:
            # Interior: extrapolate from a full-length trailing window.
            trailing = signal_imputed[pos - self.window_length : pos]
            signal_imputed[pos] = self.savgol_predict(trailing, impute_order, -1)
        elif pos == 1:
            # A single known value at the edge is simply carried forward.
            signal_imputed[pos] = signal_imputed[pos - 1]
        else:
            # Short prefix: fit on everything so far, capping the degree
            # at what the available points can support.
            signal_imputed[pos] = self.savgol_predict(
                signal_imputed[:pos], min(pos - 1, impute_order), -1
            )
    return signal_imputed
| [
37811,
50,
5908,
847,
10361,
13,
198,
198,
1212,
2393,
4909,
262,
32746,
722,
10361,
5499,
13,
775,
423,
257,
1271,
286,
198,
79,
4733,
32686,
82,
284,
3853,
422,
25,
4324,
276,
2811,
11,
1957,
26356,
20683,
11,
198,
392,
257,
26558... | 2.335011 | 8,943 |
import string
import random
import secrets
import argparse

# Parse the desired password length from the command line.
parser = argparse.ArgumentParser(description='generate a password with x amount of randomised ascii characters')
parser.add_argument('x', metavar='N', type=int, help='length of desired password string')
args = parser.parse_args()
x = args.x

# Draw each character from letters, digits and punctuation. Use the
# `secrets` module rather than `random`: passwords are security-sensitive
# and `random` is not cryptographically strong.
password_characters = string.ascii_letters + string.digits + string.punctuation
secret_key = ''.join(secrets.choice(password_characters) for i in range(x))
# Python 3 print function (the original used the Python 2 print statement,
# a syntax error under Python 3).
print(secret_key)
| [
11748,
4731,
198,
11748,
4738,
198,
11748,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
7,
11213,
11639,
8612,
378,
257,
9206,
351,
2124,
2033,
286,
4738,
1417,
355,
979,
72,
3435,
11537,
198,
48610,
13,
2860,... | 3.382353 | 136 |
# Copyright 2020 The Kale Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from kale import Pipeline, Step
from kale.static_analysis import dependencies
@pytest.mark.parametrize("code,target", [
    ('', []),
    ('a = b', ['b']),
    ('a = foo(b)', ['foo', 'b']),
    ('a = b\nfoo(b)', ['b', 'foo']),
    ('foo(b)', ['foo', 'b'])
])
def test_pyflakes_report(code, target):
    """Check that pyflakes_report finds the expected undefined names."""
    found = dependencies.pyflakes_report(code)
    assert sorted(found) == sorted(target)
def test_detect_fns_free_variables():
    """A function's globals are reported as its free variables."""
    code = '''
x = 5
def foo():
    print(math.sqrt(x))
'''
    expected = {"foo": ({"x", "math"}, {})}
    assert dependencies.detect_fns_free_variables(code) == expected
def test_detect_fns_free_variables_with_imports():
    """Names supplied by global imports are not reported as free."""
    imports_and_functions = """
import math
"""
    code = '''
x = 5
def foo():
    print(math.sqrt(x))
'''
    # `math` is provided by the globals, so only `x` remains free.
    expected = {"foo": ({"x"}, {})}
    assert dependencies.detect_fns_free_variables(
        code, imports_and_functions
    ) == expected
def test_dependencies_detection_free_variable(dummy_nb_config):
    """Test dependencies detection with free variables."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ['''
x = 5
''']),
        ("step2", ['''
def foo():
    print(x)
''']),
        ("step3", ['''
foo()
''']),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # Expected (ins, outs) per step after marshalling analysis.
    expected = {
        "step1": ([], ["x"]),
        "step2": (["x"], ["foo", "x"]),
        "step3": (["foo", "x"], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_inner_function(dummy_nb_config):
    """Test dependencies detection with inner functions."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ["x = 5"]),
        ("step2", ['''
def foo():
    def bar(x):
        print(x)
    bar(5)
''']),
        ("step3", ['''
foo()
print(x)
''']),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # `bar`'s x is a parameter, so step2 does not depend on the global x.
    expected = {
        "step1": ([], ['x']),
        "step2": ([], ['foo']),
        "step3": (['foo', 'x'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_inner_function_free_variable(dummy_nb_config):
    """Test dependencies detection with free variables and inner function."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ["x = 5"]),
        ("step2", ['''
def foo():
    def bar():
        print(x)
''']),
        ("step3", ["foo()"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # The inner bar() closes over the global x, so step2 consumes x.
    expected = {
        "step1": ([], ['x']),
        "step2": (['x'], ['foo', 'x']),
        "step3": (['foo', 'x'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_with_parameter(dummy_nb_config):
    """Test dependencies detection with function with parameter."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ["x = 5"]),
        ("step2", ['''
def foo(x):
    def bar():
        print(x)
''']),
        ("step3", ["foo(5)"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # x is shadowed by foo's parameter, so no step consumes the global x.
    expected = {
        "step1": ([], []),
        "step2": ([], ['foo']),
        "step3": (['foo'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_with_globals(dummy_nb_config):
    """Test dependencies detection with inner function and globals."""
    imports_and_functions = "import math"
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ["x = 5"]),
        ("step2", ['''
def foo(x):
    def bar():
        math.sqrt(x)
    bar()
''']),
        ("step3", ["foo(5)"]),
    ]
    # Every step sees the shared imports prepended to its own source.
    for name, src in step_sources:
        pipeline.add_step(Step(name=name,
                               source=_prepend_to_source(src,
                                                         imports_and_functions)))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline, imports_and_functions)

    expected = {
        "step1": ([], []),
        "step2": ([], ['foo']),
        "step3": (['foo'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_with_pipeline_parameters(dummy_nb_config):
    """Test dependencies are detected with pipeline parameters and globals."""
    imports_and_functions = "import math"
    pipeline = Pipeline(dummy_nb_config)
    pipeline.pipeline_parameters = {"y": (5, 'int')}
    step_sources = [
        ("step1", ["x = 5"]),
        ("step2", ['''
def foo(x):
    def bar():
        math.sqrt(x + y)
    bar()
''']),
        ("step3", ["foo(5)"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name,
                               source=_prepend_to_source(src,
                                                         imports_and_functions)))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline, imports_and_functions)

    # (ins, outs, expected pipeline parameters or None) per step.
    expected = {
        "step1": ([], [], None),
        "step2": ([], ['foo'], {"y": (5, 'int')}),
        "step3": (['foo'], [], {"y": (5, 'int')}),
    }
    for name, (ins, outs, params) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
        if params is not None:
            assert step.parameters == params
def test_dependencies_detection_with_try_except(dummy_nb_config):
    """Test dependencies are detected with functions inside try."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ['''
x = 5
y = 6
''']),
        ("step2", ['''
try:
    def foo():
        print(x)
    def bar():
        print(y)
except:
    pass
''']),
        ("step3", ['''
foo()
bar()
''']),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # Functions defined inside a try block are still detected.
    expected = {
        "step1": ([], ['x', 'y']),
        "step2": (['x', 'y'], ['bar', 'foo', 'x', 'y']),
        "step3": (['bar', 'foo', 'x', 'y'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_recursive(dummy_nb_config):
    """Test dependencies are detected even with a chain of functions calls."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ["x = 5"]),
        ("step2", ['''
def foo():
    print(x)
def bar():
    foo()
''']),
        ("step3", ["bar()"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # bar -> foo -> x: the whole chain is carried to step3.
    expected = {
        "step1": ([], ['x']),
        "step2": (['x'], ['bar', 'foo', 'x']),
        "step3": (['bar', 'foo', 'x'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_dependencies_detection_recursive_different_steps(dummy_nb_config):
    """Test dependencies are detected even with a chain of functions calls."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step1", ['''
x = 5
def foo():
    print(x)
''']),
        ("step2", ['''
def bar():
    foo()
''']),
        ("step3", ["bar()"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step1", "step2"), ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # foo lives in step1 but is only called (via bar) downstream.
    expected = {
        "step1": ([], ['foo', 'x']),
        "step2": (['foo', 'x'], ['bar', 'foo', 'x']),
        "step3": (['bar', 'foo', 'x'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_deps_detection_recursive_different_steps_long(dummy_nb_config):
    """Test dependencies are detected even with a long chain of fns calls."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step0", ['''
x = 5
def init():
    print(x)
''']),
        ("step1", ['''
def foo():
    init()
''']),
        ("step2", ['''
def bar():
    foo()
''']),
        ("step3", ["bar()"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    for tail, head in [("step0", "step1"), ("step1", "step2"),
                       ("step2", "step3")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    # bar -> foo -> init -> x: each link accumulates down the chain.
    expected = {
        "step0": ([], ['init', 'x']),
        "step1": (['init', 'x'], ['foo', 'init', 'x']),
        "step2": (['foo', 'init', 'x'], ['bar', 'foo', 'init', 'x']),
        "step3": (['bar', 'foo', 'init', 'x'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
def test_deps_detection_recursive_different_steps_branch(dummy_nb_config):
    """Test dependencies when fns are passed from multiple branches."""
    pipeline = Pipeline(dummy_nb_config)
    step_sources = [
        ("step0", ['''
x = 5
y = 6
''']),
        ("step_l", ['''
def foo():
    print(x)
''']),
        ("step_r", ['''
def bar():
    print(y)
''']),
        ("step_m", ['''
def result():
    foo()
    bar()
''']),
        ("step_f", ["result()"]),
    ]
    for name, src in step_sources:
        pipeline.add_step(Step(name=name, source=src))
    # Diamond: step0 fans out to left/right branches that merge in step_m.
    for tail, head in [("step0", "step_l"), ("step0", "step_r"),
                       ("step_l", "step_m"), ("step_r", "step_m"),
                       ("step_m", "step_f")]:
        pipeline.add_edge(tail, head)

    dependencies.dependencies_detection(pipeline)

    expected = {
        "step0": ([], ['x', 'y']),
        "step_l": (['x'], ['foo', 'x']),
        "step_r": (['y'], ['bar', 'y']),
        "step_m": (['bar', 'foo', 'x', 'y'],
                   ['bar', 'foo', 'result', 'x', 'y']),
        "step_f": (['bar', 'foo', 'result', 'x', 'y'], []),
    }
    for name, (ins, outs) in expected.items():
        step = pipeline.get_step(name)
        assert sorted(step.ins) == ins
        assert sorted(step.outs) == outs
| [
2,
220,
15069,
12131,
383,
509,
1000,
46665,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.435409 | 6,038 |
"""
spawn FTP get and put GUIs no matter what directory I'm run from; os.getcwd is not
necessarily the place this script lives; could also hardcode path from $PP4EHOME,
or guessLocation; could also do: [from PP4E.launchmodes import PortableLauncher,
PortableLauncher('getfilegui', '%s/getfilegui.py' % mydir)()], but need the DOS
console pop up on Windows to view status messages which describe transfers made;
"""
import os, sys
print('Running in: ', os.getcwd())
# PP3E
# from PP4E.Launcher import findFirst
# mydir = os.path.split(findFirst(os.curdir, 'PyFtpGui.pyw'))[0]
# PP4E
from PP4E.Tools.find import findlist
mydir = os.path.dirname(findlist('PyFtpGui.pyw', startdir=os.curdir)[0])
if sys.platform[:3] == 'win':
os.system('start %s\getfilegui.py' % mydir)
os.system('start %s\putfilegui.py' % mydir)
else:
os.system('python %s/getfilegui.py &' % mydir)
os.system('python %s/putfilegui.py &' % mydir)
| [
37811,
201,
198,
48183,
45854,
651,
290,
1234,
19348,
3792,
645,
2300,
644,
8619,
314,
1101,
1057,
422,
26,
28686,
13,
1136,
66,
16993,
318,
407,
220,
201,
198,
10789,
3093,
262,
1295,
428,
4226,
3160,
26,
220,
714,
635,
1327,
8189,
... | 2.546419 | 377 |
from phiorm.models.query import query
from phiorm.util import tree
| [
6738,
872,
72,
579,
13,
27530,
13,
22766,
1330,
12405,
198,
6738,
872,
72,
579,
13,
22602,
1330,
5509,
628,
198
] | 3.285714 | 21 |
"""Unit tests for pattern.py."""
import unittest
from iast.python.default import parse, make_pattern, Num, BinOp, Add, Mult
from iast.pattern import *
from iast.pattern import match_step
# Run the pattern test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
37811,
26453,
5254,
329,
3912,
13,
9078,
526,
15931,
628,
198,
11748,
555,
715,
395,
198,
198,
6738,
1312,
459,
13,
29412,
13,
12286,
1330,
21136,
11,
787,
62,
33279,
11,
31835,
11,
20828,
18257,
11,
3060,
11,
7854,
198,
6738,
1312,
... | 2.962963 | 81 |
from __future__ import division
from __future__ import print_function
import os
from skimage import io
from torch.utils.data import Dataset
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
28686,
198,
198,
6738,
1341,
9060,
1330,
33245,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
16092,
292,
316,
628
] | 3.763158 | 38 |
#!/usr/bin/env python3
###########################################
# #
# "Pype" #
# Simple file sharing server, #
# to upload and download file #
# from CLI #
# #
# Etienne SELLAN #
# 17/10/2018 #
# #
###########################################
import sys
import time
import signal
import threading
from threading import Thread
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import os
import binascii
import shutil
import base64
import math
import hashlib
# SETTINGS BEGIN
# Global configuration for the Pype file-sharing server.
settings = {}
settings["url"] = "https://pype.sellan.fr"  # public base URL advertised to clients
settings["listen_address"] = "0.0.0.0"  # bind on all network interfaces
settings["port"] = 80
settings["directory"] = "/tmp"  # where uploaded files are stored
settings["delete_limit"] = 24 # hours
settings["cleaning_interval"] = 1 # hours
settings["id_length"] = 2 # bytes
settings["max_name_length"] = 64 # chars
settings["max_file_size"] = (10*1000*1000*1000) # bytes
# SETTINGS END

if __name__ == "__main__":
    # Serve HTTP in a daemon thread so the main thread stays free for
    # signal handling. NOTE(review): run_on, initialisation, set_interval
    # and clean_files are presumably defined elsewhere in this file —
    # they are not visible in this excerpt; confirm before refactoring.
    server = Thread(target=run_on, args=[int(settings["port"])])
    server.daemon = True
    server.start()
    initialisation()
    # Launch auto cleaning interval
    set_interval(clean_files, (int(settings["cleaning_interval"]) * 3600))
    # Block forever; all work happens in the server and cleaning threads.
    signal.pause()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
29113,
7804,
21017,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.241221 | 655 |
from .import hgrid, auth, config # noqa | [
6738,
764,
11748,
289,
25928,
11,
6284,
11,
4566,
220,
1303,
645,
20402
] | 3.076923 | 13 |
import tests.unit.cli.commands.command_testing as command_testing
import lmctl.drivers.lm.base as lm_drivers
import lmctl.cli.commands.deployment_location as deployment_cmds
import tempfile
import shutil
import os
import json
import yaml
from unittest.mock import patch
from tests.common.simulations.lm_simulator import LmSimulator
| [
11748,
5254,
13,
20850,
13,
44506,
13,
9503,
1746,
13,
21812,
62,
33407,
355,
3141,
62,
33407,
198,
11748,
300,
76,
34168,
13,
36702,
13,
75,
76,
13,
8692,
355,
300,
76,
62,
36702,
198,
11748,
300,
76,
34168,
13,
44506,
13,
9503,
... | 3.223301 | 103 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext_lazy as _
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
62,
75,
1258... | 2.723404 | 47 |
#!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
"""
Utilities for validating timestamps in import files
"""
import sys
import yaml
import os
import re
import datetime
import pytz
from PNGinfo import PNGinfo
import PIL.Image
import PIL.ExifTags
from csv import DictReader
from dateutil.parser import parse as dateparser
def get_timestamp_from_filename(filename, time_format, regex=None):
    """
    Returns a UTC timezone-aware time parsed from the filename given the time format & regex

    :param filename: the actual filename to parse for time
    :param time_format: 'seconds', 'microseconds' or 'dateparser'
    :param regex: the last pattern matched in the regex should hold the time
                  (required only for the 'dateparser' format)
    :return: timezone-aware datetime in UTC
    :raises ValueError: if the time string cannot be found, if 'dateparser'
        is requested without a regex, or if time_format is unrecognized
    """
    # Some filenames contain float seconds, some int microseconds.
    # Patterns are raw strings: '\d' in a normal string literal is an
    # invalid escape sequence and warns on newer Python versions.
    result = None
    if time_format == 'seconds':
        # Epoch seconds with a fractional part, e.g. 1234567890.1234
        timestamp_pattern = r'(\d{10}\.\d{4,10})'
        match = re.search(timestamp_pattern, filename)
        if match:
            timestamp_string = match.groups()[-1]
            result = datetime.datetime.utcfromtimestamp(float(timestamp_string)).replace(tzinfo=pytz.UTC)
        else:
            raise ValueError('Could not find expected time string in %s' % filename)
    elif time_format == 'microseconds':
        # 16-digit integer epoch microseconds
        timestamp_pattern = r'(\d{16})'
        match = re.search(timestamp_pattern, filename)
        if match:
            timestamp_string = match.groups()[-1]
            result = datetime.datetime.utcfromtimestamp(1e-6 * int(timestamp_string)).replace(tzinfo=pytz.UTC)
        else:
            raise ValueError('Could not find expected time string in %s' % filename)
    elif time_format == 'dateparser':
        if regex:
            timestamp_pattern = regex
            match = re.search(timestamp_pattern, filename)
            if match:
                # With exactly two capture groups, the date is split across
                # both (e.g. date + time); concatenate before parsing.
                if regex.count('(') == 2:
                    timestamp_string = match.group(1) + match.group(2)
                else:
                    timestamp_string = match.groups()[-1]
                zoneless_timestamp = dateparser(timestamp_string)
                result = pytz.utc.localize(zoneless_timestamp)
            else:
                raise ValueError('Could not find expected time string in %s' % filename)
        else:
            raise ValueError('dateparser configuration requires regex: %s' % filename)
    else:
        raise ValueError('invalid type for filename timestamp: %s' % time_format)
    return result
if __name__ == '__main__':
    # NOTE(review): this script uses Python 2 print statements throughout;
    # it will not run under Python 3 without conversion.
    import optparse
    parser = optparse.OptionParser('usage: %prog [options] <source_root_dir_for_flight>')
    parser.add_option('-c', '--configfile',
                      help='yaml config file for getting timestamps from files')
    parser.add_option('-t', '--test',
                      action='store_true', default=False,
                      help='Run in test mode')
    parser.add_option('-f', '--force',
                      action='store_true', default=False,
                      help='Force creation of a flight even if invalid timestamps are found')
    parser.add_option('-m', '--make_flight',
                      action='store_true', default=False,
                      help='Create a flight for the given directory')
    parser.add_option('-p', '--plot',
                      action='store_true', default=False,
                      help='Plot results to pdf, filename uses the import directory name')
    parser.add_option('-q', '--quiet',
                      action='store_true', default=False,
                      help='Silence most printouts, only include times')
    parser.add_option('-d', '--dirname_pattern', default=None,
                      help='pattern regex for dirname matching')
    opts, args = parser.parse_args()
    if len(args)<1:
        parser.print_help()
        sys.exit(0)
    global QUIET
    QUIET = opts.quiet
    # the top level directory should contain all the data for a flight
    flight_dir = args[0]
    print 'flight_dir: %s' % flight_dir
    # just the final directory name, not the full path to it
    # have to accommodate the path ending in '/' or not
    basename = os.path.basename(os.path.normpath(flight_dir))
    print 'basename: %s' % basename
    # Get start time from root directory
    start_time = None
    if opts.dirname_pattern:
        # NOTE(review): get_timestamp_from_dirname is not defined in the
        # visible portion of this file -- confirm it exists elsewhere.
        start_time = get_timestamp_from_dirname(flight_dir, opts.dirname_pattern)
        if start_time is None:
            print 'ERROR: Expected the source root to be in the form %s' % opts.dirname_pattern
            raise ValueError('Cannot get a valid timestamp from source root %s' % flight_dir)
        if not QUIET:
            print 'Flight dir timestamp is %s' % start_time
    # If we were given a timestamp validation config, go validate timestamps for all data
    if opts.configfile is not None:
        # NOTE(review): TimestampValidator is not defined in the visible
        # portion of this file -- confirm it exists elsewhere.
        validator = TimestampValidator(opts.configfile)
        validator.find_files(flight_dir)
        if not opts.test:
            validator.process_files()
            if not QUIET:
                validator.print_stats()
            timestamps = [t[1] for t in validator.timestamps]
            first_data_time = min(timestamps)
            last_data_time = max(timestamps)
            print 'Timestamps for', basename
            if start_time:
                print 'start time: ', start_time
            print 'first data time:', first_data_time
            print 'last data time: ', last_data_time
            if start_time:
                for name, timestamp in validator.timestamps:
                    if timestamp < start_time:
                        print 'Error: %s in %s is before start time %s' % (timestamp, name, start_time)
    # If we were asked to create a flight, create it
    # Note that we cannot make a flight with an end time if we didn't get a config
    if opts.make_flight:
        try:
            # get or create a flight for that source root directory
            import django
            django.setup()
            from django.conf import settings
            from xgds_core.flightUtils import get_or_create_flight_with_source_root
            dirname = os.path.basename(os.path.normpath(flight_dir))
            suffix = dirname[dirname.find('_'):]
            # NOTE(review): last_data_time is only bound when a configfile
            # was supplied -- this branch raises NameError without one.
            local_start_time = start_time.astimezone(pytz.timezone(settings.TIME_ZONE))
            name = '%s%s' % (local_start_time.strftime('%Y%m%d'), suffix)
            flight = get_or_create_flight_with_source_root(flight_dir, start_time, last_data_time, name)
            print 'Created or got flight %s' % flight
        except ImportError as e:
            print 'Error: Cannot create a flight'
            print e
    # If we were asked to make a plot, make it
    if opts.plot:
        pdffile = 'timestamps_%s.pdf' % basename
        print 'plotting to', pdffile
        validator.plot_times(pdffile)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
220,
11593,
33,
43312,
62,
43,
2149,
24290,
834,
198,
2,
15069,
357,
66,
8,
1853,
11,
1578,
1829,
5070,
11,
355,
7997,
416,
262,
198,
2,
22998,
286,
262,
2351,
15781,
261,
2306,... | 2.41489 | 3,143 |
import bc_utils
from logistic_regression import logistic_regression_2D
from nn_classification import nn_binary_classification_2D

# Shared hyper-parameters for both classifiers.
LEARNING_RATE = 0.1
NUM_EPOCHS = 20000

bc_utils.create_output_dir()
results_file = open("output/results.txt", "w")

# Linear data: train/test splits drawn independently, 300 samples each.
X_train, Y_train = bc_utils.generate_linear_data(300)
bc_utils.plot_data(X_train, Y_train, "data_linear_train.png")
X_test, Y_test = bc_utils.generate_linear_data(300)
bc_utils.plot_data(X_test, Y_test, "data_linear_test.png")

J_train, J_test = logistic_regression_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "lr_db_linear_train.png")
results_file.write("Logistic Regression - linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
J_train, J_test = nn_binary_classification_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "nn_db_linear_train.png")
results_file.write("NN Classification - linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")

# Non-linear data: same protocol on a non-linearly-separable set.
X_train, Y_train = bc_utils.generate_non_linear_data(300)
bc_utils.plot_data(X_train, Y_train, "data_non_linear_train.png")
X_test, Y_test = bc_utils.generate_non_linear_data(300)
bc_utils.plot_data(X_test, Y_test, "data_non_linear_test.png")

J_train, J_test = logistic_regression_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "lr_db_non_linear_train.png")
results_file.write("Logistic Regression - non-linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")
J_train, J_test = nn_binary_classification_2D(X_train, Y_train, X_test, Y_test, LEARNING_RATE, NUM_EPOCHS, "nn_db_non_linear_train.png")
results_file.write("NN Classification - non-linear data> J_train: " + str(J_train) + ", J_test: " + str(J_test) + "\n")

# Close explicitly so all results are flushed to disk; the original relied
# on interpreter shutdown to close the file handle.
results_file.close()
| [
11748,
47125,
62,
26791,
198,
6738,
2604,
2569,
62,
2301,
2234,
1330,
2604,
2569,
62,
2301,
2234,
62,
17,
35,
198,
6738,
299,
77,
62,
4871,
2649,
1330,
299,
77,
62,
39491,
62,
4871,
2649,
62,
17,
35,
198,
198,
2538,
1503,
15871,
6... | 2.414266 | 729 |
from unittest import TestCase
from .problem_11_1_merge_sorted_files import *
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
764,
45573,
62,
1157,
62,
16,
62,
647,
469,
62,
82,
9741,
62,
16624,
1330,
1635,
628,
198
] | 2.925926 | 27 |
# (C) Copyright 2014-2016 Hewlett Packard Enterprise Development LP
# Copyright 2017 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from monascaclient.common import monasca_manager
| [
2,
357,
34,
8,
15069,
1946,
12,
5304,
30446,
15503,
6400,
446,
14973,
7712,
18470,
198,
2,
15069,
2177,
376,
52,
41,
2043,
12564,
40880,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 3.819672 | 183 |
# Copyright (c) Zhixian MA <zxma_sjtu@qq.com>
# MIT license
"""
Some I/O and processing tools are provied in this utils module.
Methods
-------
reg2mat:
Read point sources list from the region file, and translate it into np.ndarray
mat2reg:
Print PS list matrix to ds9 region files
compare:
Compare detected PS with the references
img2mat:
Read image from the provided path
logManager:
Configure logging style <to be strengthed>
References
------------
[1] scipy.ndimage
http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.
imread.html#scipy.ndimage.imread
"""
import os
import sys
import logging
import numpy as np
import pyregion
from astropy.io import fits
from scipy.ndimage import imread
# Defination of functions
def reg2mat(filename):
    """
    Read a ds9 region file and return its point sources as a matrix.

    Uses the pyregion module to parse the file.  Returns 0 when the file
    does not exist (kept for backward compatibility with callers).
    """
    # Bail out early with the legacy sentinel if the path is missing.
    if not os.path.exists(filename):
        return 0
    region_list = pyregion.open(filename)
    # One row per region: its raw coordinate list.
    coords = [region.coord_list for region in region_list]
    return np.array(coords)
def mat2reg(ps, outfile, pstype='elp'):
    """
    Transform a point-source matrix into a ds9 region file.

    Parameters
    ----------
    ps: np.ndarray
        Two dimensional matrix of point sources, one row per source.
        Columns used depend on the region type: 'elp' uses five
        (x, y, major, minor, angle), 'cir' three (x, y, radius),
        'box' four (x, y, width, height).
    outfile: str
        Name of the output region file.
    pstype: str
        Type of region: 'elp', 'cir' or 'box'.  Any unrecognized value
        falls through to the 'box' format (original behavior preserved).
    """
    # Context manager guarantees the file is flushed and closed; the
    # original handle was never closed explicitly.
    with open(outfile, 'w+') as reg:
        # Fixed: compare strings with == instead of `is`, which only
        # appeared to work because CPython interns short string literals.
        if pstype == 'elp':
            for i in range(ps.shape[0]):
                ps_str = ('ellipse(' + str(ps[i, 0]) + ',' + str(ps[i, 1]) +
                          ',' + str(ps[i, 2]) + ',' + str(ps[i, 3]) + ',' +
                          str(ps[i, 4]) + ')\n')
                reg.write(ps_str)
        elif pstype == 'cir':
            for i in range(ps.shape[0]):
                ps_str = 'circle(' + str(ps[i, 0]) + ',' + str(ps[i, 1]) + ',' + str(ps[i, 2]) + ')\n'
                reg.write(ps_str)
        else:
            for i in range(ps.shape[0]):
                ps_str = 'box(' + str(ps[i, 0]) + ',' + str(ps[i, 1]) + ',' + str(ps[i, 2]) + ',' + str(ps[i, 3]) + ',0)\n'
                reg.write(ps_str)
def compare(ps,ps_ref):
    """
    Compare detected ps with the real one or reference

    Two sources are considered the same when their Euclidean distance
    is at most 5 pixels.  Matched detections are removed from the working
    lists so each detection is counted at most once.

    Parameters
    ----------
    ps: np.ndarray
        Detected point source list
    ps_ref: np.ndarray
        Referenced point source list

    Returns
    -------
    num_same: int
        Number of same PS
    cord_x,cord_y: list
        Coordinates of the same PS
    err_rate: float
        Error rate
    """
    # Init
    num_same = 0
    err_rate = 0.0
    cord_x = []
    cord_y = []
    # Extract coordinates of ps and ps_ref
    ps_x = ps[:,0].tolist()
    ps_y = ps[:,1].tolist()
    ps_ref_x = ps_ref[:,0].tolist()
    ps_ref_y = ps_ref[:,1].tolist()
    # Compare
    # NOTE(review): both loops start at 1, skipping index 0 of each list --
    # possibly an off-by-one; confirm whether row 0 is deliberately excluded.
    # NOTE(review): the inner bound is len(ps_ref_x) but ps_x[j]/ps_y[j] are
    # indexed with j -- if ps has fewer rows than ps_ref this raises
    # IndexError; looks like it should bound on len(ps_x). Confirm intent.
    i = 1
    while i <= len(ps_ref_x) - 1:
        j = 1
        while j <= len(ps_ref_x) - 1:
            d = np.sqrt((ps_x[j]-ps_ref_x[i])**2 + (ps_y[j]-ps_ref_y[i])**2)
            if d <= 5:
                num_same += 1
                cord_x.append(ps_x[j])
                cord_y.append(ps_y[j])
                # list.remove() deletes the first occurrence of the value,
                # which equals deleting index j only when values are unique.
                ps_x.remove(ps_x[j])
                ps_y.remove(ps_y[j])
                break
            j += 1
        i += 1
    len_ps = ps.shape[0]
    # Error = (count mismatch) + (unmatched references), relative to refs.
    err_rate = (abs(len_ps - len(ps_ref_x)) + len(ps_ref_x) - num_same)/ len(ps_ref_x)
    return num_same,err_rate,cord_x,cord_y
def img2mat(imgpath):
    """
    Load an image from the provided path as a matrix.

    Parameter
    ---------
    imgpath: str
        Path of the image; either a FITS file or any ordinary image type.
    """
    # Dispatch on the file extension.
    extension = os.path.splitext(imgpath)[-1]
    if extension == '.fits':
        try:
            hdulist = fits.open(imgpath)
        except IOError:
            sys.exit("The image can't be loaded.")
        return hdulist[0].data
    # Non-FITS: read as 8-bit grayscale, then normalize to [0, 1].
    try:
        raw = imread(imgpath, mode='L')
    except IOError:
        sys.exit("The image can't be loaded.")
    return np.array(raw, dtype=float) / 255
def cluster(pslist,dist=5,itertime=3):
    """Cluster of potential point sources.

    Greedy agglomeration: sources closer than `dist` pixels are merged into
    one, whose position/axes are the group means and whose peak/angle come
    from the brightest member.  The pass is repeated `itertime` times.

    Parameter
    ---------
    pslist: np.ndarray
        Matrix of candidates; columns are (x, y, col axis, row axis,
        angle, peak) as read below.
    dist: int
        Smallest distance between two point sources to be clustered
    itertime: int
        Time of iteration
    """
    # Init: unpack columns into parallel mutable lists.
    rowIdx = pslist[:,1].tolist()
    colIdx = pslist[:,0].tolist()
    rowAxis = pslist[:,3].tolist()
    colAxis = pslist[:,2].tolist()
    ang = pslist[:,4].tolist()
    peaks = pslist[:,5].tolist()
    # Clustering
    for t in range(itertime):
        i = 0
        while i <= len(colIdx) - 1:
            j = i + 1
            xs = colIdx[i]
            ys = rowIdx[i]
            temp_x = [xs]
            temp_y = [ys]
            temp_peak = [peaks[i]]
            temp_ra = [rowAxis[i]]
            temp_ca = [colAxis[i]]
            temp_ang = [ang[i]]
            while j <= len(colIdx) - 1:
                if np.sqrt((xs-colIdx[j])**2+(ys-rowIdx[j])**2)<=dist:
                    temp_x.append(colIdx[j])
                    temp_y.append(rowIdx[j])
                    temp_ra.append(rowAxis[j])
                    temp_ca.append(colAxis[j])
                    temp_peak.append(peaks[j])
                    temp_ang.append(ang[j])
                    # remove
                    # NOTE(review): list.remove(value) deletes the FIRST
                    # occurrence of the value, which equals deleting index j
                    # only when coordinates are unique -- confirm inputs
                    # cannot contain duplicate values.
                    rowIdx.remove(rowIdx[j])
                    colIdx.remove(colIdx[j])
                    rowAxis.remove(rowAxis[j])
                    colAxis.remove(colAxis[j])
                    peaks.remove(peaks[j])
                    ang.remove(ang[j])
                    # change j: compensate for the deletion so the next
                    # element is not skipped.
                    j = j - 1
                j = j + 1
            # update: replace member i by the merged cluster statistics.
            rowIdx[i] = round(np.mean(temp_y))
            colIdx[i] = round(np.mean(temp_x))
            rowAxis[i] = np.mean(temp_ra)
            colAxis[i] = np.mean(temp_ca)
            peaks[i] = np.max(temp_peak)
            # Angle is taken from the brightest member of the cluster.
            idx = np.where(temp_peak==peaks[i])[0][0]
            ang[i] = temp_ang[idx]
            i = i + 1
    final_list = np.array([colIdx,rowIdx,colAxis,rowAxis,ang,peaks]).transpose()
    return final_list
def logManager(loglevel="INFO", toolname="egf2ps", appname=""):
    """
    A simple logging manager to configure the logging style.

    Parameters
    ----------
    loglevel: str
        Level of logging, which can be "DEBUG","INFO","WARNING","ERROR",
        and "CRITICAL". Default as "INFO". Case-insensitive.
    toolname: str
        Name of the tool, embedded in the log prefix.
    appname: str
        Name of the method or class; used as the logger name.

    Returns
    -------
    logger: logging.Logger
        The configured logger.

    Reference
    ---------
    [1] Reitz, K., and Schlusser, T.
        "The Hitchhiker's Guide to Python",
        O'Reilly, 2016.
    """
    # Formatter<TODO>
    formatter = logging.Formatter(
        '[%(levelname)s %(asctime)s]'+ toolname +
        '--%(name)s: %(message)s')
    # Set handler
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    # Initialize logger
    logger = logging.getLogger(appname)
    logger.addHandler(handler)
    # Look the level up on the logging module instead of eval()'ing
    # arbitrary text (the original ran eval("logging." + loglevel), which
    # executes whatever string the caller passes).
    logger.setLevel(getattr(logging, loglevel.upper()))
    return logger
| [
2,
15069,
357,
66,
8,
10511,
844,
666,
8779,
1279,
42592,
2611,
62,
82,
73,
28047,
31,
38227,
13,
785,
29,
198,
2,
17168,
5964,
198,
198,
37811,
198,
4366,
314,
14,
46,
290,
7587,
4899,
389,
899,
798,
287,
428,
3384,
4487,
8265,
... | 1.956474 | 3,607 |
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
# version_tuple mirrors `version` split into comparable components:
# (major, minor, patch, dev tag + git hash, date stamp)
version = '0.0.2.dev0+g3ce8781.d20210430'
version_tuple = (0, 0, 2, 'dev0+g3ce8781', 'd20210430')
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
2393,
7560,
416,
900,
37623,
10141,
62,
1416,
76,
198,
2,
836,
470,
1487,
11,
836,
470,
2610,
287,
2196,
1630,
198,
9641,
796,
705,
15,
13,
15,
13,
17,
13,
7959,
15,
10,
70,
18,
344,
2... | 2.333333 | 84 |
import pathlib
import typing
from typing import Any, Iterable, List, Optional, Set, Union, Type
from dataclasses import dataclass, field
__all__ = ("Parameter",)
@dataclass(order=True)
class Parameter:
    """Placeholder for the exported ``Parameter`` type.

    NOTE(review): in the original source the ``@dataclass`` decorator was
    followed directly by the ``__main__`` guard, which is a SyntaxError
    (a decorator must precede a class or function definition).  Since
    ``__all__`` exports "Parameter", a field-less ordered dataclass is
    declared here so the module imports -- confirm the intended fields
    with the author.
    """


if __name__ == "__main__":
    pass
| [
11748,
3108,
8019,
198,
11748,
19720,
198,
198,
6738,
19720,
1330,
4377,
11,
40806,
540,
11,
7343,
11,
32233,
11,
5345,
11,
4479,
11,
5994,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
198,
834,
439,
834,
796,
... | 3.026667 | 75 |
# -*- coding: utf-8 -*-
from datetime import date, timedelta
from django.shortcuts import render
__author__ = 'codez'
from .models import Event
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
4818,
8079,
1330,
3128,
11,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
834,
9800,
834,
796,
705,
8189,
89,
6,
198,
198,
... | 2.941176 | 51 |
"""
preprocess_corpus.py
Script to preprocess corpus data
author: @justjoshtings
created: 3/31/2022
"""
from Woby_Modules.CorpusProcessor import CorpusProcessor
if __name__ == "__main__":
print("Executing preprocess_corpus.py")
main()
| [
37811,
198,
3866,
14681,
62,
10215,
79,
385,
13,
9078,
198,
7391,
284,
662,
14681,
35789,
1366,
198,
198,
9800,
25,
2488,
3137,
73,
3768,
83,
654,
198,
25598,
25,
513,
14,
3132,
14,
1238,
1828,
198,
37811,
198,
6738,
370,
26730,
62,... | 2.733333 | 90 |
import logging
# Module-level logger named after this module (standard convention).
logger = logging.getLogger(__name__)
import itertools
import random
from collections import OrderedDict
import functools
import numpy as np
import pandas as pd
from enum import Enum
| [
11748,
18931,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
11748,
340,
861,
10141,
198,
11748,
4738,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
198,
11748,
1257,
310,
10141,
198,
198... | 2.702381 | 84 |
import cv2
import numpy as np
import time
from skimage.feature import hog
from sklearn.externals import joblib
from nms import nms
import argparse
# made by abhinav sagar on 7/2/2019
# Sliding-window pedestrian detector: HOG features + a pickled classifier,
# scanned over an image pyramid, followed by non-maximum suppression.
parser = argparse.ArgumentParser(description='To read image name')
parser.add_argument('-i', "--image", help="Path to the test image", required=True)
parser.add_argument('-d','--downscale', help="Downscale ratio", default=1.2, type=float)
parser.add_argument('-v', '--visualize', help="Visualize the sliding window", action="store_true")
parser.add_argument('-w', '--winstride', help="Pixels to move in one step, in any direction", default=8, type=int)
parser.add_argument('-n', '--nms_threshold', help="Threshold Values between 0 to 1 for NMS thresholding. Default is 0.2", default=0.2, type=float)
args = vars(parser.parse_args())
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# modern environments need `import joblib` directly.
clf = joblib.load("pedestrian.pkl")
orig = cv2.imread(args["image"])
img = orig.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
scaleFactor = args["downscale"]
inverse = 1.0/scaleFactor
winStride = (args["winstride"], args["winstride"])
# Window is 128 rows x 64 columns (rows, cols).
winSize = (128, 64)
rects = []
h, w = gray.shape
count = 0
# Image pyramid: shrink by `inverse` each level until the window no longer fits.
while (h >= 128 and w >= 64):
    print (gray.shape)
    h, w= gray.shape
    horiz = w - 64
    vert = h - 128
    print (horiz, vert)
    i = 0
    j = 0
    while i < vert:
        j = 0
        while j < horiz:
            portion = gray[i:i+winSize[0], j:j+winSize[1]]
            features = hog(portion, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2), block_norm="L2")
            result = clf.predict([features])
            if args["visualize"]:
                visual = gray.copy()
                cv2.rectangle(visual, (j, i), (j+winSize[1], i+winSize[0]), (0, 0, 255), 2)
                cv2.imshow("visual", visual)
                cv2.waitKey(1)
            if int(result[0]) == 1:
                print (result, i, j)
                confidence = clf.decision_function([features])
                # NOTE(review): appendRects is not defined in the visible
                # portion of this file -- a positive detection raises
                # NameError as written; confirm it exists elsewhere.
                appendRects(i, j, confidence, count, rects)
            j = j + winStride[0]
        i = i + winStride[1]
    gray = cv2.resize(gray, (int(w*inverse), int(h*inverse)), interpolation=cv2.INTER_AREA)
    count = count + 1
print (count)
print (rects)
nms_rects = nms(rects, args["nms_threshold"])
# Draw all raw detections on the original image.
for (a, b, conf, c, d) in rects:
    cv2.rectangle(orig, (a, b), (a+c, b+d), (0, 255, 0), 2)
cv2.imshow("Before NMS", orig)
cv2.waitKey(0)
# Draw only NMS-surviving detections on the copy.
for (a, b, conf, c, d) in nms_rects:
    cv2.rectangle(img, (a, b), (a+c, b+d), (0, 255, 0), 2)
cv2.imshow("After NMS", img)
cv2.waitKey(0)
# save output
cv2.imwrite("../output.jpg", img)
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
6738,
1341,
9060,
13,
30053,
1330,
40476,
198,
6738,
1341,
35720,
13,
1069,
759,
874,
1330,
1693,
8019,
198,
6738,
299,
907,
1330,
299,
907,
198,
11748,
182... | 2.218058 | 1,174 |
from collections import Counter
if __name__ == '__main__':
    with open('input.txt') as input_file:
        # NOTE(review): read() and valid() are not defined in this module
        # as shown -- presumably defined elsewhere or missing; as written
        # this raises NameError.
        policy_pass_list = [read(line) for line in input_file]
    valid_passwords = list(filter(valid, policy_pass_list))
    print(len(valid_passwords))
| [
6738,
17268,
1330,
15034,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
351,
1280,
10786,
15414,
13,
14116,
11537,
355,
5128,
62,
7753,
25,
198,
220,
220,
220,
220,
220,
220,
220,
... | 2.747368 | 95 |
import sys
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization
from keras.utils import to_categorical
from othello_ml import Othello, Action
from othello_ml.visualizer import Visualizer
from train import MLAgent, MLRenderer
# Fully-convolutional policy network over the 8x8 board: a stack of 3x3
# convolutions with batch norm, ending in a per-square 2-way softmax.
model = Sequential([
    Conv2D(
        32,
        kernel_size=(3, 3),
        input_shape=(8, 8, 1),
        padding='same',
        activation='relu'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(2, (1, 1), activation='softmax', padding='same'),
])
model.summary()
# Load weights from the checkpoint produced by training episode 1000.
model.load_weights("episode_1000.h5")

# Interactive loop: trained agent vs. a human via the CLI, one game per pass.
while True:
    othello = Othello()
    renderer = MLRenderer(path='./result/test-prob-')
    agent1 = MLAgent(
        othello, model, random_rate=0, no_reward=True, renderer=renderer)
    # NOTE(review): CliAgent is not imported in the visible portion of this
    # file -- as written this line raises NameError; confirm its origin.
    agent2 = CliAgent(othello)
    visualizer = Visualizer(othello, path=f'./result/test-')
    othello.play()
| [
11748,
25064,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
34872,
17,
35,
11,
347,
963,
26447,
1634,
198,
6738,
41927,
292,
... | 2.530395 | 658 |
#!/usr/bin/env python
# Copyright 2016 University of Chicago
# Licensed under the APL 2.0 license
import argparse
import subprocess
import sys
from email.mime.text import MIMEText
import psycopg2
import fsurfer.helpers
import fsurfer
VERSION = fsurfer.__version__
def email_user(workflow_id, email):
    """
    Email user informing them that a workflow will be deleted

    :param workflow_id: id for workflow that will be deleted
    :param email: email address for user
    :return: True on success, False on failure
    """
    logger = fsurfer.log.get_logger()
    msg = MIMEText('The results from your freesurfer ' +
                   'workflow {0} '.format(workflow_id) +
                   'will be deleted in 7 days, please download ' +
                   'them if you would like to save the results.')
    msg['Subject'] = 'Results for FSurf workflow {0} '.format(workflow_id)
    msg['Subject'] += 'will be deleted'
    sender = 'fsurf@login.osgconnect.net'
    dest = email
    msg['From'] = sender
    msg['To'] = dest
    try:
        sendmail = subprocess.Popen(['/usr/sbin/sendmail', '-t'], stdin=subprocess.PIPE)
        sendmail.communicate(msg.as_string())
        logger.info("Emailed {0} about purge for workflow {1}".format(email,
                                                                      workflow_id))
        return True
    except (OSError, subprocess.CalledProcessError) as e:
        # Popen raises OSError when the sendmail binary is missing or not
        # executable; CalledProcessError (which Popen itself never raises)
        # is kept in case a caller swaps in check_call-style invocation.
        logger.exception("Error emailing {0}: {1}".format(email, e))
        return False
def process_results():
    """
    Process results from jobs, removing any that are more than 30 days old

    Selects COMPLETED/ERROR jobs whose job_date is 22 days old and emails
    their owners a 7-day purge warning.

    :return: exit code (0 for success, non-zero for failure)
    """
    fsurfer.log.initialize_logging()
    logger = fsurfer.log.get_logger()
    parser = argparse.ArgumentParser(description="Process and remove old results")
    # version info
    parser.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    # Arguments for action
    parser.add_argument('--dry-run', dest='dry_run',
                        action='store_true', default=False,
                        help='Mock actions instead of carrying them out')
    parser.add_argument('--debug', dest='debug',
                        action='store_true', default=False,
                        help='Output debug messages')
    args = parser.parse_args(sys.argv[1:])
    if args.debug:
        fsurfer.log.set_debugging()
    if args.dry_run:
        sys.stdout.write("Doing a dry run, no changes will be made\n")
    conn = fsurfer.helpers.get_db_client()
    cursor = conn.cursor()
    # Columns come back as: 0=id, 1=username, 2=email, 3=state, 4=subject.
    job_query = "SELECT jobs.id, " \
                "       users.username, " \
                "       users.email, " \
                "       jobs.state, " \
                "       jobs.subject " \
                "FROM freesurfer_interface.jobs AS jobs, " \
                "     freesurfer_interface.users AS users "\
                "WHERE (state = 'COMPLETED' OR" \
                "       state = 'ERROR') AND" \
                "      (age(job_date) >= '22 days' AND " \
                "       age(job_date) < '23 days') AND" \
                "      jobs.username = users.username"
    try:
        cursor.execute(job_query)
        for row in cursor.fetchall():
            # NOTE(review): the message reads "user {0} ... workflow {1}"
            # but row[0] is the job id and row[1] the username -- the two
            # placeholders look swapped; confirm intended wording.
            logger.info("Warning user {0} about workflow {1} purge".format(row[0],
                                                                           row[1]))
            if args.dry_run:
                sys.stdout.write("Would email {0}".format(row[2]))
                sys.stdout.write("about workflow {0}\n".format(row[0]))
                continue
            if not email_user(row[0], row[2]):
                logger.error("Can't email {0} for job {1}".format(row[2],
                                                                  row[0]))
                continue
            conn.commit()
    # NOTE(review): Python 2-only except syntax ("except X, e"); this file
    # cannot run under Python 3 without conversion to "except X as e".
    except psycopg2.Error, e:
        logger.error("Got pgsql error: {0}".format(e))
        return 1
    finally:
        conn.commit()
        conn.close()
    return 0
if __name__ == '__main__':
    # Propagate the exit code reported by process_results().
    sys.exit(process_results())
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
15069,
1584,
2059,
286,
4842,
198,
2,
49962,
739,
262,
3486,
43,
362,
13,
15,
5964,
198,
11748,
1822,
29572,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
6738,
3053,
13,
7... | 2.086558 | 1,964 |
import os, time
from core import execute
from core import slack
from core import utils
class VulnScan(object):
    """Scan for vulnerable services based on detected service versions.

    NOTE(review): the only method (`conclude`, below) is entirely commented
    out, so this class currently defines no behavior.
    """
    # def conclude(self):
    #     #### Create beautiful HTML report for masscan
    #     cmd = "xsltproc -o $WORKSPACE/portscan/final-$OUTPUT.html $PLUGINS_PATH/nmap-bootstrap.xsl $WORKSPACE/vulnscan/{0}-nmap"
    #     cmd = utils.replace_argument(self.options, cmd)

    #     output_path = utils.replace_argument(
    #         self.options, '$WORKSPACE/portscan/final-$OUTPUT.html')
    #     std_path = utils.replace_argument(
    #         self.options, '')

    #     execute.send_cmd(self.options, cmd, output_path, std_path, self.module_name)
| [
11748,
28686,
11,
640,
198,
6738,
4755,
1330,
12260,
198,
6738,
4755,
1330,
30740,
198,
6738,
4755,
1330,
3384,
4487,
198,
198,
4871,
25442,
77,
33351,
7,
15252,
2599,
198,
220,
220,
220,
705,
7061,
20937,
768,
8826,
2139,
1912,
2196,
... | 2.5 | 288 |
# Time: O(n * b), n is the length of gene string, b is size of bank
# Space: O(b)
# A gene string can be represented by an 8-character long string,
# with choices from "A","C","G","T".
# Suppose we need to investigate about a mutation (mutation from "start" to "end"),
# where ONE mutation is defined as ONE single character changed in the gene string.
# For example, "AACCGGTT" -> "AACCGGTA" is 1 mutation.
# Also, there is a given gene "bank", which records all the valid gene mutations.
# A gene must be in the bank to make it a valid gene string.
#
# Now, given 3 things - start, end, bank,
# your task is to determine what is the minimum number of mutations needed to
# mutate from "start" to "end". If there is no such a mutation, return -1.
#
# NOTE: 1. Starting point is assumed to be valid, so it might not be included in the bank.
# 2. If multiple mutations are needed, all mutations during in the sequence must be valid.
#
# For example,
#
# bank: "AACCGGTA"
# start: "AACCGGTT"
# end: "AACCGGTA"
# return: 1
#
# bank: "AACCGGTA", "AACCGCTA", "AAACGGTA"
# start: "AACCGGTT"
# end: "AAACGGTA"
# return: 2
#
# bank: "AAAACCCC", "AAACCCCC", "AACCCCCC"
# start: "AAAAACCC"
# end: "AACCCCCC"
# return: 3
from collections import deque
| [
2,
3862,
25,
220,
440,
7,
77,
1635,
275,
828,
299,
318,
262,
4129,
286,
9779,
4731,
11,
275,
318,
2546,
286,
3331,
198,
2,
4687,
25,
440,
7,
65,
8,
198,
198,
2,
317,
9779,
4731,
460,
307,
7997,
416,
281,
807,
12,
22769,
890,
... | 3.002404 | 416 |
"""Functions for making EvoEF2 predictions."""
import glob
import gzip
import multiprocessing
import os
import string
import subprocess
import warnings
from pathlib import Path

import ampal
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder

from benchmark import config
def run_Evo2EF(
    pdb: str, chain: str, number_of_runs: str, working_dir: Path, path_to_evoef2: Path
) -> None:
    """Predict a sequence with EvoEF2 via the bundled shell script.

    Parameters
    ----------
    pdb: str
        PDB code.
    chain: str
        Chain code.
    number_of_runs: str
        Number of sequences to be generated.
    working_dir: Path
        Directory for temporary files and results.
    path_to_evoef2: Path
        Location of the EvoEF2 executable.
    """
    print(f"Starting {pdb}{chain}.")
    # evo.sh must live in the same directory as this module.
    script = os.path.dirname(os.path.realpath(__file__)) + "/evo.sh"
    process = subprocess.Popen(
        [script, pdb, chain, number_of_runs, working_dir, path_to_evoef2]
    )
    # Block until the external prediction finishes.
    process.wait()
    print(f"{pdb}{chain} done.")
def multi_Evo2EF(
    df: pd.DataFrame,
    number_of_runs: int,
    working_dir: Path,
    path_to_assemblies: Path,
    path_to_evoef2: Path,
    max_processes: int = 8,
    nmr: bool = False,
) -> None:
    """Runs Evo2EF on all PDB chains in the DataFrame.

    Parameters
    ----------
    df: pd.DataFrame
        DataFrame with PDB and chain codes.
    number_of_runs: int
        Number of sequences to be generated for each PDB file.
    working_dir: Path
        Dir where to store temporary files and results.
    path_to_assemblies: Path
        Dir with biological assemblies.
    path_to_evoef2: Path
        Location of EvoEF2 executable.
    max_processes: int = 8
        Number of cores to use, default is 8.
    nmr: bool = False
        When True, read the PDB-format entry and keep only its first NMR
        state instead of the biological assembly.
    """
    inputs = []
    # remove duplicated chains
    df = df.drop_duplicates(subset=["PDB", "chain"])
    # check if working directory exists. Make one if doesn't exist.
    if not working_dir.exists():
        os.makedirs(working_dir)
    if not (working_dir / "results/").exists():
        os.makedirs(working_dir / "results/")
    print(f"{df.shape[0]} structures will be predicted.")
    for i, protein in df.iterrows():
        if not nmr:
            with gzip.open(
                path_to_assemblies / protein.PDB[1:3] / f"{protein.PDB}.pdb1.gz"
            ) as file:
                assembly = ampal.load_pdb(file.read().decode(), path=False)
            # fuse all states of the assembly into one state to avoid EvoEF2 errors.
            empty_polymer = ampal.Assembly()
            chain_id = []
            for polymer in assembly:
                for chain in polymer:
                    empty_polymer.append(chain)
                    chain_id.append(chain.id)
            # relabel chains to avoid repetition, remove ligands.
            # FIX: `string` was used here without being imported at module
            # level (NameError at runtime); the import is now at file top.
            str_list = string.ascii_uppercase.replace(protein.chain, "")
            index = chain_id.index(protein.chain)
            chain_id = list(str_list[: len(chain_id)])
            chain_id[index] = protein.chain
            empty_polymer.relabel_polymers(chain_id)
            pdb_text = empty_polymer.make_pdb(alt_states=False, ligands=False)
            # writing new pdb with AMPAL fixes most of the errors with EvoEF2.
            with open((working_dir / protein.PDB).with_suffix(".pdb1"), "w") as pdb_file:
                pdb_file.write(pdb_text)
        # pick first nmr structure
        else:
            with gzip.open(
                path_to_assemblies / protein.PDB[1:3] / f"pdb{protein.PDB}.ent.gz"
            ) as file:
                assembly = ampal.load_pdb(file.read().decode(), path=False)
            pdb_text = assembly[0].make_pdb(alt_states=False)
            # writing new pdb with AMPAL fixes most of the errors with EvoEF2.
            with open((working_dir / protein.PDB).with_suffix(".pdb1"), "w") as pdb_file:
                pdb_file.write(pdb_text)
        inputs.append(
            (
                protein.PDB,
                protein.chain,
                str(number_of_runs),
                working_dir,
                path_to_evoef2,
            )
        )
    with multiprocessing.Pool(max_processes) as P:
        P.starmap(run_Evo2EF, inputs)
def seq_to_arr(working_dir: Path, user_list: Path, ignore_uncommon:bool=True):
    """Produce a prediction file compatible with the benchmarking tool.

    Reads every per-chain EvoEF2 result under ``working_dir/results``,
    concatenates the predicted residues, one-hot encodes them and writes
    ``evoEF2.csv`` plus a ``datasetmap.txt`` manifest.

    working_dir: Path
        Dir where EvoEF2 results are stored.
    user_list: Path
        Path to .txt file with protein chains to include in the benchmark
    ignore_uncommon: bool
        Recorded verbatim in the datasetmap header.
    """
    with open(Path(user_list)) as handle:
        chains = [line.strip('\n') for line in handle.readlines()]
    path = Path(working_dir)
    encoder = OneHotEncoder(categories=[config.acids], sparse=False)
    residues = []
    with open(path / 'datasetmap.txt', 'w') as map_file:
        map_file.write(f"ignore_uncommon {ignore_uncommon}\ninclude_pdbs\n##########\n")
        for protein in chains:
            prediction_path = path / "results" / f"{protein}.txt"
            # Skip chains whose result file is absent or empty.
            if prediction_path.exists() and os.path.getsize(prediction_path) > 0:
                with open(prediction_path) as prediction:
                    seq = prediction.readline().split()[0]
                # EvoEF2 writes a literal "0" when it produced no sequence.
                if seq != "0":
                    residues += list(seq)
                    map_file.write(f"{protein} {len(seq)}\n")
                else:
                    warnings.warn(
                        f"EvoEF2: {protein} prediction does not exits, EvoEF2 returned 0."
                    )
            else:
                warnings.warn(
                    f"EvoEF2: {protein} prediction does not exits."
                )
    encoded = encoder.fit_transform(np.array(residues).reshape(-1, 1))
    pd.DataFrame(encoded).to_csv(path / "evoEF2.csv", header=None, index=None)
| [
37811,
24629,
2733,
329,
1642,
4319,
78,
25425,
17,
16277,
526,
15931,
198,
198,
11748,
716,
18596,
198,
11748,
308,
13344,
198,
11748,
15095,
198,
11748,
850,
14681,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
6738,
3108,
... | 2.070493 | 2,979 |
"""Holds the chromosome class and helper functions.
Classes:
Chromosome
Functions:
valid_append(List[Path], Path)
valid_insert(List[Path], Path)
"""
class Chromosome:
    """A candidate solution paired with its fitness score."""

    def __init__(self, solution, fitness):
        """Create a chromosome.

        Args:
            solution (List[Path]): Ordered paths/steps making up the solution.
            fitness (integer): Evaluation score of the solution.
        """
        self.solution = solution
        self.fitness = fitness
| [
37811,
39,
10119,
262,
34348,
1398,
290,
31904,
5499,
13,
198,
198,
9487,
274,
25,
628,
220,
220,
220,
18255,
418,
462,
198,
198,
24629,
2733,
25,
628,
220,
220,
220,
4938,
62,
33295,
7,
8053,
58,
15235,
4357,
10644,
8,
198,
220,
... | 2.607595 | 237 |
import pymysql.cursors

# Open a connection to the local MySQL `python_user` database.
# DictCursor makes fetched rows come back as dicts keyed by column name
# instead of positional tuples.
# NOTE(review): credentials ('root'/'root') are hard-coded — move them to
# configuration or environment variables before real use.
connection = pymysql.connect(host='localhost', port=3306, user='root', passwd='root', db='python_user',
                             charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
| [
11748,
279,
4948,
893,
13976,
13,
66,
1834,
669,
198,
198,
38659,
796,
279,
4948,
893,
13976,
13,
8443,
7,
4774,
11639,
36750,
3256,
2493,
28,
18,
20548,
11,
2836,
11639,
15763,
3256,
1208,
16993,
11639,
15763,
3256,
20613,
11639,
29412... | 2.191919 | 99 |
import json
from unittest.mock import mock_open, patch
import pytest
from codalib import util
def test_return_value():
    """
    parseVocabularySources should return one tuple per vocabulary term,
    so a four-term file yields a list of four tuples.
    """
    terms = [
        {'name': 'foo', 'label': 'Foo'},
        {'name': 'bar', 'label': 'Bar'},
        {'name': 'baz', 'label': 'Baz'},
        {'name': 'qux', 'label': 'Qux'},
    ]
    m = mock_open(read_data=json.dumps({'terms': terms}))
    with patch('codalib.util.open', m):
        choices = util.parseVocabularySources('/foo/bar')
    assert len(choices) == 4
    # Every element of the returned list must be a tuple.
    assert all(type(choice) is tuple for choice in choices)
def test_return_value_elements():
    """
    Each returned tuple should pair a term's name with its label.
    """
    payload = json.dumps({'terms': [{'name': 'foo', 'label': 'Foo'}]})
    with patch('codalib.util.open', mock_open(read_data=payload)):
        choices = util.parseVocabularySources('/foo/bar')
    assert choices.pop() == ('foo', 'Foo')
@pytest.mark.xfail
def test_empty_file_does_not_raise_exception():
    """
    An empty vocabulary file should not raise an exception.
    """
    with patch('codalib.util.open', mock_open()):
        util.parseVocabularySources('/foo/bar')
@pytest.mark.xfail
def test_empty_json_does_not_raise_exception():
    """
    A file containing an empty JSON object (no terms) should not raise
    an exception.
    """
    with patch('codalib.util.open', mock_open(read_data=json.dumps({}))):
        util.parseVocabularySources('/foo/bar')
| [
11748,
33918,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
15290,
62,
9654,
11,
8529,
198,
198,
11748,
12972,
9288,
198,
198,
6738,
14873,
282,
571,
1330,
7736,
628,
198,
4299,
1332,
62,
7783,
62,
8367,
33529,
198,
220,
220,
220,
37... | 2.410667 | 750 |
from utils_prm import select_cfg_file
| [
6738,
3384,
4487,
62,
1050,
76,
1330,
2922,
62,
37581,
62,
7753,
628
] | 3 | 13 |
# %% [572. *Subtree of Another Tree](https://leetcode.com/problems/subtree-of-another-tree/)
# Problem: return whether t is a subtree of s
# Approach: use recursion
| [
2,
43313,
685,
48724,
13,
1635,
7004,
21048,
286,
6023,
12200,
16151,
5450,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
7266,
21048,
12,
1659,
12,
29214,
12,
21048,
34729,
198,
2,
10263,
243,
237,
165,
94,
234,
171,
120,
248... | 1.52439 | 82 |
"""
iorodeo-potentiostat
---------------------
Python interface to IO Rodeo's Potentiostat Shield for the teensy 3.2 development
board.
Example
--------
.. code:: python
from potentiostat import Potentiostat
dev = Potentiostat('/dev/ttyACM0')
dev.set_curr_range('100uA')
dev.set_sample_period(10)
name = 'cyclic'
param = {
'quietValue' : 0.0,
'quietTime' : 1000,
'amplitude' : 2.0,
'offset' : 0.0,
'period' : 1000,
'numCycles' : 5,
'shift' : 0.0,
}
dev.set_param(name,param)
t,volt,curr = dev.run_test(name,display='pbar')
Install
--------
.. code:: bash
$ pip install iorodeo-potentiostat
Links
-----
* Documentation http://stuff.iorodeo.com/docs/potentiostat
* Download https://bitbucket.org/iorodeo/potentiostat
"""
from setuptools import setup, find_packages
from os import path
# Absolute directory of this setup script.
# NOTE(review): `here` is never used below (the long description comes from
# the module docstring, not a file) — remove it or use it; confirm intent.
here = path.abspath(path.dirname(__file__))

# Package metadata; the module docstring above doubles as the long description.
setup(
    name='iorodeo-potentiostat',
    version='0.0.3',
    description='Serial interface to IO Rodeo Potentiostat',
    long_description=__doc__,
    url='https://bitbucket.org/iorodeo/potentiostat',
    author='Will Dickson',
    author_email='will@iorodeo.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
    ],
    keywords='Serial interface for IO Rodeo Potentiostat',
    # Ship every package found except docs/tests/examples.
    packages=find_packages(exclude=['docs', 'tests', 'examples']),
    install_requires=['pyserial','progressbar33'],
)
| [
37811,
198,
1504,
1098,
78,
12,
13059,
298,
72,
455,
265,
198,
19351,
12,
198,
198,
37906,
7071,
284,
24418,
371,
1098,
78,
338,
6902,
298,
72,
455,
265,
10022,
329,
262,
15508,
88,
513,
13,
17,
2478,
220,
198,
3526,
13,
198,
198,... | 2.407288 | 933 |
#!/usr/bin/env python
# FTW Telecommunications Research Center Vienna (www.ftw.at)
# Dietmar Schabus (schabus@ftw.at)
# July 2014
# library imports
import datetime
import random
# imports from this project
import memory_config
import liblog
# ==============================================================================
# memory_logic.Logic
# ==============================================================================
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
19446,
54,
48667,
4992,
3337,
23825,
357,
2503,
13,
701,
86,
13,
265,
8,
198,
2,
16292,
3876,
3059,
46844,
357,
20601,
46844,
31,
701,
86,
13,
265,
8,
198,
2,
2901,
1946,
... | 4.149533 | 107 |
#!/usr/bin/env python3
import argparse
import os
import shutil
import random
import ast
from os.path import basename, exists, splitext
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import yaml
from core.trainers import CDTrainer
from utils.misc import OutPathGetter, Logger, register
# Run the entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
4738,
198,
11748,
6468,
198,
6738,
28686,
13,
6978,
1330,
1615,
12453,
11,
7160,
11,
4328,
578,
742,
198,
... | 2.966667 | 120 |
# Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base service class for all service classes
"""
from oslo_log import log as logging
from tempest.lib.common import rest_client
LOG = logging.getLogger(__name__)
class BaseContrailClient(rest_client.RestClient):
"""Base Tempest REST client for Designate API"""
class ResponseBody(dict):
"""Class that wraps an http response and dict body into a single value.
Callers that receive this object will normally use it as a dict but
can extract the response if needed.
"""
| [
2,
15069,
1584,
5161,
5,
51,
11421,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
42... | 3.459877 | 324 |
#!/usr/bin/env python2
"""
This script demonstates how to transform SQL logs fetched from elasticsearch into graph showing how data flows between the code and the storage
"""
from __future__ import print_function
import collections
import time
import logging
import re
from datetime import datetime
from dateutil import tz
import sqlparse
from elasticsearch import Elasticsearch
# Configure root logging: timestamp, left-aligned logger name and level
# columns, then the message.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(name)-35s %(levelname)-8s %(message)s',
    datefmt="%Y-%m-%d %H:%M:%S"
)
# Module-level logger named after this module.
logger = logging.getLogger(__name__)
def format_timestamp(ts):
    """
    Format the UTC timestamp for Elasticsearch

    eg. 2014-07-09T08:37:18.000Z

    Uses time.gmtime for the UTC conversion, so no third-party tz object
    (dateutil) is needed for this helper.

    @see https://docs.python.org/2/library/time.html#time.strftime
    """
    return time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime(ts))
# NOTE(review): the helpers used below (get_log_messages, extract_metadata,
# build_flow_entry, unique, get_log_aggregate) are not defined in this chunk —
# presumably they live earlier in the file; confirm before running.
# Python 2 script (see .iteritems() below), so map/filter return lists and
# len() on their results is valid.

# take SQL logs from elasticsearch
sql_logs = get_log_messages(query='@message: /SQL.*/', limit=None) # None - return ALL matching messages

logger.info('Generating metadata...')
# Per-entry metadata; extract_metadata may return None, which is filtered out.
meta = map(extract_metadata, sql_logs)
meta = filter(lambda item: item is not None, meta)

logger.info('Building dataflow entries for {} queries...'.format(len(meta)))
entries = map(build_flow_entry, meta)

logger.info('Building TSV file with nodes and edges from {} entries...'.format(len(entries)))
# Collapse duplicate edges; the lambda renders each edge label from its count
# (86400 seconds/day -> queries per second).
graph = unique(
    lambda entry, cnt: 'QPS: {:.4f}'.format(1. * cnt / 86400), # calculate QPS
    entries
)

logger.info('Printing out TSV file with {} edges...'.format(len(graph)))
print('# SQL log entries analyzed: {}'.format(len(meta)))
print("\n".join(set(graph)))

# prepare flow data for redis operations
logger.info("Building dataflow entries for redis pushes...")
# bots:<short host name> --push--> redis:products
pushes = map(
    lambda entry: '{source}\t{edge}\t{target}'.format(
        source='bots:{}'.format(entry.get('@source_host').split('.')[0]), edge='push', target='redis:products'),
    get_log_messages(query='program: "elecena.bots" AND @message: "bot::send"',limit=None)
)
# redis:products --pop--> mq/request.php
pops = map(
    lambda entry: '{source}\t{edge}\t{target}'.format(
        target='mq/request.php', edge='pop', source='redis:products'),
    get_log_messages(query='program: "uportal.bots-worker" AND @message: "Message taken from the queue"',limit=None)
)
# 24 hours of logs -> messages per hour.
graph = unique(
    lambda entry, cnt: '{:.1f} messages/hour'.format(1. * cnt / 24),
    pops + pushes
)
print('# Redis log entries')
print("\n".join(set(graph)))

# prepare HTTP traffic stats for bots
logger.info("Building dataflow entries for bots HTTP traffic...")
hosts_buckets, bytes_transfered = get_log_aggregate(
    query='program: "elecena.bots" AND @message: "bot::send_http_request" AND severity: "info"',
    group_by='@source_host', stats_field='@context.stats.size_download'
)
graph = []
max_count = max(hosts_buckets.values())
# Average downloaded bytes per request, used to estimate per-host GiB/hour.
bytes_per_req = 1. * bytes_transfered['sum'] / bytes_transfered['count']
for host, count in hosts_buckets.iteritems():
    # Edge weight (`value`) is the host's request count normalized to [0, 1].
    graph.append('{source}\t{edge}\t{target}\t{value:.4f}\t{metadata}'.format(
        source='web:shops', edge='http fetch', target='bots:{}'.format(host), value=1.0 * count / max_count,
        metadata='{reqs:.0f} requests/hour, {gibs:.2f} GiB/hour'.format(reqs=1. * count / 24, gibs=bytes_per_req * count / 1024 / 1024 / 1024 / 24)
    ))
print('# bots HTTP traffic')
print("\n".join(set(graph)))

# prepare flow data for s3 operations
logger.info("Building dataflow entries for s3 operations...")
# ImageBot --upload--> s3:s.elecena.pl
s3_uploads = map(
    lambda entry: '{source}\t{edge}\t{target}'.format(
        source='ImageBot', edge='upload', target='s3:s.elecena.pl'),
    get_log_messages(query='program: "nano.ImageBot" AND @message: "Image stored"',limit=None)
)
graph = unique(
    lambda entry, cnt: '{:.1f} requests/hour'.format(1. * cnt / 24),
    s3_uploads
)
print('# s3 operations')
print("\n".join(set(graph)))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
37811,
198,
1212,
4226,
3222,
27219,
703,
284,
6121,
16363,
17259,
11351,
1740,
422,
27468,
12947,
656,
4823,
4478,
703,
1366,
15623,
1022,
262,
2438,
290,
262,
6143,
198,
37811,
198... | 2.743273 | 1,375 |
import math

# Read two divisors and a lower bound; print the smallest number >= n that is
# divisible by both a and b.
a = int(input())
b = int(input())
n = int(input())

# lcm(a, b) computed via gcd; the answer is the first multiple of the lcm at
# or above n. This replaces the original O(n) brute-force scan (which also
# used a float bound, 10e9, and silently printed nothing past it).
lcm = a * b // math.gcd(a, b)
print(((n + lcm - 1) // lcm) * lcm)
| [
64,
796,
493,
7,
15414,
28955,
198,
65,
796,
493,
7,
15414,
28955,
198,
77,
796,
493,
7,
15414,
28955,
198,
1640,
1312,
287,
2837,
7,
77,
11,
493,
7,
940,
68,
24,
8,
2599,
198,
220,
220,
220,
611,
357,
72,
4064,
257,
6624,
657... | 1.973333 | 75 |
import os
from typing import Any, Dict, List, Tuple, Optional, Generator
from os.path import dirname, expandvars
from pathlib import Path
from .fixer import ImportStatementMap
from .import_statement import SingleImport, ImportStatement
| [
11748,
28686,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
7343,
11,
309,
29291,
11,
32233,
11,
35986,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
4292,
85,
945,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
764,
13049... | 3.966667 | 60 |
#!/usr/bin/env python3
import argparse
import collections
import curses
import os
import random
import sys
import time
import pkg_resources
from multiprocessing import Array, Event, Process, Value
import numpy as np
import pandas as pd
# Run the entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
17268,
198,
11748,
43878,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
279,
10025,
62,
37540,
198,
6738,
... | 3.2 | 90 |
# -*- coding:utf-8 -*-

# Fixed: `Print` is not defined in Python (NameError); the builtin is `print`.
print("Hello World!")
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
198,
18557,
7203,
15496,
2159,
2474,
8,
198
] | 2.190476 | 21 |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
"""
Constraint satisfaction problems require that all a problem's variables be assigned
values, out of a finite domain, that result in the satisfying of all constraints.
The :class:`ConstraintSatisfactionProblem` class aggregates all constraints and variables
defined for a problem and provides functionality to assist in problem solution, such
as verifying whether a candidate solution satisfies the constraints.
"""
from collections import Callable, Iterable, defaultdict
import dimod
from dwavebinarycsp.core.constraint import Constraint
class ConstraintSatisfactionProblem(object):
    """A constraint satisfaction problem.

    Args:
        vartype (:class:`~dimod.Vartype`/str/set):
            Variable type for the binary quadratic model. Supported values are:

            * :attr:`~dimod.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :attr:`~dimod.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

    Attributes:
        constraints (list[:obj:`.Constraint`]):
            Constraints that together constitute the constraint satisfaction problem. Valid solutions
            satisfy all of the constraints.

        variables (dict[variable, list[:obj:`.Constraint`]]):
            Variables of the constraint satisfaction problem as a dict, where keys are the variables
            and values a list of all of constraints associated with the variable.

        vartype (:class:`dimod.Vartype`):
            Enumeration of valid variable types. Supported values are :attr:`~dimod.Vartype.SPIN`
            or :attr:`~dimod.Vartype.BINARY`. If `vartype` is SPIN, variables can be assigned -1 or 1;
            if BINARY, variables can be assigned 0 or 1.

    Example:
        This example creates a binary-valued constraint satisfaction problem, adds two constraints,
        :math:`a = b` and :math:`b \\ne c`, and tests :math:`a,b,c = 1,1,0`.

        >>> import dwavebinarycsp
        >>> import operator
        >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem('BINARY')
        >>> csp.add_constraint(operator.eq, ['a', 'b'])
        >>> csp.add_constraint(operator.ne, ['b', 'c'])
        >>> csp.check({'a': 1, 'b': 1, 'c': 0})
        True

    """
    # NOTE(review): no __init__ is visible in this chunk, yet the methods below
    # read self.constraints, self.variables and self.vartype — presumably an
    # __init__ defining them was lost from this excerpt; confirm against the
    # original file.

    # NOTE(review): this decorator validates an argument named 'vartype', but
    # add_constraint has no such parameter — it most likely belongs on
    # __init__(self, vartype); confirm before relying on this code.
    @dimod.vartype_argument('vartype')
    def add_constraint(self, constraint, variables=tuple()):
        """Add a constraint.

        Args:
            constraint (function/iterable/:obj:`.Constraint`):
                Constraint definition in one of the supported formats:

                1. Function, with input arguments matching the order and
                   :attr:`~.ConstraintSatisfactionProblem.vartype` type of the `variables`
                   argument, that evaluates True when the constraint is satisfied.
                2. List explicitly specifying each allowed configuration as a tuple.
                3. :obj:`.Constraint` object built either explicitly or by :mod:`dwavebinarycsp.factories`.

            variables(iterable):
                Variables associated with the constraint. Not required when `constraint` is
                a :obj:`.Constraint` object.

        Examples:
            This example defines a function that evaluates True when the constraint is satisfied.
            The function's input arguments match the order and type of the `variables` argument.

            >>> import dwavebinarycsp
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> def all_equal(a, b, c):  # works for both dwavebinarycsp.BINARY and dwavebinarycsp.SPIN
            ...     return (a == b) and (b == c)
            >>> csp.add_constraint(all_equal, ['a', 'b', 'c'])
            >>> csp.check({'a': 0, 'b': 0, 'c': 0})
            True
            >>> csp.check({'a': 0, 'b': 0, 'c': 1})
            False

            This example explicitly lists allowed configurations.

            >>> import dwavebinarycsp
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> eq_configurations = {(-1, -1), (1, 1)}
            >>> csp.add_constraint(eq_configurations, ['v0', 'v1'])
            >>> csp.check({'v0': -1, 'v1': +1})
            False
            >>> csp.check({'v0': -1, 'v1': -1})
            True

            This example uses a :obj:`.Constraint` object built by :mod:`dwavebinarycsp.factories`.

            >>> import dwavebinarycsp
            >>> import dwavebinarycsp.factories.constraint.gates as gates
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c']))  # add an AND gate
            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd']))  # add an XOR gate
            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
            True

        """
        # Normalize the three accepted formats to a Constraint instance.
        if isinstance(constraint, Constraint):
            if variables and (tuple(variables) != constraint.variables):
                raise ValueError("mismatched variables and Constraint")
        elif isinstance(constraint, Callable):
            constraint = Constraint.from_func(constraint, variables, self.vartype)
        elif isinstance(constraint, Iterable):
            constraint = Constraint.from_configurations(constraint, variables, self.vartype)
        else:
            raise TypeError("Unknown constraint type given")

        self.constraints.append(constraint)
        # Index the constraint under each of its variables for fast lookup.
        for v in constraint.variables:
            self.variables[v].append(constraint)

    def add_variable(self, v):
        """Add a variable.

        Args:
            v (variable):
                Variable in the constraint satisfaction problem. May be of any type that
                can be a dict key.

        Examples:
            This example adds two variables, one of which is already used in a constraint
            of the constraint satisfaction problem.

            >>> import dwavebinarycsp
            >>> import operator
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> csp.add_constraint(operator.eq, ['a', 'b'])
            >>> csp.add_variable('a')  # does nothing, already added as part of the constraint
            >>> csp.add_variable('c')
            >>> csp.check({'a': -1, 'b': -1, 'c': 1})
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': -1})
            True

        """
        self.variables[v]  # because defaultdict will create it if it's not there

    def check(self, solution):
        """Check that a solution satisfies all of the constraints.

        Args:
            solution (container):
                An assignment of values for the variables in the constraint satisfaction problem.

        Returns:
            bool: True if the solution satisfies all of the constraints; False otherwise.

        Examples:
            This example creates a binary-valued constraint satisfaction problem, adds
            two logic gates implementing Boolean constraints, :math:`c = a \wedge b`
            and :math:`d = a \oplus c`, and verifies that the combined problem is satisfied
            for a given assignment.

            >>> import dwavebinarycsp
            >>> import dwavebinarycsp.factories.constraint.gates as gates
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
            >>> csp.add_constraint(gates.and_gate(['a', 'b', 'c']))  # add an AND gate
            >>> csp.add_constraint(gates.xor_gate(['a', 'c', 'd']))  # add an XOR gate
            >>> csp.check({'a': 1, 'b': 0, 'c': 0, 'd': 1})
            True

        """
        # Every constraint's own check() must pass for the solution to be valid.
        return all(constraint.check(solution) for constraint in self.constraints)

    def fix_variable(self, v, value):
        """Fix the value of a variable and remove it from the constraint satisfaction problem.

        Args:
            v (variable):
                Variable to be fixed in the constraint satisfaction problem.

            value (int):
                Value assigned to the variable. Values must match the
                :attr:`~.ConstraintSatisfactionProblem.vartype` of the constraint
                satisfaction problem.

        Examples:
            This example creates a spin-valued constraint satisfaction problem, adds two constraints,
            :math:`a = b` and :math:`b \\ne c`, and fixes variable b to +1.

            >>> import dwavebinarycsp
            >>> import operator
            >>> csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.SPIN)
            >>> csp.add_constraint(operator.eq, ['a', 'b'])
            >>> csp.add_constraint(operator.ne, ['b', 'c'])
            >>> csp.check({'a': +1, 'b': +1, 'c': -1})
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': +1})
            True
            >>> csp.fix_variable('b', +1)
            >>> csp.check({'a': +1, 'b': +1, 'c': -1})  # 'b' is ignored
            True
            >>> csp.check({'a': -1, 'b': -1, 'c': +1})
            False
            >>> csp.check({'a': +1, 'c': -1})
            True
            >>> csp.check({'a': -1, 'c': +1})
            False

        """
        if v not in self.variables:
            raise ValueError("given variable {} is not part of the constraint satisfaction problem".format(v))

        # Propagate the fixed value into each constraint that mentions v, then
        # drop v from the problem's variable index.
        for constraint in self.variables[v]:
            constraint.fix_variable(v, value)

        del self.variables[v]  # delete the variable
# Short convenience alias for the class above.
CSP = ConstraintSatisfactionProblem
"""An alias for :class:`.ConstraintSatisfactionProblem`."""
| [
2,
15069,
2864,
360,
12,
39709,
11998,
3457,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
... | 2.360056 | 4,316 |
#-- author: @avialxee ---#--#
#from radmap import rad_helpers
from _helpers import vz_query
import numpy as np

# Convert the flux tables to float arrays up front: the original code built
# plain Python lists and then indexed them with np.where output, which raises
# TypeError (list indices must be integers, not tuples of arrays).
tgss = np.asarray([[1, 12, 0.003], [2, 22, 32]], dtype=float)
nvss = np.asarray([[12, 2, 13], [12, 2, 0.2]], dtype=float)

# NOTE(review): RadioMap is not imported anywhere in this file — presumably it
# comes from the commented-out `radmap` import above; confirm.
ins = RadioMap()

# Zero out entries below the 0.015 threshold via a boolean mask.
tgss[tgss < 0.015] = 0.0

print(ins.spectral_index(tgss, nvss))
#print(ins.throw_output())
2,
438,
1772,
25,
2488,
615,
498,
87,
1453,
11420,
2,
438,
2,
198,
2,
6738,
2511,
8899,
1330,
2511,
62,
16794,
364,
198,
6738,
4808,
16794,
364,
1330,
410,
89,
62,
22766,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
25297,
8... | 2.155172 | 174 |
from chainer import backend
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
class SquaredDifference(function_node.FunctionNode):
    """Squared difference of input variables."""
    # NOTE(review): only the docstring is visible in this excerpt — a
    # FunctionNode subclass is expected to define forward/backward methods;
    # confirm they exist in the original file (this chunk may be truncated).
def squared_difference(x1, x2):
    """Element-wise squared difference of two input variables.

    Args:
        x1 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variables to be compared.
            A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
        x2 (:class:`~chainer.Variable` or :ref:`ndarray`):
            Input variables to be compared.
            A :math:`(s_1, s_2, ..., s_N)` -shaped float array.

    Returns:
        ~chainer.Variable: ``(x1 - x2) ** 2`` element-wise.
        A :math:`(s_1, s_2, ..., s_N)` -shaped float array.

    .. admonition:: Example

        >>> x1 = np.arange(6).astype(np.float32)
        >>> x1
        array([0., 1., 2., 3., 4., 5.], dtype=float32)
        >>> x2 = np.array([5, 4, 3, 2, 1, 0]).astype(np.float32)
        >>> x2
        array([5., 4., 3., 2., 1., 0.], dtype=float32)
        >>> y = F.squared_difference(x1, x2)
        >>> y.shape
        (6,)
        >>> y.array
        array([25.,  9.,  1.,  1.,  9., 25.], dtype=float32)

    """
    # apply() returns a one-element tuple; unpack it rather than indexing.
    y, = SquaredDifference().apply((x1, x2))
    return y
| [
6738,
6333,
263,
1330,
30203,
198,
6738,
6333,
263,
1330,
2163,
62,
17440,
198,
6738,
6333,
263,
1330,
3384,
4487,
198,
6738,
6333,
263,
13,
26791,
1330,
2099,
62,
9122,
628,
198,
4871,
5056,
1144,
28813,
1945,
7,
8818,
62,
17440,
13,... | 2.140962 | 603 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-22 11:37
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
20,
319,
2177,
12,
3312,
12,
1828,
1367,
25,
2718,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.80303 | 66 |