content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
"""
无人机获取视频流
"""
from djitellopy import tello
import cv2
drone = tello.Tello()
drone.connect()
print(drone.get_battery())
drone.stream_on()
while True:
img = drone.get_frame_read().frame
img = cv2.resize(img, (360, 240))
cv2.imshow("Image", img)
cv2.waitKey(1) | [
37811,
198,
33768,
254,
21689,
17312,
118,
164,
236,
115,
20998,
244,
164,
100,
228,
165,
95,
239,
38184,
223,
198,
37811,
198,
198,
6738,
42625,
578,
297,
11081,
1330,
1560,
78,
198,
11748,
269,
85,
17,
198,
198,
7109,
505,
796,
15... | 1.971831 | 142 |
import os
import torch
import torch.utils.data as data
from PIL import Image
from multiprocessing.dummy import Pool | [
11748,
28686,
198,
11748,
28034,
198,
11748,
28034,
13,
26791,
13,
7890,
355,
1366,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
18540,
305,
919,
278,
13,
67,
13513,
1330,
19850
] | 3.709677 | 31 |
# ==============================================================================
# Copyright 2018 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from .abstract_dataset import Dataset, SyntheticData
from .imagenet import ImagenetData, IMAGENET_NUM_TRAIN_IMAGES, IMAGENET_NUM_VAL_IMAGES
from .cifar10 import Cifar10Data, CIFAR10_NUM_TRAIN_IMAGES, CIFAR10_NUM_VAL_IMAGES
def create_dataset(data_dir, data_name, data_subset):
"""Create a Dataset instance based on data_dir and data_name.
"""
supported_datasets = {
'synthetic': SyntheticData,
'imagenet': ImagenetData,
'cifar10': Cifar10Data,
}
if not data_dir:
data_name = 'synthetic'
if data_name is None:
for supported_name in supported_datasets:
if supported_name in data_dir.lower():
data_name = supported_name
break
if data_name is None:
raise ValueError('Could not identify name of dataset. '
'Please specify with --data_name option.')
if data_name not in supported_datasets:
raise ValueError('Unknown dataset. Must be one of %s', ', '.join(
[key for key in sorted(supported_datasets.keys())]))
return supported_datasets[data_name](data_dir, data_subset)
| [
2,
38093,
25609,
28,
198,
2,
15069,
2864,
3362,
8528,
42124,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
28... | 2.884674 | 659 |
import os
import json
import re
import collections
from bs4 import BeautifulSoup
import urllib.request, urllib.parse, urllib.error
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.models import (
MessageEvent, PostbackEvent, TextMessage, TextSendMessage,
TemplateSendMessage, PostbackAction, ButtonsTemplate
)
from linebot.exceptions import (
LineBotApiError, InvalidSignatureError
)
import logging
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
line_bot_api = LineBotApi(os.environ["LINE_CHANNEL_ACCESS_TOKEN"])
handler = WebhookHandler(os.environ["LINE_CHANNEL_SECRET"])
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
302,
198,
11748,
17268,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
11748,
2956,
297,
571,
13,
25927,
11,
2956,
297,
571,
13,
29572,
11,
2956,
297,
571,
13,
18224,
198,
6738,
1627,... | 2.985646 | 209 |
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
from torch.autograd import Variable
test_transforms = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model=torch.load('safety.pth')
model.eval()
to_pil = transforms.ToPILImage()
images, labels = get_random_images(5)
fig=plt.figure(figsize=(10,10))
for ii in range(len(images)):
image = to_pil(images[ii])
index = predict_image(image)
sub = fig.add_subplot(1, len(images), ii+1)
res = int(labels[ii]) == index
sub.set_title(str(classes[index]) + ":" + str(res))
plt.axis('off')
plt.imshow(image)
plt.show() | [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
1330,
6436,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
... | 2.287411 | 421 |
from .economy import *
__title__ = "DiscordEconomy"
__summary__ = "Discord.py, other libs, and forks(pycord, nextcord etc.) extension to create economy easily."
__uri__ = "https://github.com/Nohet/DiscordEconomy"
__version__ = "1.3.2"
__author__ = "Nohet"
__email__ = "igorczupryniak503@gmail.com"
__license__ = "MIT License"
__copyright__ = f"Copyright 2021 {__author__}"
| [
6738,
764,
13926,
88,
1330,
1635,
201,
198,
201,
198,
834,
7839,
834,
796,
366,
15642,
585,
28489,
88,
1,
201,
198,
834,
49736,
834,
796,
366,
15642,
585,
13,
9078,
11,
584,
9195,
82,
11,
290,
43378,
7,
9078,
66,
585,
11,
1306,
... | 2.565789 | 152 |
import os, re
if "CONDA_PREFIX" in os.environ:
# fix compilation in conda env
if "CUDA_HOME" not in os.environ and os.path.exists(
os.path.join(os.environ["CONDA_PREFIX"], "bin/nvcc")):
print("Detected CONDA_PREFIX containing nvcc but no CUDA_HOME, "
"setting CUDA_HOME=${CONDA_PREFIX}.")
os.environ["CUDA_HOME"] = os.environ["CONDA_PREFIX"]
if "CXX" in os.environ and os.environ["CXX"].startswith(os.environ["CONDA_PREFIX"]):
for FLAG in ["CXXFLAGS", "DEBUG_CXXFLAGS"]:
if FLAG in os.environ and " -std=" in os.environ[FLAG]:
print("Detected CONDA compiler with default std flags set. "
"Removing them to avoid compilation problems.")
os.environ[FLAG] = re.sub(r' -std=[^ ]*', '', os.environ[FLAG])
from setuptools import setup
import torch
from torch.utils import cpp_extension
import glob
ext_modules = [
cpp_extension.CppExtension(
"splatting.cpu",
["cpp/splatting.cpp"],
),
]
if torch.cuda.is_available():
ext_modules.append(
cpp_extension.CUDAExtension(
"splatting.cuda",
["cuda/splatting_cuda.cpp", "cuda/splatting.cu"],
),
)
setup(
name="splatting",
ext_modules=ext_modules,
cmdclass={"build_ext": cpp_extension.BuildExtension},
packages=["splatting"],
install_requires=["torch"],
extras_require={
"dev": ["pytest", "pytest-cov", "pre-commit"]
},
)
| [
11748,
28686,
11,
302,
198,
361,
366,
10943,
5631,
62,
47,
31688,
10426,
1,
287,
28686,
13,
268,
2268,
25,
198,
220,
220,
220,
1303,
4259,
23340,
287,
1779,
64,
17365,
198,
220,
220,
220,
611,
366,
43633,
5631,
62,
39069,
1,
407,
... | 2.133903 | 702 |
#!/usr/bin/env python3
# NOTE: you will need to make sure that you have opencv installed for the confidence argument to work with pyautogui
import pyautogui
import subprocess
import sys
import time
from collections import Counter, namedtuple
card_image_paths = ["../screenshots/" + card + ".png" for card in
["ace",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
"ten",
"jack",
"queen",
"king"]]
# Returns the card that is most likely to be in the given region, where it is, and the
# confidence level that it was found at
if __name__ == "__main__":
print("Scanning screen for piles, this may take a while")
# Calling screenshot once here is much faster than calling locateOnScreen() repeatedly
scr = pyautogui.screenshot()
# This just does a rough initial scan: we do some basic things to clean it up, but it
# doesn't have to be perfect, as we'll scan the cards more closely in a bit
card_positions = {}
for num, img_path in enumerate(card_image_paths, start=1):
card_positions[num] = []
for new_pos in pyautogui.locateAll(needleImage=img_path, haystackImage=scr, grayscale=True, confidence=0.90):
add_pos = True
# Try to avoid adding overlapping regions
for existing_pos in card_positions[num]:
if overlaps(new_pos, existing_pos):
add_pos = False
if num == 12: # "Q" looks a lot like the "0" in "10"
for ten_pos in card_positions[10]:
if overlaps(new_pos, ten_pos):
add_pos = False
if add_pos:
card_positions[num].append(new_pos)
# Some numbers (6, 8, 9, 10) look like themselves or other numbers upside-down, so we
# remove the farthest-down ones
for num in range(1, 14):
while len(card_positions[num]) > 4:
max_y = 0
for pos in card_positions[num]:
max_y = max(max_y, pos.top)
card_positions[num] = [pos for pos in card_positions[num] if pos.top != max_y]
# Find the coordinates of the piles
min_x = 100000
max_x = 0
min_y = 100000
max_y = 0
for card, positions in card_positions.items():
for pos in positions:
min_x = min(min_x, pos.left)
max_x = max(max_x, pos.left)
min_y = min(min_y, pos.top)
max_y = max(max_y, pos.top)
diff_x = max_x - min_x # The distance between pile 0 and pile 3
diff_x /= 3 # The average distance between each pile
# Experimentally-derived adjustment: the "farthest right" number is a little too far
diff_x = int(diff_x * 0.99)
diff_y = max_y - min_y # The distance between the top card and the bottom card
diff_y /= 12 # The average distance between each card in the pile
diff_y = int(diff_y)
# Note that each pile has the bottom-most card first
piles = [[], [], [], []]
# Here we try to read the individual cards more carefully
for card_idx in range(13):
for pile_idx in range(len(piles)):
x_coord = min_x + diff_x*pile_idx
y_coord = min_y + diff_y*card_idx
# Experimentally-derived magic numbers that seem to work well
left = int(x_coord - (diff_x*0.06))
width = int(diff_x*0.24)
top = int(y_coord - (diff_y*0.24))
height = int(diff_y*1.02)
Box = namedtuple("Box", ["left", "top", "width", "height"])
search_box = Box(left, top, width, height)
# Find the most likely candidates for each card, based on our early rough scan
# If a card has several candidates, use the one the computer's most confident in
# If there aren't any candidates, we'll search for every number
possibilities = set()
for card, positions in card_positions.items():
for position in positions:
if overlaps(search_box, position):
possibilities.add(card)
# # For debugging: move the cursor in a box around the card we're trying to ID
# # top-left
# pyautogui.moveTo(x=left, y=top, duration=pyautogui.MINIMUM_DURATION)
# # top-right
# pyautogui.moveTo(x=left+width, y=top, duration=pyautogui.MINIMUM_DURATION)
# # bottom-right
# pyautogui.moveTo(x=left+width, y=top+height, duration=pyautogui.MINIMUM_DURATION)
# # bottom-left
# pyautogui.moveTo(x=left, y=top+height, duration=pyautogui.MINIMUM_DURATION)
# # top-left again
# pyautogui.moveTo(x=left, y=top, duration=pyautogui.MINIMUM_DURATION)
piles[pile_idx].append(mostLikely(scr, search_box, possibilities))
for card_idx in range(13):
for pile_idx in range(len(piles)):
print("{:2d} ".format(piles[pile_idx][card_idx][0]), end='')
print()
while True:
print("If these piles look right to you, hit enter. Otherwise, input corrections in form 'col row corrected_card'")
cmd = input("> ")
if len(cmd) == 0:
break
cmd = cmd.split()
piles[int(cmd[0])][int(cmd[1])][0] = int(cmd[2])
print("Corrected piles:")
for card_idx in range(13):
for pile_idx in range(len(piles)):
print("{:2d} ".format(piles[pile_idx][card_idx][0]), end='')
print()
# Convert the piles list to a string that we can give to the C++ solver program
in_str = ""
for pile in piles:
for card in pile:
in_str += str(card[0])
in_str += "\n"
print("Finding optimal solution")
commands = subprocess.run("./mf83_main", input=in_str, text=True, capture_output=True).stdout
print("Entering solution")
for command in commands.split():
if command == "-":
time.sleep(0.1) # Have to wait for the cards to move on the screen
next_stack_pos = pyautogui.locateCenterOnScreen("../screenshots/next_stack.png", confidence=0.8)
if next_stack_pos:
click(next_stack_pos)
else:
input("Couldn't find next stack button, hit enter after you've clicked it")
else:
pile_idx = int(command)
card_coords = piles[pile_idx][-1][1]
click(card_coords)
piles[pile_idx] = piles[pile_idx][:-1]
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
24550,
25,
345,
481,
761,
284,
787,
1654,
326,
345,
423,
1280,
33967,
6589,
329,
262,
6628,
4578,
284,
670,
351,
12972,
2306,
519,
9019,
198,
11748,
12972,
2306,
519,
901... | 2.239153 | 2,881 |
#! python3
# delUneededFiles.py - Walks through a folder tree and searches for
# exceptionally large files or folders—say, ones
# that have a file size of more than 100MB. Print
# these files with their absolute path to the
# screen.
# Adam Pellot
import os
import shutil
print('Enter the path of the folder you would like to use:')
folder = input()
# Walk the entire folder tree and search files and folders for large files.
for foldername, subfolders, filenames in os.walk(folder):
for subfolder in subfolders:
filePath = os.path.join(foldername, subfolder)
if os.path.getsize(filePath) > 100000000:
print(os.path.abspath(subfolder))
for filename in filenames:
filePath = os.path.join(foldername, filename)
if os.path.getsize(filePath) > 100000000:
print(os.path.abspath(filename))
| [
2,
0,
21015,
18,
198,
2,
1619,
52,
27938,
25876,
13,
9078,
532,
6445,
591,
832,
257,
9483,
5509,
290,
15455,
329,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.460526 | 380 |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from .base_dataset import BaseDataset
class NIHChestDataset(BaseDataset):
"""
Data loader for NIH data set.
Args:
directory: Base directory for data set.
split: String specifying split.
options include:
'all': Include all splits.
'train': Include training split.
label_list: String specifying labels to include. Default is 'all',
which loads all labels.
transform: A composible transform list to be applied to the data.
"""
@staticmethod
| [
37811,
198,
15269,
357,
66,
8,
3203,
11,
3457,
13,
290,
663,
29116,
13,
198,
198,
1212,
2723,
2438,
318,
11971,
739,
262,
17168,
5964,
1043,
287,
262,
198,
43,
2149,
24290,
2393,
287,
262,
6808,
8619,
286,
428,
2723,
5509,
13,
198,
... | 2.86532 | 297 |
import os
import sys
import argparse
import utils
if __name__ == '__main__':
main()
| [
11748,
28686,
201,
198,
11748,
25064,
201,
198,
11748,
1822,
29572,
201,
198,
11748,
3384,
4487,
201,
198,
201,
198,
201,
198,
201,
198,
201,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
201,
198,
220,
220,
220,
1388... | 2.266667 | 45 |
from datasimulator.graph import Graph
from dictionaryutils import dictionary
| [
6738,
19395,
320,
8927,
13,
34960,
1330,
29681,
198,
6738,
22155,
26791,
1330,
22155,
628
] | 5.2 | 15 |
from htk.test_scaffold.models import TestScaffold
from htk.test_scaffold.tests import BaseTestCase
from htk.test_scaffold.tests import BaseWebTestCase
from htk.constants import *
####################
# Finally, import tests from subdirectories last to prevent circular import
from htk.lib.tests import *
from htk.scripts.tests import *
| [
6738,
289,
30488,
13,
9288,
62,
1416,
2001,
727,
13,
27530,
1330,
6208,
3351,
2001,
727,
198,
6738,
289,
30488,
13,
9288,
62,
1416,
2001,
727,
13,
41989,
1330,
7308,
14402,
20448,
198,
6738,
289,
30488,
13,
9288,
62,
1416,
2001,
727,
... | 3.414141 | 99 |
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core import mail
from django.conf import settings
from django.test.utils import override_settings
from django.utils.encoding import force_text
from rest_framework import status
from .test_base import BaseAPITestCase
class APITestCase1(TestCase, BaseAPITestCase):
"""
Case #1:
- user profile: defined
- custom registration: backend defined
"""
urls = 'tests.urls'
USERNAME = 'person'
PASS = 'person'
EMAIL = "person1@world.com"
NEW_PASS = 'new-test-pass'
REGISTRATION_VIEW = 'rest_auth.runtests.RegistrationView'
# data without user profile
REGISTRATION_DATA = {
"username": USERNAME,
"password1": PASS,
"password2": PASS
}
REGISTRATION_DATA_WITH_EMAIL = REGISTRATION_DATA.copy()
REGISTRATION_DATA_WITH_EMAIL['email'] = EMAIL
BASIC_USER_DATA = {
'first_name': "John",
'last_name': 'Smith',
'email': EMAIL
}
USER_DATA = BASIC_USER_DATA.copy()
USER_DATA['newsletter_subscribe'] = True
@override_settings(OLD_PASSWORD_FIELD_ENABLED=True)
def test_password_reset_with_invalid_email(self):
"""
Invalid email should not raise error, as this would leak users
"""
get_user_model().objects.create_user(self.USERNAME, self.EMAIL, self.PASS)
# call password reset
mail_count = len(mail.outbox)
payload = {'email': 'nonexisting@email.com'}
self.post(self.password_reset_url, data=payload, status_code=200)
self.assertEqual(len(mail.outbox), mail_count)
@override_settings(
ACCOUNT_EMAIL_VERIFICATION='mandatory',
ACCOUNT_EMAIL_REQUIRED=True
)
| [
6738,
42625,
14208,
13,
7295,
13,
6371,
411,
349,
690,
1330,
9575,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
651,
62,
7220,
62,
19849,
198,
6738,
42625,
14208,
13,
7... | 2.455285 | 738 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['AppService']
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 3.607143 | 112 |
import torch
import time
import random
import torch.utils.data
import torch.nn.functional as F
from dataset import NASBenchDataset, SplitSubet
from sampler import ArchSampler
from .train_utils import * | [
11748,
28034,
198,
11748,
640,
198,
11748,
4738,
198,
11748,
28034,
13,
26791,
13,
7890,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
27039,
1330,
7210,
44199,
27354,
292,
316,
11,
27758,
7004,
316,
198,
6738,
6072... | 3.690909 | 55 |
#!/usr/bin/env python
# coding: utf-8
# ## Damage and Loss Assessment (12-story RC frame)
#
# This example continues the example2 to conduct damage and loss assessment using the PLoM model and compare the results against the results based on MSA
# ### Run example2
import numpy as np
import random
import time
from math import pi
import pandas as pd
from ctypes import *
import matplotlib.pyplot as plt
import sys
plt.ion()
# ### Import PLoM modules
# In[36]:
sys.path.insert(1, '../../')
from PLoM import *
# ### Load Incremental (IDA) Data
# MSA data are loaded via a comma-separate value (csv) file. The first row contains column names for both predictors (X) and responses (y). The following rows are input sample data. Users are expected to specif the csv filename.
# In[37]:
# Filename
filename = './data/response_frame12_ida_comb.csv'
model = PLoM(model_name='IDA', data=filename, col_header=True, plot_tag=True)
# ### Configuring tasks
# Please specify tasks to run - the list of tasks can be run in sqeunce or invidivdual tasks can be run separately.
# In[38]:
tasks = ['DataNormalization','RunPCA','RunKDE','ISDEGeneration']
# ### Step 0: Scaling the data
# In[39]:
# Configure the task
model.ConfigTasks(['DataNormalization'])
# Launch the run
model.RunAlgorithm()
# ### Step 1: Principal Component Analysis (PCA)
# In[40]:
# Tolerance for truncating principal components
tol_pca = 1e-6
# Configure the task
model.ConfigTasks(['RunPCA'])
# Launch the run
model.RunAlgorithm(epsilon_pca=tol_pca)
# ### Step 2: Kernel Density Estimation (KDE)
# In[41]:
# Smoothing parameter in the KDE
sp = 25
# Configure the task
model.ConfigTasks(['RunKDE'])
# Launch the run
model.RunAlgorithm(epsilon_kde=sp)
# ### Step 3: Create the generator
# In[42]:
# Extra parameters for ISDE generation
new_sample_num_ratio = 20
tol_PCA2 = 1e-5
# Configure the task
model.ConfigTasks(['ISDEGeneration'])
# Launch the run
model.RunAlgorithm(n_mc = new_sample_num_ratio, tol_PCA2 = tol_PCA2)
# ### Step 4: Exporting data
# In[43]:
# Available data list
model.export_results()
# In[44]:
# Pick up the original and new realizations, X0 and X_new
model.export_results(data_list=['/X0','/X_new'], file_format_list=['csv','csv'])
# ### Post-processing
# We would like to check the basic statistics of the input sample (i.e., IDA) and the generated new realizations by PLoM. The key metrics include the median, standard deviation, and correlation coefficient matrix of different structural responses.
# In[45]:
# Load results
df_ida = pd.read_csv('../../RunDir/IDA/DataOut/X0.csv')
df_plom = pd.read_csv('../../RunDir/IDA/DataOut/X_new.csv')
print(df_ida.head)
print(df_plom.head)
# In[46]:
x0 = df_ida.iloc[:,1:].T
x_c = df_plom.iloc[:,1:].T
x_name = x0.index.tolist()
x0 = np.array(x0)
x_c = np.array(x_c)
n = 27
# Correlation coefficient matrix
c_ida = np.corrcoef(x0)
c_plom = np.corrcoef(x_c)
c_combine = c_ida
tmp = np.triu(c_plom).flatten()
tmp = tmp[tmp != 0]
c_combine[np.triu_indices(27)] = tmp
# Plot covariance matrix
fig, ax = plt.subplots(figsize=(8,6))
ctp = ax.contourf(c_combine[3:,3:], cmap=plt.cm.hot, levels=1000)
ctp.set_clim(0,1)
ax.plot([0, 23], [0, 23], 'k--')
ax.set_xticks(list(range(n-3)))
ax.set_yticks(list(range(n-3)))
ax.set_xticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_yticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_title('Covariance matrix comparison')
ax.grid()
cbar = fig.colorbar(ctp,ticks=[x/10 for x in range(11)])
plt.show()
# Plot the cross-section of correlation matrix
fig, ax = plt.subplots(figsize=(6,4))
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r:',label='PLoM')
for i in range(n-3):
ax.plot(np.array(range(n-3)),c_ida[i+3][3:],'k-')
ax.plot(np.array(range(n-3)),c_plom[i+3][3:],'r:')
ax.set_xticks(list(range(n-3)))
ax.set_xticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_ylabel('Correlation coefficient')
ax.set_ylim([0,1])
ax.set_xlim([0,n-4])
ax.legend()
ax.grid()
plt.show()
# ### Hazard Adjustment
# This section can be used to process the PLoM predictions from raw IDA training. Site specific hazard information is needed as an input. An example site hazard csv file is provided, the first column is the Sa intensity, the second column is the median SaRatio, the third column is the median duration, and the last four columns are covariance matrix entries.
# In[47]:
# Load site hazard information
shz = pd.read_csv('./data/site_hazard.csv')
sa_levels = shz['Sa']
print(shz)
print(np.array(shz.iloc[0]['cov11':]).reshape((2,2)))
# In[48]:
# Draw samples from the site distribution
num_rlz = 1000 # sample size
np.random.seed(1) # random seed for replicating results
rlz_imv = []
for i in range(len(shz.index)):
rlz_imv.append(np.random.multivariate_normal(mean=[shz['mSaRatio'][i],shz['mDs'][i]],cov=np.array(shz.iloc[i]['cov11':]).reshape((2,2)),size=num_rlz))
# In[49]:
# Search nearest PLoM data points for each sample in rlz_imv
lnsa_plom = x_c[0]
lnsaratio_plom = x_c[1]
lnds_plom = x_c[2]
# Create the nearest interporator and interpolate data
from scipy.interpolate import NearestNDInterpolator
res_edp = []
for i in range(n-3):
# Loop all EDPs
interp_nn = NearestNDInterpolator(list(zip(lnsa_plom,lnsaratio_plom,lnds_plom)),x_c[3+i])
pred_nn = []
for j in range(len(shz.index)):
# Loop all intensity levels
pred_nn.append(interp_nn(np.ones(rlz_imv[j][:,0].shape)*np.log(shz['Sa'][j]),
rlz_imv[j][:,0],rlz_imv[j][:,1]))
res_edp.append(pred_nn)
fig, ax = plt.subplots(figsize=(6,4))
ax.plot(rlz_imv[0][:,0],rlz_imv[0][:,1],'r.',label='Resample')
plt.show()
# In[50]:
ref_msa = pd.read_csv('./data/response_rcf12_msa_la_nc.csv')
# In[51]:
from sklearn.neighbors import KNeighborsRegressor
neigh = KNeighborsRegressor(n_neighbors=2,weights='distance',algorithm='auto',p=2)
res = []
for i in range(n-3):
# Loop all EDPs
neigh.fit(np.transpose(x_c[0:3]),x_c[i+3])
pred = []
for j in range(len(shz.index)):
# Loop all intensity levels
pred.append(neigh.predict(np.array((np.ones(rlz_imv[j][:,0].shape)*np.log(shz['Sa'][j]),rlz_imv[j][:,0],rlz_imv[j][:,1])).T))
res.append(pred)
# In[52]:
num_story = 12
num_sa = 6
sdr_cur_med_msa = np.zeros((num_story,num_sa))
sdr_cur_std_msa = np.zeros((num_story,num_sa))
sdr_cur_med_plom = np.zeros((num_story,num_sa))
sdr_cur_std_plom = np.zeros((num_story,num_sa))
for i in range(12):
for j in range(6):
sdr_cur_msa = ref_msa.loc[ref_msa['Sa']==shz['Sa'][j]][x_name[i+3][2:]]
sdr_cur_med_msa[i,j] = np.exp(np.mean(np.log(sdr_cur_msa)))
sdr_cur_std_msa[i,j] = np.std(np.log(sdr_cur_msa))
sdr_cur_plom = np.exp(res[i][j])
sdr_cur_med_plom[i,j] = np.exp(np.mean(res[i][j]))
sdr_cur_std_plom[i,j] = np.std(res[i][j])
fig = plt.figure(figsize=(12,8))
story_list = list(range(1,num_story+1))
for i in range(6):
plt.subplot(2,3,i+1)
ax = plt.gca()
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r-',label='PLoM-IDA \nHazard Adjusted')
ax.plot(sdr_cur_med_msa[:,i],story_list,'k-')
ax.plot(sdr_cur_med_msa[:,i]*np.exp(sdr_cur_std_msa[:,i]),story_list,'k--')
ax.plot(sdr_cur_med_msa[:,i]/np.exp(sdr_cur_std_msa[:,i]),story_list,'k--')
ax.plot(sdr_cur_med_plom[:,i],story_list,'r-')
ax.plot(sdr_cur_med_plom[:,i]*np.exp(sdr_cur_std_plom[:,i]),story_list,'r--')
ax.plot(sdr_cur_med_plom[:,i]/np.exp(sdr_cur_std_plom[:,i]),story_list,'r--')
ax.set_xlim(0.0,0.05)
ax.set_ylim(1,12)
ax.grid()
ax.legend()
ax.set_xlabel('$SDR_{max}$ (in/in)')
ax.set_ylabel('Story')
# In[53]:
num_story = 12
num_sa = 6
pfa_cur_med_msa = np.zeros((num_story,num_sa))
pfa_cur_std_msa = np.zeros((num_story,num_sa))
pfa_cur_med_plom = np.zeros((num_story,num_sa))
pfa_cur_std_plom = np.zeros((num_story,num_sa))
for i in range(12):
for j in range(6):
pfa_cur_msa = ref_msa.loc[ref_msa['Sa']==shz['Sa'][j]][x_name[i+15][2:]]
pfa_cur_med_msa[i,j] = np.exp(np.mean(np.log(pfa_cur_msa)))
pfa_cur_std_msa[i,j] = np.std(np.log(pfa_cur_msa))
pfa_cur_plom = np.exp(res[i+12][j])
pfa_cur_med_plom[i,j] = np.exp(np.mean(res[i+12][j]))
pfa_cur_std_plom[i,j] = np.std(res[i+12][j])
fig = plt.figure(figsize=(12,8))
story_list = list(range(1,num_story+1))
for i in range(6):
plt.subplot(2,3,i+1)
ax = plt.gca()
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r-',label='PLoM-IDA \nHazard Adjusted')
ax.plot(pfa_cur_med_msa[:,i],story_list,'k-')
ax.plot(pfa_cur_med_msa[:,i]*np.exp(pfa_cur_std_msa[:,i]),story_list,'k--')
ax.plot(pfa_cur_med_msa[:,i]/np.exp(pfa_cur_std_msa[:,i]),story_list,'k--')
ax.plot(pfa_cur_med_plom[:,i],story_list,'r-')
ax.plot(pfa_cur_med_plom[:,i]*np.exp(pfa_cur_std_plom[:,i]),story_list,'r--')
ax.plot(pfa_cur_med_plom[:,i]/np.exp(pfa_cur_std_plom[:,i]),story_list,'r--')
ax.set_xlim(0.0,1)
ax.set_ylim(1,12)
ax.grid()
ax.legend()
ax.set_xlabel('$PFA$ (g)')
ax.set_ylabel('Story')
# In[54]:
x0_ref = []
for i in range(n):
x0_ref.append([np.log(x) for x in ref_msa.iloc[:, i].values.tolist()])
c_msa = np.corrcoef(x0_ref)
res_conct = []
for i in range(n-3):
tmp = []
for j in range(len(shz.index)):
tmp = tmp+res[i][j].tolist()
res_conct.append(tmp)
c_plom = np.corrcoef(res_conct)
# Plot correlation of resampled data
fig, ax = plt.subplots(figsize=(6,4))
ax.plot([0],[0],'k-',label='MSA')
ax.plot([0],[0],'r:',label='PLoM-IDA (Hazard Adjusted)')
for i in range(n-15):
ax.plot(np.array(range(n-3)),c_msa[i+3][3:],'k-')
ax.plot(np.array(range(n-3)),c_plom[i],'r:')
ax.set_xticks(list(range(n-3)))
ax.set_xticklabels(x_name[3:], fontsize=8, rotation=45)
ax.set_ylabel('Correlation coefficient')
ax.set_ylim([0,1])
ax.set_xlim([0,n-16])
ax.legend()
ax.grid()
plt.show()
fig.savefig('plom_vs_ida_cov.png',dpi=600)
# In[55]:
# Estimation errors
err_med = np.linalg.norm(np.log(sdr_cur_med_plom) - np.log(sdr_cur_med_msa),axis=0)/np.linalg.norm(np.log(sdr_cur_med_msa),axis=0)
err_std = np.linalg.norm(sdr_cur_std_plom - sdr_cur_std_msa,axis=0)/np.linalg.norm(sdr_cur_std_msa,axis=0)
# Plot
fig, ax = plt.subplots(figsize=(6,6))
ax.plot(list(range(6)),err_med,'ko-',label='Mean EDP')
ax.plot(list(range(6)),err_std,'rs-',label='Standard deviation EDP')
ax.set_xticks(list(range(6)))
ax.set_xticklabels(['Sa = '+str(x)+'g' for x in sa_levels],rotation=30)
ax.set_xlim([0,5])
ax.set_ylim([0,1])
ax.set_ylabel('MSE')
ax.grid()
ax.legend()
plt.show()
# Save
np.savetxt('plom_ida.csv',np.exp(np.array(res_conct)).T,delimiter=',')
# Generate uncorrelated samples for comparison
num_uc = 1000 # sample size (per Sa level)
uc_sample = pd.DataFrame()
for j in range(num_sa):
for i in range(num_story):
uc_sample['1-PID-'+str(i+1)+'-1'] = np.exp(np.random.normal(loc=np.log(sdr_cur_med_plom[i,j]),scale=sdr_cur_std_plom[i,j],size=num_uc))
uc_sample['1-PFA-'+str(i+1)+'-1'] = 0.0*np.exp(np.random.normal(loc=np.log(pfa_cur_med_plom[i,j]),scale=pfa_cur_std_plom[i,j],size=num_uc))
uc_sample['1-PRD-1-1'] = uc_sample['1-PID-2-1']
uc_sample.to_csv('plom_ida_uc_s'+str(j+1)+'.csv',index_label='#Num')
# ### Damage and Loss
# This section is going to process the structural damage and loss estimation results. The SDR data are used as the input EDP to pelicun. Lognormal distribution is assumed for the input SDR sample in pelicun. The HAZUS-MH module is used, and the damage model is selected for high-rise concrete moment frame (C1H) with moderate-code design level and the occupancy type of COM1. Comparisons between MSA and PLoM results are made.
# In[11]:
# Damage states
import pandas as pd
df_damage = pd.DataFrame()
for i in range(4):
df_tmp = pd.read_csv('./data/'+'msa_s'+str(i+1)+'/DL_summary.csv')
df_damage['msa-s'+str(i+1)] = df_tmp['highest_damage_state/S'] # extract the structural damage states
df_tmp = pd.read_csv('./data/'+'plom_s'+str(i+1)+'/DL_summary.csv')
df_damage['plom-s'+str(i+1)] = df_tmp['highest_damage_state/S'] # extract the structural damage states
df_tmp = pd.read_csv('./data/'+'plom_uc_s'+str(i+1)+'/DL_summary.csv')
df_damage['plom-uc-s'+str(i+1)] = df_tmp['highest_damage_state/S'] # extract the structural damage states
for i in range(4):
fig, ax = plt.subplots(figsize=(6,4))
ax.hist(df_damage['msa-s'+str(i+1)],bins=5,range=(0.0,4.0),alpha=0.5,label='MSA, mean = '+str(np.round(np.mean(df_damage['msa-s'+str(i+1)]),3)))
ax.hist(df_damage['plom-s'+str(i+1)],bins=5,range=(0.0,4.0),alpha=0.5,label='PLoM, mean = '+str(np.round(np.mean(df_damage['plom-s'+str(i+1)]),3)))
ax.hist(df_damage['plom-uc-s'+str(i+1)],bins=5,range=(0.0,4.0),alpha=0.5,label='PLoM uncorr., mean = '+str(np.round(np.mean(df_damage['plom-uc-s'+str(i+1)]),3)))
ax.set_xlim([0.0,4])
ax.set_xlabel('Structural damage state')
ax.set_ylabel('Num. of realizations')
ax.legend()
ax.grid()
ax.set_title('Non-collapse damage states, Sa = '+str(sa_levels[i])+'g')
plt.show()
# In[12]:
# Expected loss ratios
# Same comparison as above but for the reconstruction cost (loss ratio).
import pandas as pd
df_loss = pd.DataFrame()
for i in range(4):
    df_tmp = pd.read_csv('./data/'+'msa_s'+str(i+1)+'/DL_summary.csv')
    df_loss['msa-s'+str(i+1)] = df_tmp['reconstruction/cost'] # extract the reconstruction cost (loss ratio)
    df_tmp = pd.read_csv('./data/'+'plom_s'+str(i+1)+'/DL_summary.csv')
    df_loss['plom-s'+str(i+1)] = df_tmp['reconstruction/cost'] # extract the reconstruction cost (loss ratio)
    df_tmp = pd.read_csv('./data/'+'plom_uc_s'+str(i+1)+'/DL_summary.csv')
    df_loss['plom-uc-s'+str(i+1)] = df_tmp['reconstruction/cost'] # extract the reconstruction cost (loss ratio)
# Overlaid histograms of loss ratio, one figure per Sa level.
for i in range(4):
    fig, ax = plt.subplots(figsize=(6,4))
    ax.hist(df_loss['msa-s'+str(i+1)],bins=5,range=(0.0,1.0),alpha=0.5,label='MSA, mean = '+str(np.round(np.mean(df_loss['msa-s'+str(i+1)]),3)))
    ax.hist(df_loss['plom-s'+str(i+1)],bins=5,range=(0.0,1.0),alpha=0.5,label='PLoM, mean = '+str(np.round(np.mean(df_loss['plom-s'+str(i+1)]),3)))
    ax.hist(df_loss['plom-uc-s'+str(i+1)],bins=5,range=(0.0,1.0),alpha=0.5,label='PLoM uncorr., mean = '+str(np.round(np.mean(df_loss['plom-uc-s'+str(i+1)]),3)))
    ax.set_xlim([0.0,1])
    ax.set_xlabel('Loss ratio')
    ax.set_ylabel('Num. of realizations')
    ax.legend()
    ax.grid()
    ax.set_title('Non-collapse loss ratio, Sa = '+str(sa_levels[i])+'g')
    plt.show()
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
22492,
8995,
290,
22014,
25809,
357,
1065,
12,
13571,
13987,
5739,
8,
198,
2,
220,
198,
2,
770,
1672,
4477,
262,
1672,
17,
284,
3189,
... | 2.17392 | 6,687 |
from __future__ import division
| [
6738,
11593,
37443,
834,
1330,
7297,
198
] | 4.571429 | 7 |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from exchange import injective_auction_rpc_pb2 as exchange_dot_injective__auction__rpc__pb2
class InjectiveAuctionRPCStub(object):
    """InjectiveAuctionRPC defines gRPC API of the Auction API.

    NOTE: generated by the gRPC Python protocol compiler (see file header);
    do not hand-edit the RPC wiring below.
    """
    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary request/response: historical info for a single auction round.
        self.AuctionEndpoint = channel.unary_unary(
                '/injective_auction_rpc.InjectiveAuctionRPC/AuctionEndpoint',
                request_serializer=exchange_dot_injective__auction__rpc__pb2.AuctionRequest.SerializeToString,
                response_deserializer=exchange_dot_injective__auction__rpc__pb2.AuctionResponse.FromString,
                )
        # Unary request/response: historical info for all auctions.
        self.Auctions = channel.unary_unary(
                '/injective_auction_rpc.InjectiveAuctionRPC/Auctions',
                request_serializer=exchange_dot_injective__auction__rpc__pb2.AuctionsRequest.SerializeToString,
                response_deserializer=exchange_dot_injective__auction__rpc__pb2.AuctionsResponse.FromString,
                )
        # Server-streaming RPC: yields new auction bids as they arrive.
        self.StreamBids = channel.unary_stream(
                '/injective_auction_rpc.InjectiveAuctionRPC/StreamBids',
                request_serializer=exchange_dot_injective__auction__rpc__pb2.StreamBidsRequest.SerializeToString,
                response_deserializer=exchange_dot_injective__auction__rpc__pb2.StreamBidsResponse.FromString,
                )
class InjectiveAuctionRPCServicer(object):
    """InjectiveAuctionRPC defines gRPC API of the Auction API.

    Generated base class: subclass and override the handlers below to
    implement the service; the defaults report UNIMPLEMENTED.
    """
    def AuctionEndpoint(self, request, context):
        """Provide historical auction info for a given auction
        """
        # Default behaviour until a subclass overrides this handler.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def Auctions(self, request, context):
        """Provide the historical auctions info
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def StreamBids(self, request, context):
        """StreamBids streams new bids of an auction.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
# This class is part of an EXPERIMENTAL API.
class InjectiveAuctionRPC(object):
"""InjectiveAuctionRPC defines gRPC API of the Auction API.
"""
@staticmethod
@staticmethod
@staticmethod
| [
2,
2980,
515,
416,
262,
308,
49,
5662,
11361,
8435,
17050,
13877,
13,
8410,
5626,
48483,
0,
198,
37811,
11792,
290,
4382,
6097,
11188,
284,
1237,
672,
3046,
12,
23211,
2594,
526,
15931,
198,
11748,
1036,
14751,
198,
198,
6738,
5163,
1... | 2.431217 | 1,134 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import jinja2
import os
import time
import json
import xml.etree.ElementTree as ET
from google.appengine.api.files.file import listdir as ls
from google.appengine.api import mail
# Jinja2 environment rooted at ./pages next to this file; autoescape on to
# prevent HTML injection in rendered templates.
template_dir = os.path.join(os.path.dirname(__file__), "pages")
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
                               autoescape = True)
# URL routing table; the handler classes are defined elsewhere in this module.
app = webapp2.WSGIApplication([
    ('/', mainpageHandler),
    ('/documents', show_documents),
], debug=True)
| [
2,
15069,
1584,
3012,
3457,
13,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
201,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
201,... | 2.83 | 400 |
from Voicelab.pipeline.Node import Node
import parselmouth
from parselmouth.praat import call
from Voicelab.toolkits.Voicelab.VoicelabNode import VoicelabNode
from Voicelab.toolkits.Voicelab.MeasureFormantNode import MeasureFormantNode
from Voicelab.VoicelabWizard.VoicelabTab import VoicelabTab
from Voicelab.VoicelabWizard.F1F2PlotWindow import F1F2PlotWindow
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import io
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.patches import Ellipse
import matplotlib.transforms as transforms
from numpy import random
from scipy.spatial import distance
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy import stats
###################################################################################################
# F1F2PlotNode
# WARIO pipeline node for estimating the vocal tract of a voice.
###################################################################################################
# ARGUMENTS
# 'voice' : sound file generated by parselmouth praat
# 'state' : saves formant data for each voice for processing in end method
###################################################################################################
# RETURNS : an unnamed string to keep the pipeline running
# : saves an image file to disk: 'f1f2.png
###################################################################################################
def hz_to_bark(hz):
    """Convert a frequency from Hertz to the Bark psychoacoustic scale.

    Parameters
    ----------
    hz : float or array-like
        Frequency in Hz (numpy broadcasting applies for arrays).

    Returns
    -------
    float or numpy.ndarray
        The same frequency expressed in Bark.
    """
    # 7 * asinh(hz / 650), spelled out as log(x + sqrt(1 + x**2)).
    scaled = hz / 650
    return 7 * np.log(scaled + np.sqrt(1 + scaled ** 2))
def gather_data():
    """Load the Peterson & Barney (1952) vowel formant table from Praat.

    Takes no arguments.

    Returns
    -------
    pandas.DataFrame
        The formant table with NaN rows dropped, plus 'F1 Bark' and
        'F2 Bark' columns computed via hz_to_bark.
    """
    table = call("Create formant table (Peterson & Barney 1952)")
    listing = call(table, "List", True)
    frame = pd.read_csv(io.StringIO(listing), sep='\t', header=0).dropna()
    # Add Bark-scale versions of both formant columns.
    for column in ('F1', 'F2'):
        frame[column + ' Bark'] = hz_to_bark(frame[column])
    return frame
| [
6738,
20687,
291,
417,
397,
13,
79,
541,
4470,
13,
19667,
1330,
19081,
198,
11748,
1582,
741,
14775,
198,
6738,
1582,
741,
14775,
13,
79,
430,
265,
1330,
869,
198,
6738,
20687,
291,
417,
397,
13,
25981,
74,
896,
13,
42144,
291,
417,... | 3.159292 | 791 |
import abc
import functools
import hashlib
import itertools
import logging
import os
import typing as t
from pathlib import Path
from .cache import CacheManager, ENABLE_CACHING
from .core import PkgFile
from .pkg_helpers import (
normalize_pkgname,
is_listed_path,
guess_pkgname_and_version,
)
if t.TYPE_CHECKING:
from .config import _ConfigCommon as Configuration
log = logging.getLogger(__name__)
PathLike = t.Union[str, os.PathLike]
def write_file(fh: t.BinaryIO, destination: PathLike, chunk_size: int = 2**20) -> None:
    """Write a byte stream into a destination file.

    Copies from the stream's current position to EOF in fixed-size chunks to
    keep the memory footprint small, then restores the stream position so
    callers can re-read the payload.

    :param fh: readable binary stream, positioned at the first byte to copy
    :param destination: path of the file to (over)write
    :param chunk_size: bytes copied per read; defaults to 1 MB (generalized
        from the previously hard-coded constant, default unchanged)
    """
    offset = fh.tell()
    try:
        with open(destination, "wb") as dest:
            for chunk in iter(lambda: fh.read(chunk_size), b""):
                dest.write(chunk)
    finally:
        # Restore the original stream position even if the copy failed.
        fh.seek(offset)
def digest_file(file_path: PathLike, hash_algo: str) -> str:
    """
    Reads and digests a file according to specified hashing-algorith.

    :param file_path: path to a file on disk
    :param hash_algo: any algo contained in :mod:`hashlib`
    :return: <hash_algo>=<hex_digest>

    From http://stackoverflow.com/a/21565932/548792
    """
    CHUNK = 2**16
    h = hashlib.new(hash_algo)
    # Stream the file in 64 KiB chunks so large files never load whole.
    with open(file_path, "rb") as stream:
        while True:
            chunk = stream.read(CHUNK)
            if not chunk:
                break
            h.update(chunk)
    return "{}={}".format(hash_algo, h.hexdigest())
PkgFunc = t.TypeVar("PkgFunc", bound=t.Callable[..., t.Iterable[PkgFile]])
| [
11748,
450,
66,
198,
11748,
1257,
310,
10141,
198,
11748,
12234,
8019,
198,
11748,
340,
861,
10141,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
19720,
355,
256,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
764,
23870,
1330,
... | 2.482428 | 626 |
import argparse
from pathlib import Path
from lib.photobooth import Photobooth, PhotoPrinter, RandomStaticPhoto
if __name__ == "__main__":
    # Script entry point; main() is defined elsewhere in this file.
    main()
| [
11748,
1822,
29572,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
6738,
9195,
13,
38611,
672,
5226,
1330,
5919,
672,
5226,
11,
5555,
6836,
3849,
11,
14534,
45442,
6191,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
... | 3.142857 | 49 |
"""Processor for the latent graph
In the original paper the processor is described as
The Processor iteratively processes the 256-channel latent feature data on the icosahedron grid
using 9 rounds of message-passing GNNs. During each round, a node exchanges information with itself
and its immediate neighbors. There are residual connections between each round of processing.
"""
import torch
from graph_weather.models.layers.graph_net_block import GraphProcessor
class Processor(torch.nn.Module):
    """Processor for the latent graph: rounds of message passing via GraphProcessor."""

    def __init__(
        self,
        input_dim: int = 256,
        edge_dim: int = 256,
        num_blocks: int = 9,
        hidden_dim_processor_node=256,
        hidden_dim_processor_edge=256,
        hidden_layers_processor_node=2,
        hidden_layers_processor_edge=2,
        mlp_norm_type="LayerNorm",
    ):
        """
        Latent graph processor

        Args:
            input_dim: Input dimension for the node
            edge_dim: Edge input dimension
            num_blocks: Number of message passing blocks (9 rounds in the paper)
            hidden_dim_processor_node: Hidden dimension of the node processors
            hidden_dim_processor_edge: Hidden dimension of the edge processors
            hidden_layers_processor_node: Number of hidden layers in the node processors
            hidden_layers_processor_edge: Number of hidden layers in the edge processors
            mlp_norm_type: Type of norm for the MLPs
                one of 'LayerNorm', 'GraphNorm', 'InstanceNorm', 'BatchNorm', 'MessageNorm', or None
        """
        super().__init__()
        # Build the default graph
        # Take features from encoder and put into processor graph
        self.input_dim = input_dim
        self.graph_processor = GraphProcessor(
            num_blocks,
            input_dim,
            edge_dim,
            hidden_dim_processor_node,
            hidden_dim_processor_edge,
            hidden_layers_processor_node,
            hidden_layers_processor_edge,
            mlp_norm_type,
        )

    def forward(self, x: torch.Tensor, edge_index, edge_attr) -> torch.Tensor:
        """
        Run the message-passing rounds over the latent graph.

        Args:
            x: Torch tensor containing node features
            edge_index: Connectivity of graph, of shape [2, Num edges] in COO format
            edge_attr: Edge attributes in [Num edges, Features] shape

        Returns:
            torch Tensor containing the values of the nodes of the graph
            (the updated edge attributes returned by GraphProcessor are discarded)
        """
        out, _ = self.graph_processor(x, edge_index, edge_attr)
        return out
| [
37811,
18709,
273,
329,
262,
41270,
4823,
198,
198,
818,
262,
2656,
3348,
262,
12649,
318,
3417,
355,
198,
198,
464,
32893,
11629,
9404,
7767,
262,
17759,
12,
17620,
41270,
3895,
1366,
319,
262,
14158,
8546,
704,
1313,
10706,
198,
3500,... | 2.501927 | 1,038 |
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import binascii
import fnmatch
import pathlib
import re
from typing import Type, List
from commit_validation.commit_validation import Commit, CommitValidator, SOURCE_FILE_EXTENSIONS, EXCLUDED_VALIDATION_PATTERNS, VERBOSE
class CrcValidator(CommitValidator):
"""A file-level validator that makes sure a file does not contain an invalid CRC"""
def get_validator() -> Type[CrcValidator]:
    """Returns the validator class for this module"""
    # Presumably the commit_validation framework discovers validators by
    # calling each module's get_validator() -- confirm against the loader.
    return CrcValidator
| [
2,
198,
2,
15069,
357,
66,
8,
25767,
669,
284,
262,
4946,
513,
35,
7117,
4935,
13,
198,
2,
1114,
1844,
6634,
290,
5964,
2846,
3387,
766,
262,
38559,
24290,
379,
262,
6808,
286,
428,
6082,
13,
198,
2,
198,
2,
30628,
55,
12,
34156... | 3.341463 | 205 |
# Standard resistor colour code: the first two bands are the significant
# digits and the third band is the power-of-ten multiplier.
resistor = {'black':0, 'brown':1, 'red':2, 'orange':3, 'yellow':4, 'green': 5, 'blue':6, 'violet':7, 'grey':8, 'white':9}

# Read the three colour bands from stdin, one per line.
first = str(resistor[input()])
second = str(resistor[input()])
multiplier = 10**resistor[input()]

# int() tolerates a leading zero ('01' -> 1), so the two digit strings can
# be concatenated directly.  The original code compared the digit *strings*
# against the integer 0 and an int against '' -- both comparisons were
# always False (dead code) and, had they ever fired, int('') would have
# raised ValueError.  Those branches are removed; reachable behaviour is
# unchanged.
value = int(first + second)
print(value * multiplier)
| [
411,
32380,
796,
1391,
6,
13424,
10354,
15,
11,
705,
33282,
10354,
16,
11,
705,
445,
10354,
17,
11,
705,
43745,
10354,
18,
11,
705,
36022,
10354,
19,
11,
705,
14809,
10354,
642,
11,
705,
17585,
10354,
21,
11,
705,
85,
19194,
10354,
... | 2.424837 | 153 |
'''
Problem Name: Grade The Steel
Problem Code: FLOW014
Problem Type: https://www.codechef.com/problems/school
Problem Link: https://www.codechef.com/problems/FLOW014
Solution Link: https://www.codechef.com/viewsolution/46845982
'''
from sys import stdin, stdout
if __name__ == '__main__':
    # First stdin line is the number of test cases; main() handles the rest.
    t = int(stdin.readline())
    main(t)
| [
7061,
6,
198,
220,
220,
220,
20647,
6530,
25,
22653,
383,
7851,
198,
220,
220,
220,
20647,
6127,
25,
9977,
3913,
28645,
198,
220,
220,
220,
20647,
5994,
25,
3740,
1378,
2503,
13,
19815,
721,
258,
69,
13,
785,
14,
1676,
22143,
14,
... | 2.51773 | 141 |
from pytest_mock import MockerFixture
from pipert2.core.base.synchronise_routines.synchroniser_node import SynchroniserNode
| [
6738,
12972,
9288,
62,
76,
735,
1330,
337,
12721,
37,
9602,
198,
6738,
279,
9346,
83,
17,
13,
7295,
13,
8692,
13,
28869,
11413,
786,
62,
81,
448,
1127,
13,
28869,
11413,
5847,
62,
17440,
1330,
16065,
11413,
5847,
19667,
628,
628,
62... | 2.954545 | 44 |
#! /usr/bin/env python
# coding=utf-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
import re
from PIL import Image
import xml.etree.ElementTree as ET
from xml.etree import ElementTree # 导入ElementTree模块
# Tensor names to fetch from the frozen YOLOv3 graph: the input placeholder
# plus the three per-scale bounding-box prediction tensors.
return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0",
                   "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_bee.pb"  # frozen TensorFlow graph (bee detector)
dirpath = './VOC2007/JPEGImages/'  # input images, Pascal-VOC layout
xmlpath = './VOC2007/Annotations/'  # Pascal-VOC XML annotations directory
if __name__ == '__main__':
    # main() is defined elsewhere in this module.
    main()
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
11748,
28686,
198,
418,
13,
268,
2268,
17816,
10234,
62,
8697,
47,
62,
23678,
62,
25294,
62,
2538,
18697,
20520,
11639,
17,
6,
198,
11748,
... | 2.193916 | 263 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# export.py - Exports enumerated data for reachable nodes into a JSON file.
#
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Exports enumerated data for reachable nodes into a JSON file.
"""
import json
import logging
import os
import sys
import time
from binascii import hexlify, unhexlify
from ConfigParser import ConfigParser
from utils import new_redis_conn
REDIS_CONN = None
CONF = {}
def get_row(node):
    """
    Returns enumerated row data from Redis for the specified node.

    The returned tuple is: (address, port, version, user_agent, timestamp,
    services) + (height,) + (hostname,) + the 7-field geoip tuple.
    """
    # address, port, version, user_agent, timestamp, services
    # NOTE(review): `node` arrives as the string repr of a tuple and is
    # rebuilt with eval().  eval() on data read out of Redis executes
    # arbitrary expressions; ast.literal_eval would be the safe drop-in if
    # the stored values are plain literals -- confirm before changing.
    node = eval(node)
    address = node[0]
    port = node[1]
    services = node[-1]
    height = REDIS_CONN.get('height:{}-{}-{}'.format(address, port, services))
    if height is None:
        height = (0,)  # unknown block height defaults to 0
    else:
        height = (int(height),)
    hostname = REDIS_CONN.hget('resolve:{}'.format(address), 'hostname')
    hostname = (hostname,)
    geoip = REDIS_CONN.hget('resolve:{}'.format(address), 'geoip')
    if geoip is None:
        # city, country, latitude, longitude, timezone, asn, org
        geoip = (None, None, 0.0, 0.0, None, None, None)
    else:
        # NOTE(review): eval() on Redis data again -- see note above.
        geoip = eval(geoip)
    return node + height + hostname + geoip
MAX_DUMPED_SNAPSHOTS = 500
def export_nodes(nodes, timestamp):
    """
    Merges enumerated data for the specified nodes and exports them into
    timestamp-prefixed JSON file.
    """
    rows = []
    start = time.time()
    for node in nodes:
        row = get_row(node)
        rows.append(row)
    end = time.time()
    elapsed = end - start
    logging.info("Elapsed: %d", elapsed)
    dump = os.path.join(CONF['export_dir'], "{}.json".format(timestamp))
    # NOTE(review): json.dumps(..., encoding=...) is Python 2 only (this
    # module imports ConfigParser, so it targets Py2); the file handle is
    # closed implicitly via refcounting rather than a with-block.
    open(dump, 'w').write(json.dumps(rows, encoding="latin-1"))
    # Keep a bounded list of the most recent snapshot timestamps.
    REDIS_CONN.lpush('dumped_snapshots', timestamp)
    REDIS_CONN.ltrim('dumped_snapshots', 0, MAX_DUMPED_SNAPSHOTS)
    logging.info("Wrote %s", dump)
def init_conf(argv):
    """Populate the module-level CONF dict from the configuration file.

    :param argv: command-line vector; argv[1] is the config file path.
    """
    parser = ConfigParser()
    parser.read(argv[1])
    CONF['logfile'] = parser.get('export', 'logfile')
    # Magic number is stored hex-encoded in the config file.
    CONF['magic_number'] = unhexlify(parser.get('export', 'magic_number'))
    CONF['db'] = parser.getint('export', 'db')
    CONF['debug'] = parser.getboolean('export', 'debug')
    CONF['export_dir'] = parser.get('export', 'export_dir')
    # Make sure the export target directory exists before any dump is written.
    if not os.path.exists(CONF['export_dir']):
        os.makedirs(CONF['export_dir'])
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
10784,
13,
9078,
532,
1475,
3742,
27056,
515,
1366,
329,
3151,
540,
13760,
656,
257,
19449,
2393,
13,
198,... | 2.776834 | 1,295 |
#!/usr/bin/python3
import os
import csv
import sys
import argparse
from statistics import median
from collections import OrderedDict
if __name__ == "__main__":
    # Command-line entry point: parse arguments and produce the report.
    parser = argparse.ArgumentParser()
    parser.add_argument("datafile", type=str, help="name of file with employees data")
    parser.add_argument("-g", "--group_size", type=int, help="count of skills in skills group")
    args = parser.parse_args()
    # Forward only the options the user actually supplied, so Report's own
    # defaults apply to everything left as None.
    defined = {k: v for k, v in vars(args).items() if v is not None}
    report = Report(**defined)
    report.write_report()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
7869,
1330,
14288,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
628,
198,
198,
361,
11593,
3672... | 3 | 181 |
import mongoengine.fields as fields
from base.db.mongo import MongoBaseModel, Decimal2Field
| [
11748,
285,
25162,
18392,
13,
25747,
355,
7032,
198,
6738,
2779,
13,
9945,
13,
76,
25162,
1330,
42591,
14881,
17633,
11,
4280,
4402,
17,
15878,
628,
628
] | 3.518519 | 27 |
import os
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
Base = declarative_base()  # declarative base class for the ORM models
engine = create_engine(os.environ['DATABASE_URL_NOTION'])  # KeyError if env var unset
DBSession = sessionmaker(bind=engine)
session = DBSession()  # module-level session shared by importers
# Create all mapped tables on first run, keyed off the 'users' table.
# NOTE(review): engine.dialect.has_table(engine, ...) is a legacy SQLAlchemy
# calling convention (newer releases expect a connection / inspect());
# confirm the pinned SQLAlchemy version before upgrading.
if not engine.dialect.has_table(engine, 'users'):
    Base.metadata.create_all(engine)
| [
11748,
28686,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
34142,
11,
10903,
198,
6738,
... | 3.06383 | 141 |
from django.apps import AppConfig | [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934
] | 4.125 | 8 |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from math import radians
if __name__ == '__main__':
    # ROS node entry point; DrawASquare is defined elsewhere in this file.
    DrawASquare()
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
6738,
22939,
62,
907,
14542,
13,
19662,
1330,
44088,
198,
6738,
10688,
1330,
2511,
1547,
198,
220,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
83... | 2.75 | 52 |
"""
Exports the trained networks to the renderer
"""
import sys
import os
sys.path.insert(0, os.getcwd())
import torch
import h5py
import argparse
import io
from typing import Union
from collections import OrderedDict
from tests.volnet.network import InputParametrization, OutputParametrization, SceneNetwork
from diffdvr.utils import renderer_dtype_torch
import pyrenderer
if __name__ == '__main__':
    # Script entry point; __main() is defined elsewhere in this module.
    __main()
| [
37811,
198,
3109,
3742,
262,
8776,
7686,
284,
262,
9851,
11882,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
1136,
66,
16993,
28955,
198,
198,
11748,
28034,
198,
11748,
... | 3.248062 | 129 |
#!/usr/bin/env python
import glob, os, argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, help="input FASTA")
parser.add_argument("-o", "--output", type=str, help="output files path and prefix")
args = parser.parse_args()
# Parse every FASTA matched by the input glob into:
#   header_list -- every header line seen, in first-seen order (newline kept,
#                  so headers are compared verbatim across files)
#   seq_dict    -- {file: {header line: concatenated sequence}}
#   len_dict    -- {file: sequence length}; only the LAST sequence's length
#                  is kept per file, which assumes the file is an alignment
#                  where all sequences share one length -- TODO confirm.
fastas = glob.glob(args.input)
header_list = []
seq_dict = {}
len_dict = {}
for f in fastas:
    seq_dict[f] = {}
    with open(f, "r") as ifile:
        line = ifile.readline()
        seq = ""
        while line != "":
            if line[0] == ">":
                if line not in header_list:
                    header_list.append(line)
                header = line
                seq = ""
                line = ifile.readline()
                # Accumulate wrapped sequence lines until the next header.
                while line != "" and line[0] != ">":
                    seq += line.strip()
                    line = ifile.readline()
                seq_dict[f][header] = seq
                len_dict[f] = len(seq)
print(header_list)
# Write one "_sorted" FASTA per input file, emitting every known header in
# the same global order; records missing from a file are padded with an
# all-gap sequence of that file's alignment length.
for f in fastas:
    out_lines = []
    for rec in header_list:
        if rec in seq_dict[f]:
            out_lines.append(F"{rec}{seq_dict[f][rec]}\n")
        else:
            # Record absent from this locus: emit gaps only.
            out_lines.append(F"{rec}{'-'*len_dict[f]}\n")
    # BUG FIX: f.strip(".fasta") strips any of the characters '.', 'f',
    # 'a', 's', 't' from BOTH ends of the path (e.g. "data.fasta" -> "d"),
    # mangling output names.  Remove only the trailing extension instead.
    base = f[:-len(".fasta")] if f.endswith(".fasta") else f
    out_name = ".." + base + "_sorted.fasta"
    with open(out_name, "w") as ofile:
        ofile.write("".join(out_lines))

# Total alignment length across all loci (each also echoed to stdout).
char_sum = 0
for i in len_dict:
    print(F"{i} {len_dict[i]}")
    char_sum += len_dict[i]
### Make PHYLIP file for RAxML
# Header: taxon count and total character count, then one row per taxon
# concatenating every locus (missing loci padded with 'N').
buffer = F"{len(header_list)} {char_sum}\n"
fastas.sort()
for i in header_list:
    buffer += F"{i.strip().strip('>')} "
    for locus in fastas:
        if i in seq_dict[locus]:
            buffer += F"{external_unknown(seq_dict[locus][i])}"
        else:
            buffer += F"{'N'*len_dict[locus]}"
    buffer += "\n"
with open(F"{args.output}.phy", "w") as ofile:
    ofile.write(buffer)

# RAxML partition file: one DNA partition per locus with 1-based ranges.
with open(F"{args.output}_part_file.txt", "w") as ofile:
    start = 1
    for locus in fastas:
        # BUG FIX: str.strip() removes a *character set* from both ends,
        # not a literal prefix/suffix, so strip("aligned.") and
        # strip("_trimmed.fasta") mangled locus names.  Remove the literal
        # prefix and suffix instead.
        out_locus = locus.split("/")[-1]
        if out_locus.startswith("aligned."):
            out_locus = out_locus[len("aligned."):]
        if out_locus.endswith("_trimmed.fasta"):
            out_locus = out_locus[:-len("_trimmed.fasta")]
        if "SSU-LSU." in out_locus:
            out_locus = out_locus.split("SSU-LSU.")[1]
        if "1870" in out_locus:
            out_locus = "XDH"
        stop = start + len_dict[locus] - 1
        ofile.write(F"DNA, {out_locus} = {start}-{stop}\n")
        start = stop + 1
### Make NEXUS for Mr. Bayes
# Width used to right-pad taxon names so matrix rows line up.
# BUG FIX: the original loop compared len(header_list) (the number of
# headers) against max_len, so max_len ended up as the record count rather
# than the longest cleaned header name.
max_len = 0
for i in header_list:
    name_len = len(i.strip().strip('>'))
    if name_len > max_len:
        max_len = name_len
buffer = F"#NEXUS\n\nBEGIN DATA;\n\tDIMENSIONS NTAX={len(header_list)} NCHAR={char_sum};\n"
buffer += F"\tFORMAT DATATYPE = DNA GAP = - MISSING = ?;\n\tMATRIX\n"
for i in header_list:
    head_len = i.strip().strip('>')
    buffer += F"\t{head_len + (max_len-len(head_len))*' '}"
    for locus in fastas:
        if i in seq_dict[locus]:
            buffer += F"{external_unknown(seq_dict[locus][i], char='?')}"
        else:
            # Missing locus for this taxon: fill with '?' (NEXUS MISSING).
            buffer += F"{'?'*len_dict[locus]}"
    buffer += "\n"
buffer += "\n;\n\nEND;\n\nbegin mrbayes;\n\tset autoclose=yes nowarn=yes; \n\n\n"
# charset block: one per locus, with the same name cleaning as the
# partition file.
out_locus_list = []
start = 1
for locus in fastas:
    # BUG FIX: replaced strip("aligned.")/strip("_trimmed.fasta") -- which
    # strip character SETS from both ends -- with literal prefix/suffix
    # removal.
    out_locus = locus.split("/")[-1]
    if out_locus.startswith("aligned."):
        out_locus = out_locus[len("aligned."):]
    if out_locus.endswith("_trimmed.fasta"):
        out_locus = out_locus[:-len("_trimmed.fasta")]
    if "SSU-LSU." in out_locus:
        out_locus = out_locus.split("SSU-LSU.")[1]
    if "1870" in out_locus:
        out_locus = "XDH"
    stop = start + len_dict[locus] - 1
    buffer += F"\tcharset {out_locus} = {start} - {stop};\n"
    out_locus_list.append(out_locus)
    start = stop + 1
buffer += F"\tpartition currentPartition = {len(out_locus_list)}: {', '.join(out_locus_list)};\n"
buffer += F"\tset partition = currentPartition;\n"
buffer += F"\tlset applyto=({str(list(range(1,len(out_locus_list) +1)))[1:-1]});\n\n"
buffer += "\tlset nst = 6 rates=invgamma;\n\tunlink statefreq=(all) revmat=(all) shape=(all) pinvar=(all);\n"
buffer += "\tprset applyto=(all) ratepr=variable;\n\tmcmcp ngen= 10000000 relburnin=yes burninfrac=0.25 printfreq=1000 samplefreq=1000 nchains=4 savebrlens=yes;\n"
buffer += "\tmcmc;\n\tsumt;\nend;\n"
with open(F"{args.output}.nex", "w") as ofile:
    ofile.write(buffer)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
15095,
11,
28686,
11,
1822,
29572,
198,
198,
48610,
796,
1822,
29572,
13,
28100,
1713,
46677,
3419,
198,
48610,
13,
2860,
62,
49140,
7203,
12,
72,
1600,
366,
438,
15414,
1600,
2... | 2.019 | 2,000 |
from query_filter_builder.sql.sql_filters import convert_to_sql_with_params
# Flat filter spec: four filters implicitly combined (no explicit join key).
# The value grammar is interpreted by convert_to_sql_with_params; the '~'
# prefix and '<'/'>=' forms presumably mark LIKE and range matches -- TODO
# confirm against the builder.
simple_obj = {
    "version": 0.1,
    "filters": [
        {"col": "col1", "value": "asd"},
        {"col": "col2", "value": "~asd"},
        {"col": "col3", "value": "<42>=40"},
        {"col": "col4", "value": [1, 2, 3]},
    ],
}

# Nested filter spec: a top-level AND group whose last member is an OR
# sub-group, including a negated filter.
nested_obj = {
    "version": 0.1,
    "join": "AND",
    "filters": [
        {"col": "col1", "negate": False, "value": "asd"},
        {"col": "col2", "value": "~asd"},
        {"col": "col3", "value": [1, 2, 3]},
        {"col": "col4", "value": "<5>=3.2"},
        {
            "join": "OR",
            "filters": [
                {"col": "col5", "negate": True, "value": "negate this value"},
                {"col": "col6", "value": "~something like this"},
            ],
        },
    ],
}
| [
6738,
12405,
62,
24455,
62,
38272,
13,
25410,
13,
25410,
62,
10379,
1010,
1330,
10385,
62,
1462,
62,
25410,
62,
4480,
62,
37266,
198,
198,
36439,
62,
26801,
796,
1391,
198,
220,
220,
220,
366,
9641,
1298,
657,
13,
16,
11,
198,
220,
... | 1.440936 | 855 |
# ------------------------------------------------------------------------
# StonePaperPi
# Please review ReadMe for instructions on how to build and run the program
#
# (c) 2022 by Balaji
# MIT License
# paper
# scissor
# stone
# pip install lobe
# pip3 install --extra-index-url https://google-coral.github.io/py-repo/ tflite_runtime
# --------------------------------------------------------------------------
#Random is used for PC's gameplay
import random
import time
from lobe import ImageModel
import cv2
import os
# Load Lobe TF model
# --> Change model file path as needed
currentDir = os.path.dirname(__file__)
print(currentDir)  # debug: show where the script lives
os.chdir(currentDir)  # make relative paths (e.g. saved snapshots) script-relative
modelPath = currentDir+"\\..\\lobe\\model"  # Windows path; 'lobe' dir beside the project
model = ImageModel.load(modelPath)
# Take Photo
# Identify prediction and turn on appropriate LED
# Main Function
# Webcam capture loop: a live preview window where SPACE captures a frame
# and plays a round against the computer, and ESC exits.
cam = cv2.VideoCapture(0)
cv2.namedWindow("StonePaperPiv2")

while True:
    # The computer's play is drawn fresh every iteration.
    gameplay = ['paper','stone','scissor']
    computer = random.choice(gameplay)
    ret, frame = cam.read()
    if not ret:
        print("failed to grab frame")
        break
    cv2.imshow("StonePaperPiv2", frame)
    KeyboardInput = cv2.waitKey(1)
    if KeyboardInput%256 == 27:
        # ESC pressed: release the camera and close windows cleanly.
        print("Escape hit, closing...")
        cv2.destroyAllWindows()
        cam.release()
        break
    elif KeyboardInput%256 == 32:
        # SPACE pressed: save the current frame and classify it.
        img_name = "StonePapaerPiv2.png"
        cv2.imwrite(img_name, frame)
        print("{} Saved!".format(img_name))
        # Run photo through Lobe TF model.
        # BUG FIX: classify the file that was just written.  The cwd was
        # set to the script directory earlier (os.chdir(currentDir)), so
        # the relative name resolves to the saved image; the previous
        # hard-coded absolute path ('C:\\Users\\Balaji\\...') only worked
        # on one machine and could diverge from the captured frame.
        ml_result = model.predict_from_file(img_name)
        ml_predict(ml_result.prediction)
        # Pulse status light
        game(computer,ml_result.prediction)
        time.sleep(1)
| [
2,
16529,
982,
198,
2,
8026,
42950,
38729,
198,
2,
4222,
2423,
4149,
5308,
329,
7729,
319,
703,
284,
1382,
290,
1057,
262,
1430,
198,
2,
220,
198,
2,
357,
66,
8,
33160,
416,
8528,
26436,
198,
2,
17168,
13789,
198,
2,
3348,
198,
... | 2.753754 | 666 |
# Lint as: python3
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for xls.dslx.fuzzer.ast_generator."""
import random
from xls.dslx import fakefs_test_util
from xls.dslx import parser_helpers
from xls.dslx import typecheck
from xls.dslx.fuzzer import ast_generator
from xls.dslx.span import PositionalError
from absl.testing import absltest
if __name__ == '__main__':
absltest.main()
| [
2,
406,
600,
355,
25,
21015,
18,
198,
2,
198,
2,
15069,
12131,
3012,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
1... | 3.232639 | 288 |
# Exceptions
try:  # the try block holds code that can potentially raise an error
    n = int(input())
    import lalala
    raise TypeError('Что-то пошло не так')  # an exception is raised explicitly
except ValueError:  # except clauses can be typed, i.e. name which errors to catch
    print('Не число!!')
except ImportError as e:  # print the error's description
    print(e)
except (ImportError, TypeError) as a:  # a tuple of exception types
    print(a)
# put the more general errors at the very end
# NOTE(review): the earlier `except ImportError` already catches ImportError,
# so the (ImportError, TypeError) clause only ever fires for TypeError.
2,
12466,
246,
21727,
31583,
30143,
141,
236,
141,
229,
16843,
22177,
18849,
40623,
198,
198,
28311,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
12466,
110,
12466,
109,
30... | 1.246269 | 402 |
import autogalaxy as ag
from autoarray.mock.fixtures import *
from autofit.mock.mock_search import MockSamples, MockSearch
from autogalaxy.plot.mat_wrap.lensing_include import Include1D, Include2D
#
# MODEL #
#
# PROFILES #
# GALAXY #
# Plane #
# GALAXY DATA #
# GALAXY FIT #
# HYPER GALAXIES #
| [
11748,
1960,
519,
282,
6969,
355,
556,
201,
198,
6738,
8295,
18747,
13,
76,
735,
13,
69,
25506,
1330,
1635,
201,
198,
6738,
1960,
1659,
270,
13,
76,
735,
13,
76,
735,
62,
12947,
1330,
44123,
50,
12629,
11,
44123,
18243,
201,
198,
... | 1.716599 | 247 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculator.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5178,
7822,
7560,
422,
3555,
334,
72,
2393,
705,
9948,
3129,
1352,
13,
9019,
6,
198,
2,
198,
2,
15622,
416,
25,
9485,
48,
83,
20,
12454,
2438,
17301,
642,
... | 2.816092 | 87 |
"""
Tests for the archive state
"""
import os
import pathlib
import shutil
import tempfile
import textwrap
import attr
import pytest
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
try:
import zipfile # pylint: disable=unused-import
HAS_ZIPFILE = True
except ImportError:
HAS_ZIPFILE = False
pytestmark = [
pytest.mark.windows_whitelisted,
]
@attr.s(frozen=True, slots=True)
@pytest.fixture(scope="module")
@pytest.fixture(params=[True, False], ids=unicode_filename_ids)
@pytest.mark.skip_if_binaries_missing("tar")
def test_tar_pack(archive, unicode_filename):
"""
Validate using the tar function to create archives
"""
with Archive("tar", unicode_filename=unicode_filename) as arch:
ret = archive.tar("-cvf", str(arch.archive), sources=str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skip_if_binaries_missing("tar")
def test_tar_unpack(archive, unicode_filename):
"""
Validate using the tar function to extract archives
"""
with Archive("tar", unicode_filename=unicode_filename) as arch:
ret = archive.tar("-cvf", str(arch.archive), sources=str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
ret = archive.tar("-xvf", str(arch.archive), dest=str(arch.dst))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skip_if_binaries_missing("tar")
def test_tar_list(archive, unicode_filename):
"""
Validate using the tar function to list archives
"""
with Archive("tar", unicode_filename=unicode_filename) as arch:
ret = archive.tar("-cvf", str(arch.archive), sources=str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
ret = archive.list(str(arch.archive))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skip_if_binaries_missing("gzip")
def test_gzip(archive, unicode_filename):
"""
Validate using the gzip function
"""
with Archive("gz", unicode_filename=unicode_filename) as arch:
ret = archive.gzip(str(arch.src_file), options="-v")
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret, file_only=True)
@pytest.mark.skip_if_binaries_missing("gzip", "gunzip")
def test_gunzip(archive, unicode_filename):
"""
Validate using the gunzip function
"""
with Archive("gz", unicode_filename=unicode_filename) as arch:
ret = archive.gzip(str(arch.src_file), options="-v")
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret, file_only=True)
ret = archive.gunzip(str(arch.src_file) + ".gz", options="-v")
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret, file_only=True)
@pytest.mark.skip_if_binaries_missing("zip")
def test_cmd_zip(archive, unicode_filename):
"""
Validate using the cmd_zip function
"""
with Archive("zip", unicode_filename=unicode_filename) as arch:
ret = archive.cmd_zip(str(arch.archive), str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skip_if_binaries_missing("zip", "unzip")
def test_cmd_unzip(archive, unicode_filename):
"""
Validate using the cmd_unzip function
"""
with Archive("zip", unicode_filename=unicode_filename) as arch:
ret = archive.cmd_zip(str(arch.archive), str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
ret = archive.cmd_unzip(str(arch.archive), str(arch.dst))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skipif(not HAS_ZIPFILE, reason="Cannot find zipfile python module")
def test_zip(archive, unicode_filename):
"""
Validate using the zip function
"""
with Archive("zip", unicode_filename=unicode_filename) as arch:
ret = archive.zip(str(arch.archive), str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skipif(not HAS_ZIPFILE, reason="Cannot find zipfile python module")
def test_unzip(archive, unicode_filename):
"""
Validate using the unzip function
"""
with Archive("zip", unicode_filename=unicode_filename) as arch:
ret = archive.zip(str(arch.archive), str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
ret = archive.unzip(str(arch.archive), str(arch.dst))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret, unix_sep=False)
@pytest.mark.skip_if_binaries_missing("rar")
def test_rar(archive, unicode_filename):
"""
Validate using the rar function
"""
with Archive("rar", unicode_filename=unicode_filename) as arch:
ret = archive.rar(str(arch.archive), str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
@pytest.mark.skip_if_binaries_missing("rar", "unrar")
def test_unrar(archive, unicode_filename):
"""
Validate using the unrar function
"""
with Archive("rar", unicode_filename=unicode_filename) as arch:
ret = archive.rar(str(arch.archive), str(arch.src))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
ret = archive.unrar(str(arch.archive), str(arch.dst))
assert isinstance(ret, list)
arch.assert_artifacts_in_ret(ret)
| [
37811,
198,
51,
3558,
329,
262,
15424,
1181,
198,
37811,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
11748,
20218,
7753,
198,
11748,
2420,
37150,
198,
198,
11748,
708,
81,
198,
11748,
12972,
9288,
198,
11748,
... | 2.527488 | 2,201 |
# Generated by Django 2.0.8 on 2018-12-07 11:17
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
23,
319,
2864,
12,
1065,
12,
2998,
1367,
25,
1558,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
#!/usr/bin/env python3
# If a user has staretd using a temp card this job will remove the card from the card field
import xmlrpc.client
import sys
host="http://localhost:9191/rpc/api/xmlrpc" # If not localhost then this address will need to be whitelisted in PaperCut
auth="atoken" # Value defined in advanced config property "auth.webservices.auth-token". Should be random
proxy = xmlrpc.client.ServerProxy(host)
cardDatabase = [ # List of the tempcards
"1234",
"2345",
"3456",
]
for card in cardDatabase:
username = proxy.api.lookUpUserNameByCardNo(auth, card)
print("Looking up card {}".format(card))
if len(username) > 0:
if proxy.api.getUserProperty(auth, username, "primary-card-number") == card:
print("Removing card number {} from primary card field for user {}".format(card, username))
proxy.api.setUserProperty(auth, username, "primary-card-number", "")
elif proxy.api.getUserProperty(auth, username, "secondary-card-number") == card:
print("Removing card number {} from secondary card field for user {}".format(card, username))
proxy.api.setUserProperty(auth, username, "secondary-card-number", "")
else:
print("Error can't find card number")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
201,
198,
201,
198,
2,
1002,
257,
2836,
468,
24170,
8671,
1262,
257,
20218,
2657,
428,
1693,
481,
4781,
262,
2657,
422,
262,
2657,
2214,
201,
198,
201,
198,
11748,
35555,
81,
14751,
1... | 2.696281 | 484 |
import torch
torch.backends.cudnn.benchmark = True
from torch.distributions import Normal
import numpy as np
import os
from core.network import Network
from core.optimizer import Optimizer
from core.buffer import RolloutBuffer
from .base import BaseAgent
class REINFORCE(BaseAgent):
"""REINFORCE agent.
Args:
state_size (int): dimension of state.
action_size (int): dimension of action.
hidden_size (int): dimension of hidden unit.
network (str): key of network class in _network_dict.txt.
head (str): key of head in _head_dict.txt.
optim_config (dict): dictionary of the optimizer info.
gamma (float): discount factor.
use_standardization (bool): parameter that determine whether to use standardization for return.
run_step (int): the number of total steps.
lr_decay: lr_decay option which apply decayed weight on parameters of network.
device (str): device to use.
(e.g. 'cpu' or 'gpu'. None can also be used, and in this case, the cpu is used.)
"""
@torch.no_grad()
| [
11748,
28034,
198,
198,
13165,
354,
13,
1891,
2412,
13,
66,
463,
20471,
13,
26968,
4102,
796,
6407,
198,
6738,
28034,
13,
17080,
2455,
507,
1330,
14435,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
198,
6738,
4755,
13,
... | 2.826425 | 386 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import dbus # NOQA: F801
from dbus.mainloop.glib import DBusGMainLoop
ccd_regex = re.compile('(.*)?CCD.*')
temp = 20
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nExiting the program, bye!')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
302,
198,
11748,
640,
198,
198,
11748,
288,
10885,
220,
1303,
8005,
48,
32,
25,
376,
41531,
198,
6738,
288... | 2.262411 | 141 |
import ast
import smart_open
import pandas as pd
from im_tutorials.utilities import eval_cols, double_eval
def arxiv_papers(year=2017):
'''arxiv_papers
Get arXiv papers csv for a single year and return as dataframe.
Args:
year (`int`): Year of the dataset.
Returns:
arxiv_df (`pd.DataFrame`): Parsed dataframe of arXiv papers.
'''
bucket='innovation-mapping-tutorials'
key='arxiv_{}/arxiv_{}.csv'.format(year, year)
arxiv_df = pd.read_csv(
smart_open.smart_open('https://s3.us-east-2.amazonaws.com/{}/{}'.format(bucket, key)),
index_col=0,
converters={
'authors': double_eval,
},
parse_dates=['created'],
)
arxiv_df['year_created'] = arxiv_df['created'].dt.year
arxiv_df['category_ids'] = arxiv_df['category_ids'].str.split(',')
return arxiv_df
| [
11748,
6468,
198,
11748,
4451,
62,
9654,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
545,
62,
83,
44917,
82,
13,
315,
2410,
1330,
5418,
62,
4033,
82,
11,
4274,
62,
18206,
628,
198,
4299,
610,
87,
452,
62,
40491,
7,
1941,... | 2.189873 | 395 |
"""Generic functions for huntsman-drp."""
from contextlib import suppress
from datetime import datetime
from dateutil.parser import parse as parse_date_dateutil
def parse_date(object):
"""
Parse a date as a `datetime.datetime`.
Args:
object (Object): The object to parse.
Returns:
A `datetime.datetime` object.
"""
with suppress(AttributeError):
object = object.strip("(UTC)")
if type(object) is datetime:
return object
return parse_date_dateutil(object)
def date_to_ymd(object):
"""
Convert a date to YYYY:MM:DD format.
Args:
object (Object): An object that can be parsed using `parse_date`.
Returns:
str: The converted date.
"""
date = parse_date(object)
return date.strftime('%Y-%m-%d')
def current_date():
"""Returns the UTC time now as a `datetime.datetime` object."""
return datetime.utcnow()
def current_date_ymd():
"""
Get the UTC date now in YYYY-MM-DD format.
Returns:
str: The date.
"""
date = current_date()
return date_to_ymd(date)
| [
37811,
46189,
5499,
329,
42984,
805,
12,
7109,
79,
526,
15931,
198,
6738,
4732,
8019,
1330,
18175,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
3128,
22602,
13,
48610,
1330,
21136,
355,
21136,
62,
4475,
62,
4475,
22602,
628,
198,... | 2.547344 | 433 |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
"""Modelling an abstract group
Yet to be documented
"""
import sys
import re
import warnings
from copy import deepcopy
from random import sample, randint
from numbers import Integral, Number
from mmgroup.structures.parse_atoms import eval_atom_expression
from mmgroup.structures.parity import Parity
####################################################################
####################################################################
### Class AbstractGroup and helpers for that class
####################################################################
####################################################################
####################################################################
### Class AbstractGroupWord
####################################################################
class AbstractGroupWord(object):
"""Model an element of an abstract group.
Users should not refer to this class directly. They should create
a group as an instance of subclass of class AbstractGroup and use
the methods of that group for creating elements.
The standard group operations '*' '/' (=right multiplication with
the inverse) and '**' are implemented here.
g1 ** g2 means g2**(-1) * g1 * g2 for group elements g1, g2.
Here a group is an instance of (a subclass of) class AbstractGroup.
For each word a group 'g' should be passed as a keyword argument
'group = g'. If a class of type 'AbstractGroup' contains one
instance only, the corresponding subclass of this class may
contain a class attribute 'group' referring to that group. Then
the user may contruct elements of that group using the
constructor of that subclass of this class.
"""
__slots__ = "group"
# There is no need to modify an methods below this line.
# You should overwrite the corresonding methods in the
# subclasses of class AbstractGroup insead.
def copy(self):
"""Return a deep copy of the group element"""
return self.group.copy_word(self)
def __imul__(self, other):
"""Implementation of the group multiplication"""
g = self.group
return g._imul(self, g._to_group(other))
def __mul__(self, other):
"""Implementation of the group multiplication"""
g = self.group
try:
return g._imul(g.copy_word(self), g._to_group(other))
except (TypeError, NotImplementedError) as exc:
try:
myself = other.group._to_group(self)
return myself.__imul__(other)
except:
raise exc
def __rmul__(self, other):
"""Implementation of the reverse group multiplication"""
g = self.group
if isinstance(other, Parity):
return other
try:
return g._imul(g._to_group(other), self)
except (TypeError, NotImplementedError) as exc:
try:
myself = other.group._to_group(self)
return other.__imul__(myself)
except:
raise exc
def __itruediv__(self, other):
"""Implementation of the group division
Here self / other means self * other**(-1) .
"""
g = self.group
return g._imul(self, g._invert(g._to_group(other)))
def __truediv__(self, other):
"""Implementation of the group division
Here self / other means self * other**(-1) .
"""
g = self.group
return g._imul(g.copy_word(self), g._invert(g._to_group(other)))
def __rtruediv__(self, other):
"""Implementation of the reverse group division
Here self / other means self * other**(-1) .
"""
g = self.group
return g._imul(g.copy_word(g._to_group(other)), g._invert(self))
def __pow__(self, exp):
"""Implementation of the power operation
This is exponentiation for integer eponents and conjugation
if the exponent is a group element.
"""
g = self.group
if isinstance(exp, Integral):
if exp > 0:
res, start = g.copy_word(self), self
elif exp == 0:
return g.neutral()
else:
start, exp = g._invert(self), -exp
res = g.copy_word(start)
for i in range(int(exp).bit_length() - 2, -1, -1):
res = g._imul(res, res)
if exp & (1 << i):
res = g._imul(res, start)
return res
elif isinstance(exp, AbstractGroupWord):
e = self.group._to_group(exp)
return g._imul(g._imul(g._invert(e), self), e)
elif isinstance(exp, Parity):
one = self.group.neutral()
if self * self == one:
return self if other.value & 1 else one
raise ValueError("Group element has not order 1 or 2")
else:
return NotImplemented
def reduce(self, copy = False):
"""Reduce a group element
If group elements are implemented as words, some functions
may produce unreduced words. This function reduces the
group element in place.
Note that all operators return reduced words. Functions return
reduced words unless stated otherwise. However, reducing all
words representing the same group element to the same word may
be beyond the capabilties of a program.
If ``copy`` is set then a reduced copy of the element is
returned, in case that the input element is not already
reduced.
"""
return self.group.reduce(self, copy)
def str(self):
"""Represent group element as a string"""
try:
return self.group.str_word(self)
except NotImplementedError:
return super(AbstractGroupWord, str)()
__repr__ = str
def as_tuples(self):
"""Convert group element to a list of tuples
For a group element ``g`` the following should hold:
``g.group.word(*g.as_tuples()) == g`` .
So passing the tuples in the list returned by this method
as arguments to ``g.group`` or to ``g.group.word``
reconstructs the element ``g``.
This shows how to convert a group element to a list of tuples
and vice versa.
"""
return self.group.as_tuples(self)
####################################################################
### Class AbstractGroup
####################################################################
class AbstractGroup(object):
"""Model an abstract group"""
word_type = AbstractGroupWord # type of an element (=word) in the group
def __init__(self, *data, **kwds):
"""Creating instances is only possible for concrete groups
"""
pass
### The following methods must be overwritten ####################
def __call__(self, *args):
"""Convert args to group elements and return their product
"""
raise NotImplementedError
def _imul(self, g1, g2):
"""Return product g1 * g2 of group elements g1 and g2.
g1 may be destroyed but not g2.
This method is called for elements g1 and g2 of the group
'self' only. It should return the reduced product.
"""
raise NotImplementedError("No multiplication in abstract group")
def _invert(self, g1):
"""Return inverse g1**(-1) of group element g1.
g1 must not be destroyed.
This method is called for elements g1 of the group
'self' only. It should return the reduced inverse.
"""
raise NotImplementedError("No inversion in abstract group")
### The following methods should be overwritten ###################
def copy_word(self, g1):
"""Return deep copy of group element ``g1``"""
g_copy = deepcopy(g1)
# Even a deep copy of an element is still in the same group
g_copy.group = g1.group
return g_copy
def _equal_words(self, g1, g2):
"""Return True iff elements g1 and g2 are equal
This method is called for elements g1 and g2 of the group
'self' only.
In concrete group this method should be overwritten with
a comparison of the relevant attributes of g1 and g2.
Caution:
Non-reduced words may be considered unequal even if they
represent the same element. Use g1.reduce() or g1 * 1 to
obtain the reduced form of g1. See method reduce() for
details.
"""
return g1 == g2
def reduce(self, g, copy = False):
"""Reduce the word ``g`` which is an element of the group
We assume that the representation of a group element
is not always given in a unique form that we call
the reduced form.
This method tries to achieve this goal. Group elements
are reduced by any operator, except for the
``==`` and ``!=`` operators.
For test purposes, is is useful to obtain a group
element in non-reduced form. Applications should
create reduced group elements only.
One way to obtain avoid reduction is to call method
``word()`` of this class with elements separated by
commas. Then no reduction takes place across the factors
separated by commas.
If argument ``copy`` is True, a reduced copy of ``g``
should be returned if ``g`` is not reduced.
"""
return g
def as_tuples(self, g):
"""Convert group element ``g`` to a list of tuples.
The returned tuple should represent a reduced word.
The sequence::
l = g.group.as_tuples(g)
g1 = g.group(*l)
should compute a group element ``g1`` with ``g1 == g``.
"""
raise NotImplementedError("Abstract method")
def str_word(self, g):
"""Convert group atom ``g`` to a string
For an element ``g`` of this group ``g.group.str_word(g)``
should be equivalent to ``g.str()``.
"""
raise NotImplementedError
### The following methods need not be overwritten #################
def neutral(self):
"""Return neutral element of the group"""
return self.__call__()
def _to_group(self, g):
"""Convert the object ``g`` to an element of this group
This function tries the conversions on ``g``. This function
is applied in a group operation.
"""
if isinstance(g, AbstractGroupWord) and g.group == self:
return g
if g == 1:
return self.neutral()
err = "Cannot convert type '%s' object to group element"
raise TypeError(err % type(g))
### The following methods should not be overwritten ###############
def __contains__(self, other):
"""Return True iff 'other' is an element of the group"""
try:
if not isinstance(other, self.word_type):
return False
if other.group != self:
return False
return True
except:
return False
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
198,
6738,
11593,
37443,
834,
1330,
220,
28000,
1098,
62,
17201,
874,
628,
198,
37811,
5841,
9417,
281,
12531,
1448,
198,
198,
11486,
284,
307,
12395,
198,
... | 2.585222 | 4,412 |
# Copyright 2022 The Symanto Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
| [
2,
15069,
33160,
383,
15845,
14723,
4992,
4816,
46665,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789... | 3.93125 | 160 |
# Copyright [2018] [Sunayu LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
'''
This execution module is used to find files with differing mode, user, and/or group from
their rpm packages and reset them. Two public commands are defined:
disa_stig7.get_files
This will just list files identified as having differing mode, user, or group from their rpm package.
disa_stig7.reset_files
This will first identify files identified as having differeing mode, user, or group from their rpm
package then reset them.
'''
log = logging.getLogger(__name__)
file_pkg_lookup = {}
| [
2,
220,
220,
15069,
685,
7908,
60,
685,
16012,
323,
84,
11419,
60,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
220,
345,
743,
407,
779,
428,
2393,
2... | 3.540625 | 320 |
"""Initial Migration
Revision ID: df19fc248886
Revises:
Create Date: 2020-05-04 14:05:37.920674
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'df19fc248886'
down_revision = None
branch_labels = None
depends_on = None
| [
37811,
24243,
36991,
198,
198,
18009,
1166,
4522,
25,
47764,
1129,
16072,
1731,
3459,
4521,
198,
18009,
2696,
25,
220,
198,
16447,
7536,
25,
12131,
12,
2713,
12,
3023,
1478,
25,
2713,
25,
2718,
13,
37128,
45385,
198,
198,
37811,
198,
... | 2.754902 | 102 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# $File: mongo.py
# $Date: Fri Feb 14 20:24:26 2014 +0800
# $Author: Xiaoyu Liu <i[at]vuryleo[dot]com>
"""database connections"""
from mongoengine import connect
import config
connect(config.DATABASE_NAME)
# vim: foldmethod=marker
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
720,
8979,
25,
285,
25162,
13,
9078,
198,
2,
720,
10430,
25,
19480,
3158,
1478,
1160,
25,
1731,
25,
2075,
19... | 2.504425 | 113 |
import click as ck
import pandas as pd
import gzip
@ck.command()
@ck.option(
'--data-file', '-df', default='data/swissprot_exp_annots.pkl',
help='Data file generated by uni2pandas script')
@ck.option(
'--inter-file', '-if', default=f'data/protein.links.full.v11.0.txt.gz',
help='Data file with interactions from STRING DB')
@ck.option(
'--out-file', '-of', default='data/swissprot_interactions.pkl',
help='Result file with a list of proteins, sequences, annotations and interactions')
if __name__ == '__main__':
main()
| [
11748,
3904,
355,
269,
74,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
308,
13344,
198,
198,
31,
694,
13,
21812,
3419,
198,
31,
694,
13,
18076,
7,
198,
220,
220,
220,
705,
438,
7890,
12,
7753,
3256,
705,
12,
7568,
3256,
4277... | 2.661836 | 207 |
from __future__ import annotations
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.base import (
ONETOMANY,
MANYTOONE,
MANYTOMANY,
)
from sqlalchemy.orm.dynamic import DynaLoader
try:
# Python 3.9+
from functools import cache
except ImportError:
# Python 3.8
from functools import lru_cache as cache
from jessiql.sainfo.names import model_name
from jessiql.typing import SAModelOrAlias, SAAttribute
from jessiql import exc
# region: Relation Attribute types
@cache
@cache
@cache
# endregion
# region Relation Attribute info
@cache
@cache
# endregion
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
39771,
21746,
198,
6738,
44161,
282,
26599,
13,
579,
13,
1078,
7657,
1330,
42410,
276,
33682,
198,
6738,
44161,
282,
26599,
13,
579,
13,
8692,
1330,... | 2.87234 | 235 |
# -*- coding: utf-8 -*-
from adm import views
from django.urls import path
app_name = "adm"
urlpatterns = [
# Processo
path('instaurar/', views.criar_processo_adm, name='criar_processo_adm'),
path('listar/', views.listar_adm, name='listar_adm'),
path('listar/ajax/', views.processos_adm_ajax, name='processos_adm_ajax'),
path('<int:pk>/detalhes/', views.detalhe_processo_adm, name='detalhe_processo_adm'),
path('<int:pk>/editar/', views.editar_processo_adm, name='editar_processo_adm'),
path('<int:pk>/extrato_administrativo.pdf', views.extrato_pdf_adm, name='extrato_pdf_adm'),
path('vincular_processos/<int:pk>/', views.vincular_processos, name='vincular_processos'),
# Ato: Expedir ofício
path('<int:pk>/expedir-oficio/<int:tipo_ato>/adicionar/', views.add_ofinterno_adm, name='add_ofinterno_adm'),
path('expedir-oficio/<int:pk>/editar/', views.editar_ofinterno_adm, name='editar_ofinterno_adm'),
path('expedir-oficio/<int:pk>/editar_arquivo/', views.editar_ofinterno_arq_adm, name='editar_ofinterno_arq_adm'), # noqa: E501
path('expedir-oficio/<int:pk>/confirmacao/', views.editar_confirmacao_adm, name='editar_confirmacao_adm'),
path('expedir-oficio/<int:pk>/data-envio/', views.editar_dataenvio_adm, name='editar_dataenvio_adm'),
# Ato: Ofício Recebido
path('<int:pk>/oficio-externo/<int:tipo_ato>/adicionar/', views.add_ofexterno_adm, name='add_ofexterno_adm'),
path('oficio-externo/<int:pk>/editar/', views.editar_ofexterno_adm, name='editar_ofexterno_adm'),
# Ato: Ofícios para empresas
path('<int:pk>/oficio-empresas/<int:tipo_ato>/adicionar/', views.add_ofempresas, name='add_ofempresas'),
path('oficio-empresas/<int:pk>/arquivo/upload/', views.ofempresas_upload_arquivo, name='ofempresas_upload_arquivo'),
path('oficio-empresas/<int:pk>/confirmar/', views.ofempresas_confirmar, name='ofempresas_confirmar'),
path('oficio-empresas/<int:pk>/editar/', views.ofempresas_editar, name='ofempresas_editar'),
# Ato: Despacho
path('<int:pk>/despacho/add/<int:tipo_ato>/', views.add_despacho_adm, name='add_despacho_adm'),
path('despacho/<int:pk>/editar/', views.editar_despacho_adm, name='editar_despacho_adm'),
# Ato: Status
path('<int:pk>/status/add/<int:tipo_ato>/', views.add_status_adm, name='add_status_adm'),
path('status/<int:pk>/editar/', views.editar_status_adm, name='editar_status_adm'),
# Ato: Mídia
path('<int:pk>/gravacao/add/<int:tipo_ato>/', views.add_gravacao_adm, name='add_gravacao_adm'),
path('<int:pk>/documento/add/<int:tipo_ato>/', views.add_documento_adm, name='add_documento_adm'),
# Ato: Documentos Gerais
path('documento/<int:pk>/editar/', views.editar_documento_adm, name='editar_documento_adm'),
# Ato: Seleção & Permissão de usuários
path('<int:pk>/selecionar_user_permitidos/', views.select_user_adm, name='select_user_adm'),
path('<int:pk>/selecionar_user_externos_permitidos/', views.add_external_users_adm, name='add_external_users_adm'),
path('<int:pk>/perm_user/', views.select_perm_adm, name='select_perm_adm'),
# Ato: Ações
path('anular/<int:pk>', views.anular_ato, name='anular_ato'),
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
6178,
1330,
5009,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
1324,
62,
3672,
796,
366,
324,
76,
1,
198,
198,
6371,
33279,
82,
796,
685,
... | 2.119601 | 1,505 |
#!/usr/bin/python
"""
Calculate Fisher matrix and P(k) constraints for all redshift bins for a given
experiment.
"""
import numpy as np
import pylab as P
import scipy.spatial, scipy.integrate, scipy.interpolate
from scipy.integrate import simps
import radiofisher as rf
from radiofisher.units import *
from radiofisher.experiments import USE, foregrounds
from mpi4py import MPI
# MPI setup: the script is intended to be launched under mpirun. `myid` is
# the rank of this process and `size` the total number of processes;
# presumably they are used further down the file to split the per-redshift-bin
# Fisher computation across processes -- not visible in this chunk.
comm = MPI.COMM_WORLD
myid = comm.Get_rank()
size = comm.Get_size()
################################################################################
# Set-up experiment parameters
################################################################################

# Define cosmology and experiment settings
survey_name = "ExptA"
root = "output/" + survey_name   # output path prefix for this survey

# Planck 2015 base_plikHM_TTTEEE_lowTEB_post_BAO
# NOTE: the original dict listed 'mnu' twice (both 0.); the duplicate key has
# been removed. The effective value is unchanged.
cosmo = {
    'omega_M_0':        0.3108,
    'omega_lambda_0':   0.6892,
    'omega_b_0':        0.04883,
    'omega_HI_0':       4.86e-4,
    'N_eff':            3.046,
    'h':                0.6761,
    'ns':               0.96708,
    'sigma_8':          0.8344,
    'w0':               -1.,
    'wa':               0.,
    'mnu':              0.,      # massive neutrinos switched off
    'k_piv':            0.05,
    'aperp':            1.,
    'apar':             1.,
    'bHI0':             0.677,
    'sigma_nl':         1e-8,    # 7., # FIXME (kept from original)
    'gamma':            0.55,
    'foregrounds':      foregrounds,
}

# Experimental setup A
# NOTE: the original dict listed 'epsilon_fg' twice (1e-14, then 1e-12); the
# duplicate has been removed, keeping the later entry, which was the
# effective value (later keys override earlier ones in a dict literal).
expt = {
    'mode':             'idish',         # Interferometer or single dish
    'Ndish':            32**2,           # No. of dishes
    'Nbeam':            1,               # No. of beams
    'Ddish':            10.,             # Single dish diameter [m]
    'Tinst':            10.*(1e3),       # Receiver temp. [mK]
    'survey_dnutot':    1000.,           # Total bandwidth of *entire* survey [MHz]
    'survey_numax':     1420.,           # Max. freq. of survey
    'dnu':              0.2,             # Bandwidth of single channel [MHz]
    'Sarea':            2.*np.pi,        # Total survey area [radians^2]
    'ttot':             43829.*HRS_MHZ,  # Total integration time [MHz^-1]
    'nu_line':          1420.406,        # Rest-frame freq. of emission line [MHz]
    'epsilon_fg':       1e-12,           # FG subtraction residual amplitude
    'use':              USE              # Which constraints to use/ignore
}
def baseline_dist(Nx, Ny, Ddish, nu=1420., debug_plot=False):
    """
    Create an interpolation function for the (circularised) baseline density
    n(x), assuming dishes placed on a regular Nx x Ny grid.

    Fixes relative to the original implementation:
      * the original unconditionally plotted n(u) and called exit() before
        its return statement, so it never returned -- the diagnostic plot is
        now opt-in via `debug_plot` and exit() has been removed;
      * a dead `y = np.arange(...)` (immediately overwritten), a dead
        norm-check via scipy.integrate.simps (its result fed only a
        commented-out print) and a Python-2-only debug print were removed.

    Parameters
    ----------
    Nx, Ny : int
        Number of dishes along each side of the regular grid.
    Ddish : float
        Dish diameter [m]; also used as the grid spacing, and baselines
        shorter than this (sub-FOV) are cut.
    nu : float, optional
        Frequency [MHz] used to convert baseline length to wavelengths
        (default: 1420, the rest-frame 21cm line).
    debug_plot : bool, optional
        If True, show a diagnostic pylab plot of n(u) vs k_perp before
        returning (requires the module-level `P` = pylab import).

    Returns
    -------
    callable
        Linear interpolator for n(x), where x = u / nu is the
        frequency-independent baseline coordinate; 0 outside the
        sampled range.
    """
    # Dish positions on a regular grid with spacing Ddish
    xpos, ypos = np.meshgrid(np.arange(Nx) * Ddish,
                             np.arange(Ny) * Ddish)

    # All pairwise baseline separations [m]
    d = scipy.spatial.distance.pdist(
            np.column_stack((xpos.flatten(), ypos.flatten()))).flatten()

    # Calculate FOV and sensible uv-plane bin size
    l = 3e8 / (nu * 1e6)    # observing wavelength [m]
    fov = 180. * 1.22 * (l / Ddish) * (np.pi / 180.)**2.
    du = 1. / np.sqrt(fov)  # 1.5 / ...

    # Remove D < Ddish (sub-FOV) baselines, then rescale into u = d / lambda
    d = d[np.where(d > Ddish)]
    d /= l

    # Bin edges. NOTE: the original computed du-spaced edges via linspace and
    # then immediately overrode them with unit spacing (marked FIXME there);
    # the effective unit-spaced behaviour is kept.
    imax = int(np.max(d) / du) + 1
    edges = np.arange(0., imax * du, 1.)

    # Histogram: no. of baselines falling in each ring
    bins, edges = np.histogram(d, edges)
    u = 0.5 * (edges[1:] + edges[:-1])  # bin centroids

    # Convert counts to a density n(u). NOTE(review): uses ring width du even
    # though the histogram bins above are unit-width -- kept from the
    # original (see FIXME note above); confirm which width is intended.
    nn = bins / (2. * np.pi * u * du)

    # Convert to freq.-independent expression, n(x) = n(u) * nu^2,
    # where nu is in MHz.
    n_x = nn * nu**2.
    x = u / nu

    if debug_plot:
        # Diagnostic plot of n(u) as a fn. of k_perp
        kperp = 2. * np.pi * u / (0.5 * (2733 + 1620.))  # @ avg. of z = 0.42, 0.78
        P.plot(kperp, n_x / 900.**2., lw=1.8, color='r')
        P.ylabel("$n(u)$", fontsize=18)
        P.xlabel(r"$k_\perp$ ${\rm Mpc}^{-1}$", fontsize=18)
        P.gca().tick_params(axis='both', which='major', labelsize=20, size=8.,
                            width=1.5, pad=8.)
        P.gca().tick_params(axis='both', which='minor', labelsize=20, size=5.,
                            width=1.5, pad=8.)
        P.tight_layout()
        P.show()

    return scipy.interpolate.interp1d(x, n_x, kind='linear',
                                      bounds_error=False, fill_value=0.)
# Set baseline density
expt['n(x)'] = baseline_dist(32, 32, 10.) # Interferometer antenna density
# Define redshift bins
dat = np.genfromtxt("slosar_background_zlow.dat").T
zmin = dat[0]
bias = dat[4]
#zs = np.concatenate((zmin, [zmin[1] - zmin[0],]))
#zc = 0.5 * (zs[:-1] + zs[1:])
# Single bin between 800 - 1000 MHz
zs = np.array([1420./1000. - 1., 1420./800. - 1.]) # bin edges in redshift
zc = 0.5 * (zs[:-1] + zs[1:]) # bin centres
# Define kbins (used for output)
kbins = np.arange(0., 5.*cosmo['h'], 0.1*cosmo['h']) # Bins of 0.1 h/Mpc
################################################################################
# Precompute cosmological functions and P(k)
cosmo_fns = rf.background_evolution_splines(cosmo)
# Load P(k) and split into smooth P(k) and BAO wiggle function
k_in, pk_in = np.genfromtxt("slosar_pk_z0.dat").T # Already in non-h^-1 units
cosmo['pk_nobao'], cosmo['fbao'] = rf.spline_pk_nobao(k_in, pk_in)
cosmo['k_in_max'] = np.max(k_in)
cosmo['k_in_min'] = np.min(k_in)
# Switch-off massive neutrinos, fNL, MG etc.
mnu_fn = None
transfer_fn = None
Neff_fn = None
switches = []
H, r, D, f = cosmo_fns # H(z), r(z), D(z), f(z) splines
################################################################################
# Compare Anze's functions with the ones we calculate internally
################################################################################
# (disabled consistency check -- kept as a no-op string literal so it never runs)
"""
# Distance, r(z) [Mpc]
zz = dat[0]
P.plot(zz, dat[1], 'b-', lw=1.8)
P.plot(zz, (1.+zz)*r(zz), 'y--', lw=1.8)
# Growth (normalised to 1 at z=0)
P.plot(zz, dat[2], 'r-', lw=1.8)
P.plot(zz, D(zz)/D(0.), 'y--', lw=1.8)
# Growth rate, f(z)
P.plot(zz, dat[3], 'g-', lw=1.8)
P.plot(zz, f(zz), 'y--', lw=1.8)
P.show()
exit()
"""
################################################################################
# Loop through redshift bins, assigning them to each process
################################################################################
for i in range(zs.size-1):
    if i % size != myid: # round-robin assignment of bins to MPI processes
        continue
    print ">>> %2d working on redshift bin %2d -- z = %3.3f" % (myid, i, zc[i])
    # Calculate bandwidth
    numin = expt['nu_line'] / (1. + zs[i+1])
    numax = expt['nu_line'] / (1. + zs[i])
    expt['dnutot'] = numax - numin
    z = zc[i]
    # Pack values and functions into the dictionaries cosmo, expt
    HH, rr, DD, ff = cosmo_fns
    cosmo['A'] = 1.
    cosmo['omega_HI'] = rf.omega_HI(z, cosmo)
    cosmo['bHI'] = rf.bias_HI(z, cosmo) # FIXME
    cosmo['btot'] = cosmo['bHI']
    cosmo['Tb'] = rf.Tb(z, cosmo)
    cosmo['z'] = z; cosmo['D'] = DD(z)
    cosmo['f'] = ff(z)
    cosmo['r'] = rr(z); cosmo['rnu'] = C*(1.+z)**2. / HH(z)
    cosmo['switches'] = switches
    # Physical volume (in rad^2 Mpc^3) (note factor of nu_line in here)
    Vphys = expt['Sarea'] * (expt['dnutot']/expt['nu_line']) \
          * cosmo['r']**2. * cosmo['rnu']
    print "Vphys = %3.3e Mpc^3" % Vphys
    #---------------------------------------------------------------------------
    # Noise power spectrum
    #---------------------------------------------------------------------------
    # Get grid of (q,y) coordinates
    kgrid = np.linspace(1e-4, 5.*cosmo['h'], 500)
    KPAR, KPERP = np.meshgrid(kgrid, kgrid)
    y = cosmo['rnu'] * KPAR
    q = cosmo['r'] * KPERP
    # Get noise power spectrum (units ~ mK^2)
    cn = rf.Cnoise(q, y, cosmo, expt) * cosmo['r']**2. * cosmo['rnu'] \
       * cosmo['h']**3. \
       * 0.1**3. # FIXME: Fudge factor to get in
                 # the same ballpark!
    print "%3.3e Mpc^3" % (cosmo['r']**2. * cosmo['rnu'])
    # Plot noise power spectrum
    fig, ax = P.subplots(1)
    ax.set_aspect('equal')
    mat = ax.matshow(np.log10(cn).T, origin='lower',
                     extent=[0., np.max(kgrid)/cosmo['h'],
                             0., np.max(kgrid)/cosmo['h']],
                     aspect='auto', vmin=-3.7, vmax=-2.)
    # Lines of constant |k|
    from matplotlib.patches import Circle
    for n in range(1, 6):
        ax.add_patch( Circle((0., 0.), n, fc='none', ec='w', alpha=0.5, lw=2.2) )
    P.xlabel(r"$k_\perp$ $[h/{\rm Mpc}]$", fontsize=18)
    P.ylabel(r"$k_\parallel$ $[h/{\rm Mpc}]$", fontsize=18)
    # Colour bar
    clr = P.colorbar(mat)
    clr.set_label(r"$\log_{10}[P_N(k_\perp, k_\parallel)]$ $[{\rm mK}^2 {\rm Mpc}^3]$", fontsize=18)
    # Tweak tick labels
    P.gca().tick_params(axis='both', which='major', labelsize=20, size=8.,
                        width=1.5, pad=8.)
    P.gca().tick_params(axis='both', which='minor', labelsize=20, size=5.,
                        width=1.5, pad=8.)
    P.show()
    exit() # NOTE(review): debugging short-circuit -- the remainder of this loop body never runs
    #---------------------------------------------------------------------------
    # Set binning
    Nperp = 50
    Npar = 45
    dk = 0.1 * cosmo['h'] # k bin size
    # Loop over bins
    dP = np.zeros((Nperp, Npar))
    for ii in range(Nperp):
        kperp_min = 1e-4 + ii*dk
        kperp_max = kperp_min + dk
        kperp = np.logspace(np.log10(kperp_min), np.log10(kperp_max), 80)
        #kperp = np.linspace(kperp_min, kperp_max, 120)
        for jj in range(Npar):
            kpar_min = 1e-4 + jj*dk
            kpar_max = kpar_min + dk
            kpar = np.logspace(np.log10(kpar_min), np.log10(kpar_max), 40)
            #kpar = np.linspace(kpar_min, kpar_max, 80)
            # Get grid of (q,y) coordinates
            KPAR, KPERP = np.meshgrid(kpar, kperp)
            y = cosmo['rnu'] * KPAR
            q = cosmo['r'] * KPERP
            # Calculate integrand
            cs = rf.Csignal(q, y, cosmo, expt)
            cn = rf.Cnoise(q, y, cosmo, expt)
            integrand = KPERP * (cs / (cs + cn))**2.
            # Do double integration
            Ik = [simps(integrand.T[i], kperp) for i in range(kpar.size)]
            dP[ii,jj] = simps(Ik, kpar)
    # Rescale deltaP/P
    dP *= Vphys / (8. * np.pi**2.)
    dP = 1. / np.sqrt(dP)
    fig, ax = P.subplots(1)
    ax.set_aspect('equal')
    mat = ax.matshow(np.log10(dP).T, vmin=-3.7, vmax=-2., origin='lower',
                     extent=[0., Nperp*0.1, 0., Npar*0.1], aspect='auto')
    from matplotlib.patches import Circle
    for n in range(1, 6):
        ax.add_patch( Circle((0., 0.), n, fc='none', ec='w', alpha=0.5, lw=2.2) )
    P.xlabel(r"$k_\perp$ $[h/{\rm Mpc}]$", fontsize=18)
    P.ylabel(r"$k_\parallel$ $[h/{\rm Mpc}]$", fontsize=18)
    #P.yscale('log')
    #P.xscale('log')
    clr = P.colorbar(mat)
    clr.set_label("$\log_{10}[\sigma_P / P\,(k_\perp, k_\parallel)]$", fontsize=18)
    P.gca().tick_params(axis='both', which='major', labelsize=20, size=8.,
                        width=1.5, pad=8.)
    P.gca().tick_params(axis='both', which='minor', labelsize=20, size=5.,
                        width=1.5, pad=8.)
    #P.tight_layout()
    P.show()
    exit()
    # Evaluate at output grid points
    #Ikpar(kgrid)
    #cumtrapz(Ik, kgrid, initial=0.)
comm.barrier()
if myid == 0: print "Finished."
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
9771,
3129,
378,
14388,
17593,
290,
350,
7,
74,
8,
17778,
329,
477,
2266,
30846,
41701,
329,
257,
1813,
220,
198,
23100,
3681,
13,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
... | 1.974709 | 6,010 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `sarbor` package."""
import unittest
import sarbor
class TestSarborToy(unittest.TestCase):
"""Tests for `sarbor` package."""
def setUp(self):
"""
0-1-2-3-4-5
| |
6 10
|
7-9
|
8
"""
self.skeleton = sarbor.Skeleton()
self.skeleton.input_nid_pid_x_y_z(
[
[0, 0, 0, 0, 0],
[1, 0, 1, 0, 0],
[2, 1, 2, 0, 0],
[3, 2, 3, 0, 0],
[4, 3, 4, 0, 0],
[5, 4, 5, 0, 0],
[6, 2, 2, 1, 0],
[7, 6, 2, 2, 0],
[8, 7, 2, 3, 0],
[9, 7, 3, 2, 0],
[10, 4, 4, 1, 0],
]
)
    def tearDown(self):
        """Tear down test fixtures, if any (this suite needs no per-test cleanup)."""
@unittest.expectedFailure
def test_get_segments(self):
"""
breadth first segment iteration
"""
segment_iter = self.skeleton.get_segments()
self.assertEqual([node.key for node in next(segment_iter)], [0, 1, 2])
self.assertEqual([node.key for node in next(segment_iter)], [2, 3, 4])
self.assertEqual([node.key for node in next(segment_iter)], [2, 6, 7])
self.assertEqual([node.key for node in next(segment_iter)], [4, 5])
self.assertEqual([node.key for node in next(segment_iter)], [4, 10])
self.assertEqual([node.key for node in next(segment_iter)], [7, 8])
self.assertEqual([node.key for node in next(segment_iter)], [7, 9])
@unittest.expectedFailure
@unittest.expectedFailure
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
51,
3558,
329,
4600,
82,
283,
2865,
63,
5301,
526,
15931,
628,
198,
11748,
555,
715,
395,
198,
198,
11748... | 1.779058 | 955 |
import pytest
from ahd2fhir.mappers.ahd_to_observation_smkstat import AHD_TYPE, get_fhir_resources
from tests.utils import map_resources
# (payload fixture filename, expected number of conditions) pairs consumed by
# the parametrized tests below.
AHD_PAYLOADS_EXPECTED_NUMBER_OF_CONDITIONS = [
    ("payload_1.json", 3),
    ("payload_2.json", 0),
]
@pytest.mark.parametrize(
"ahd_json_path,expected_number_of_conditions",
AHD_PAYLOADS_EXPECTED_NUMBER_OF_CONDITIONS,
)
@pytest.mark.parametrize(
"ahd_json_path,_",
AHD_PAYLOADS_EXPECTED_NUMBER_OF_CONDITIONS,
)
@pytest.mark.parametrize(
"ahd_json_path,_",
AHD_PAYLOADS_EXPECTED_NUMBER_OF_CONDITIONS,
)
| [
11748,
12972,
9288,
198,
198,
6738,
29042,
67,
17,
69,
71,
343,
13,
76,
46629,
13,
993,
67,
62,
1462,
62,
672,
3168,
341,
62,
5796,
74,
14269,
1330,
317,
10227,
62,
25216,
11,
651,
62,
69,
71,
343,
62,
37540,
198,
6738,
5254,
13... | 2.091241 | 274 |
import setuptools
# Package metadata for the ThingSpeak helper library.
setuptools.setup(
    name="thingspeak",
    version='0.0.1',
    author='Roger Selzler',
    # Fixed typos in the description string ("thingspek" -> "ThingSpeak",
    # "Mathworks" -> "MathWorks"); no functional change.
    description='Tools to ease the manipulation of data on ThingSpeak from MathWorks using REST API and python.',
    url='https://github.com/roger-selzler/ThingSpeak',
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
)
11748,
900,
37623,
10141,
198,
2617,
37623,
10141,
13,
40406,
7,
198,
220,
220,
220,
220,
220,
220,
220,
1438,
2625,
27971,
36729,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
2196,
11639,
15,
13,
15,
13,
16,
3256,
198,
220,
220,
... | 2.408805 | 159 |
# Copyright (C) 2021 Clinton Garwood
# MIT Open Source Initiative Approved License
# handle_data_types_clinton.py
# CIS-135 Python
# Assignment #5
# Assignment brief: declare variables of several types, demonstrate type
# casting, and print the results.
# # An integer with value 1 named one
one = 1
# # A float with a value 10.10 named tenTen
tenTen = 10.10
# # A variable named sum, which adds tenTen and one
sum = one + tenTen  # NOTE(review): shadows built-in sum(); the name is mandated by the assignment
# cast values
one_float = 1.0
tenTenint = int(one_float)  # NOTE(review): casts one_float (1.0), not tenTen -- name/comment mismatch, confirm intent
print(type(tenTenint))
# Print values of sum
print(sum)
print(type(sum))
print(type(int(sum)))
| [
2,
15069,
357,
34,
8,
33448,
2605,
7164,
3822,
201,
198,
2,
17168,
4946,
8090,
18362,
20010,
1079,
13789,
201,
198,
2,
5412,
62,
7890,
62,
19199,
62,
37821,
13,
9078,
201,
198,
2,
36159,
12,
17059,
11361,
201,
198,
2,
50144,
1303,
... | 2.837563 | 197 |
from .matter import *
name = 'matter'  # module name constant exposed by this package
6738,
764,
47635,
1330,
1635,
198,
3672,
796,
705,
47635,
6
] | 3.363636 | 11 |
from django import forms
from app.models import BrewPiDevice, OldControlConstants, NewControlConstants, SensorDevice, FermentationProfile, FermentationProfilePoint
from django.core import validators
import fermentrack_django.settings as settings
from django.forms import ModelForm
from . import udev_integration
import re
import datetime
import pytz
import random
| [
6738,
42625,
14208,
1330,
5107,
198,
6738,
598,
13,
27530,
1330,
9702,
38729,
24728,
11,
5706,
15988,
34184,
1187,
11,
968,
15988,
34184,
1187,
11,
35367,
24728,
11,
12880,
14374,
37046,
11,
12880,
14374,
37046,
12727,
198,
6738,
42625,
1... | 3.968085 | 94 |
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import norm
from soak19 import wl_to_rgb
import pandas as pd
# Cone response table sampled every 5 nm; the index column is the wavelength.
cone = pd.read_csv('data/cone_response_5nm.csv', index_col=0, comment='#')
fig, axs = plt.subplots(1, 3)
_, h = fig.get_size_inches()
fig.set_size_inches(h, h)  # make the figure square
# One horizontal-bar panel per cone type; panels are filled right-to-left.
for ax, (name, c) in zip(axs[::-1], cone.items()):
    ax.set_axis_off()
    wl = c.index.values
    # Bar length is 10**response (values presumably stored as log10 -- TODO
    # confirm against the CSV), coloured by the corresponding wavelength.
    ax.barh(wl, width=10**c.values, height=5, color=wl_to_rgb(wl))
    ax.set_title(name)
    ax.set_ylim(700, 390)  # inverted: long wavelengths at the bottom
fig.tight_layout()
fig.savefig(f'build/plots/cone_response_matrix.pdf')
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
13,
34242,
1330,
2593,
198,
6738,
33182,
1129,
1330,
266,
75,
62,
1462,
62,
81,
22296,
198,
11748,
19798,
292,
355,
... | 2.226923 | 260 |
from django import template
from django.template.defaultfilters import pluralize
from django.utils.formats import date_format as djd_fmt
register = template.Library()
@register.filter
# noinspection PyShadowingBuiltins
@register.filter
# noinspection PyShadowingBuiltins
@register.filter
@register.filter
@register.filter
@register.filter
| [
6738,
42625,
14208,
1330,
11055,
198,
6738,
42625,
14208,
13,
28243,
13,
12286,
10379,
1010,
1330,
22801,
1096,
198,
6738,
42625,
14208,
13,
26791,
13,
687,
1381,
1330,
3128,
62,
18982,
355,
42625,
67,
62,
69,
16762,
198,
198,
30238,
79... | 3.460784 | 102 |
"""Base MLP model."""
from typing import Dict, Any
import tensorflow as tf
import tensorflow.keras.layers as L
import configlib
from configlib import config as C
from components.inputlayers.categorical import OneHotCategoricalInput
import components.inputlayers.image
import utils.factory
# Setup configurable parameters of the model
add_argument = configlib.add_group(
"MLP Image Model Options.", prefix="mlp_image_classifier"
)
# ---
# Image layer parameters
configlib.add_arguments_dict(
add_argument, components.inputlayers.image.configurable, prefix="image"
)
# ---
# Predinet Layer options
add_argument(
"--hidden_sizes",
type=int,
nargs="+",
default=[32],
help="Hidden layer sizes, length determines number of layers.",
)
add_argument(
"--hidden_activations",
nargs="+",
default=["relu"],
help="Hidden layer activations, must match hidden_sizes.",
)
# ---------------------------
def process_image(image: tf.Tensor, _: Dict[str, Any]) -> tf.Tensor:
    """Encode an image input and flatten the extracted objects.

    image has shape (B, W, H, C); the flattened result is (B, W*H*E).
    """
    encoder = utils.factory.get_and_init(
        components.inputlayers.image, C, "mlp_image_", name="image_layer"
    )
    features = encoder(image)  # (B, W, H, E)
    flat_features = L.Flatten()(features)  # (B, W*H*E)
    return flat_features
def process_task_id(task_id: tf.Tensor, input_desc: Dict[str, Any]) -> tf.Tensor:
    """One-hot encode the given task ids, giving a (B, T) tensor."""
    encoder = OneHotCategoricalInput(input_desc["num_categories"])
    return encoder(task_id)
def build_model(task_description: Dict[str, Any]) -> Dict[str, Any]:
    """Build the MLP image classifier model.

    Args:
        task_description: dataset description; "output" must describe a
            binary task and provide "num_categories".
    Returns:
        Dict with "model", "loss" and "metrics" entries ready for training.
    """
    # ---------------------------
    # Setup and process inputs
    processors = {"image": process_image, "task_id": process_task_id}
    mlp_inputs = utils.factory.create_input_layers(task_description, processors)
    # ---------------------------
    # Concatenate processed inputs
    concat_in = next(iter(mlp_inputs["processed"].values()))
    if len(mlp_inputs["processed"]) > 1:
        concat_in = L.Concatenate()(list(mlp_inputs["processed"].values()))
    # ---------------------------
    # Hidden dense stack: one layer per configured size/activation pair
    for size, activation in zip(C["mlp_hidden_sizes"], C["mlp_hidden_activations"]):
        concat_in = L.Dense(size, activation=activation)(concat_in)
    predictions = L.Dense(task_description["output"]["num_categories"])(concat_in)  # logits
    # ---------------------------
    # Create model instance
    model = tf.keras.Model(
        inputs=mlp_inputs["input_layers"],
        outputs=predictions,
        name="mlp_image_classifier",
    )
    # ---------------------------
    # Compile model for training
    dataset_type = task_description["output"]["type"]
    assert (
        dataset_type == "binary"
    ), f"MLP image classifier requires a binary classification dataset, got {dataset_type}"
    loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    metrics = tf.keras.metrics.BinaryAccuracy(name="acc")
    # ---------------------------
    return {"model": model, "loss": loss, "metrics": metrics}
| [
37811,
14881,
10373,
47,
2746,
526,
15931,
198,
6738,
19720,
1330,
360,
713,
11,
4377,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
11192,
273,
11125,
13,
6122,
292,
13,
75,
6962,
355,
406,
198,
198,
11748,
4566,
8019,
198,
... | 2.754991 | 1,102 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""reorder_lines_inversed.py.
This module is designed to reorganize the string sequence of a simple text
document. There are 2 types of string sequence reorganization in reverse
order: strict and blocks.
In the strict reorganization method, each row is rearranged in reverse order.
In the block reorganization method, the rows are rearranged in accordance with
the entered string delimiter (separator).
That is, if you need to swap strings using an empty string as a separator,
but not swap strings without an empty string between them.
Use default block reorganization method without entering delimiter.
!!! Look closely at lines that contains - 'line five!' 'line sixth!' !!!
| STRICT REORDER EXAMPLE | BLOCK REORDER EXAMPLE |
| (source document): | (source document): |
| 1 line one | 1 line one |
| 2 (2 empty_line) | 2 (2 empty_line) |
| 3 line three | 3 line three |
| 4 (4 empty_line) | 4 (4 empty_line) |
| 5 line five! | 5 line five! |
| 6 line sixth! | 6 line sixth! |
| (output document): | (output document): |
| 1 line sixth! | 1 line five! |
| 2 line five! | 2 line sixth! |
| 3 (4 empty_line) | 3 (4 empty_line) |
| 4 line three | 4 line three |
| 5 (2 empty_line) | 5 (2 empty_line) |
| 6 line one | 6 line one |
"""
__author__ = "WANDEX"
from os import getcwd, path
from typing import List
FILE = ""
# Prompt until an existing file path is supplied.
while not path.exists(FILE):
    print("ENTER VALID RELATIVE/FULL FILE PATH WITH EXTENSION:\n")
    FILE = input("file: ")
    # A leading backslash is treated as relative to the current directory.
    if FILE.startswith("\\"):
        CURRENT_DIR = getcwd()
        FILE = CURRENT_DIR + FILE
        print("relative file path:\n" + FILE)
# Output path defaults to '<input>_new<ext>' when left empty.
OUTPUT = input("output (if empty ' + _new'): ")
if OUTPUT.isspace() or OUTPUT == "":
    PAIR = path.splitext(FILE)
    OUTPUT = PAIR[0] + "_new" + PAIR[1]
ENCODING = input("encoding (if empty 'UTF-8'): ").lower() or "utf-8"
def idelimiter():
    """Ask the user for the block separator string.

    The returned separator always ends with a newline; entering nothing
    selects a bare empty line (the default).
    """
    return input("delimiter (if empty - 'empty line'): ") + "\n"
def ireorder():
    """Ask the user which reorder method to use.

    Expects 'strict' or 'blocks' (case-insensitive).
    """
    choice = input("reorder method('strict'/'blocks'): ")
    return choice.lower()
def strict(f_in, f_out):
    """Write every line of *f_in* to *f_out* in reverse order.

    Each individual row is rearranged, ignoring any block structure.
    """
    lines = f_in.readlines()
    lines.reverse()
    f_out.writelines(lines)
    print("SUCCESS STRICT REORDER COMPLETE")
def blocks(f_in, f_out):
    """blocks reorder method.

    In the block reorganization method, the rows are rearranged in
    accordance with the entered string delimiter (separator).

    Every delimiter line resets the insertion index to the front of the
    accumulated list, so later blocks end up before earlier ones while
    the lines inside each block keep their original order.
    """
    blocks_list: List[str] = []
    line_index = 0  # insertion position inside the current block
    line_counter = 0  # total lines read; only used for error reporting
    delimiter = idelimiter()
    for line in f_in:
        line_counter += 1
        line_index += 1
        if line != delimiter:
            blocks_list.insert(line_index, line)
        elif line == delimiter:
            # Start a new block at the front of the list.
            line_index = 0
            blocks_list.insert(line_index, line)
        else:
            # NOTE(review): unreachable -- the if/elif above is exhaustive.
            print(
                "SOMETHING HAPPENED AT LINE: {0}\n"
                "STRING CONTENT: {1}".format(line_counter, line)
            )
    f_out.writelines(blocks_list)
    print("SUCCESS BLOCKS REORDER COMPLETE")
def execute():
    """Main entry point: open the input/output files and run the chosen method.

    Re-prompts until a known reorder method is entered, replacing the
    previous unbounded recursion (which also re-opened and re-truncated
    both files on every wrong answer).
    """
    with open(FILE, "r", 1, ENCODING, errors="replace") as f_in, open(
        OUTPUT, "w", 1, ENCODING, errors="replace"
    ) as f_out:
        reorder = ireorder()
        # Keep asking until a valid method name is supplied.
        while reorder not in ("strict", "blocks"):
            print("THERE'S NO SUCH METHOD, TYPE IN ONE OF THE FOLLOWING.")
            reorder = ireorder()
        if reorder == "strict":
            strict(f_in, f_out)
        else:
            blocks(f_in, f_out)
execute()
execute()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
260,
2875,
62,
6615,
62,
259,
690,
276,
13,
9078,
13,
198,
198,
1212,
8265,
318,
3562,
284,
35459,
1096,
... | 2.236926 | 1,874 |
import time
from random import randint
from time import sleep
# Rock-paper-scissors ("jokenpo") against the computer.
itens = ('Pedra','Papel', 'Tesoura')  # (rock, paper, scissors)
computador = randint(0, 2)  # computer's move
print('''Suas opções:
[0] PEDRA
[1] PAPEL
[2] TESOURA
''')
jogador = int(input('Qual é a sua jogada? '))  # player's move
print('-'* 20)
t = 1
# "JO... KEN... PO!" countdown with 1-second pauses.
print('JO')
time.sleep(t)
print('KEN')
time.sleep(t)
print('PO!!!')
# NOTE(review): an entry outside 0-2 raises IndexError on itens[jogador]
# below, before the "JOGADA INVÁLIDA" branches can report it -- confirm
# whether input validation should happen earlier.
print('O computador jogou {}'.format(itens[computador]))
print('O jogador jogou {}'.format(itens[jogador]))
print('-' * 20)
if computador == 0: # computer played PEDRA (rock)
    if jogador == 0:
        print('EMPATE!')
    elif jogador == 1:
        print('O jogador ganhou!')
    elif jogador == 2:
        print('O computador ganhou!')
    else:
        print('JOGADA INVÁLIDA!')
elif computador == 1: # computer played PAPEL (paper)
    if jogador == 0:
        print('O computador ganhou!')
    elif jogador == 1:
        print('EMPATE!')
    elif jogador == 2:
        print('O jogador ganhou!')
    else:
        print('JOGADA INVÁLIDA!')
elif computador == 2: # computer played TESOURA (scissors); original comment wrongly said PEDRA
    if jogador == 0:
        print('O jogador ganhou!')
    elif jogador == 1:
        print('O computador ganhou!')
    elif jogador == 2:
        print('EMPATE!')
    else:
        print('JOGADA INVÁLIDA!')
11748,
640,
198,
6738,
4738,
1330,
43720,
600,
198,
6738,
640,
1330,
3993,
198,
270,
641,
796,
19203,
43468,
430,
41707,
47,
499,
417,
3256,
705,
36504,
280,
430,
11537,
198,
785,
1996,
7079,
796,
43720,
600,
7,
15,
11,
362,
8,
198,... | 2.147687 | 562 |
"""
=====================
Methods vs. Functions
=====================
Placeholder for Methods vs. Functions documentation.
"""
| [
37811,
198,
198,
4770,
1421,
28,
198,
46202,
3691,
13,
40480,
198,
4770,
1421,
28,
198,
198,
27271,
13829,
329,
25458,
3691,
13,
40480,
10314,
13,
198,
198,
37811,
198
] | 4.333333 | 30 |
# from stl_dsa.users.models import User
# def test_user_is_member(faker):
# email = faker.email()
# taggings =
# user = User(email=email, first_name=faker.first_name(), last_name=faker.last_name())
| [
2,
422,
336,
75,
62,
9310,
64,
13,
18417,
13,
27530,
1330,
11787,
628,
198,
2,
825,
1332,
62,
7220,
62,
271,
62,
19522,
7,
69,
3110,
2599,
198,
2,
220,
220,
220,
220,
3053,
796,
277,
3110,
13,
12888,
3419,
198,
2,
220,
220,
22... | 2.409091 | 88 |
from app.drivers.prottable.base import PepProttableDriver
from app.actions.headers import peptable as head
from app.readers import tsv as tsvreader
from app.actions.peptable import model_qvals as prep
from app.drivers.options import peptable_options
class ModelQValuesDriver(PepProttableDriver):
    """Given a peptide table, this uses linear regression to model the
    peptide q-values against a score, e.g. svm-score.

    # FIXME
    It currently also removes the column with PEPs, since it will no
    longer be correct.
    """
    # NOTE(review): docstring says "linear regression" while the CLI help
    # below says "partial least squares regression" -- confirm which is used.
    # Suffix appended to the output file name.
    outsuffix = '_qmodel.txt'
    # Sub-command name and help text for the CLI parser.
    command = 'modelqvals'
    commandhelp = ('Recalculate peptide q-values by creating a linear model '
                   'of them against a score (partial least squares '
                   'regression).')
| [
6738,
598,
13,
36702,
13,
1676,
926,
540,
13,
8692,
1330,
24226,
2964,
926,
540,
32103,
198,
6738,
598,
13,
4658,
13,
50145,
1330,
34337,
540,
355,
1182,
198,
6738,
598,
13,
961,
364,
1330,
256,
21370,
355,
256,
21370,
46862,
198,
6... | 2.898876 | 267 |
from . import camera, mat_utils, rgbd_util | [
6738,
764,
1330,
4676,
11,
2603,
62,
26791,
11,
48670,
17457,
62,
22602
] | 3.230769 | 13 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011-2013, Cédric Krier
# Copyright (c) 2011-2013, B2CK
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from sql import Join, Table, AliasManager
from sql.functions import Now
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
66,
8,
2813,
12,
6390,
11,
327,
2634,
67,
1173,
509,
5277,
198,
2,
15069,
357,
66,
8,
2813,
12,
6390,
11,
347,
17,
34,
42,
198,
2,
1439,
... | 3.331373 | 510 |
#!/usr/bin/python3
import argparse
import git
import ruamel.yaml
import os
import sys
print("Entering updateEndpoint script..")
# CLI tool: add or delete a thanos-sidecar endpoint in a cluster's
# site-values.yaml inside the decapod-site git repository, then push.
parser = argparse.ArgumentParser()
parser.add_argument('--action', required=True, type=str,
                    help="action to take: 'add' or 'delete'")
parser.add_argument('--cluster_name', required=True, type=str,
                    help="cluster name to which the endpoint is added")
parser.add_argument('--endpoint', required=True, type=str,
                    help="endpoint to add")
args = parser.parse_args()
action = args.action
clusterName = args.cluster_name
endpoint = args.endpoint
repo = None
config = {}
commit_msg = ''
sitePath = './decapod-site'
siteFileName = "{}/lma/site-values.yaml".format(clusterName)
siteFileNameFull = "{}/{}".format(sitePath, siteFileName)
# Tested with 'robertchoi80' repo
repoOrgName = ''  # NOTE(review): empty org name yields an invalid clone URL -- must be filled in
# Clone or re-use decapod-site repository #
if not os.path.isdir(sitePath):
    print("Cloning repository...")
    repo = git.Repo.clone_from("https://github.com/{}/decapod-site".format(repoOrgName), 'decapod-site')
    with repo.config_writer() as git_config:
        git_config.set_value('user', 'email', 'tks-argo@tks.com')
        git_config.set_value('user', 'name', 'TKS Argo')
else:
    repo = git.Repo(sitePath)
    repo.remotes.origin.pull()
# Load the site-values file round-trip so quoting/ordering are preserved.
with open(siteFileNameFull, 'r') as f:
    config = ruamel.yaml.round_trip_load(f, preserve_quotes=True)
charts = config["charts"]
thanosChart = [chart for chart in charts if chart['name'] == "thanos"][0]
if action == 'add':
    if (endpoint in thanosChart['override']['querier.stores']):
        print("The endpoint already exists.")
        sys.exit(0)
    else:
        #print("Before insertion: {}".format(thanosChart))
        thanosChart['override']['querier.stores'].append(endpoint)
        #print("After insertion: {}".format(thanosChart))
        commit_msg = "add new thanos-sidecar endpoint to '{}' cluster".format(clusterName)
elif action == 'delete':
    if (endpoint in thanosChart['override']['querier.stores']):
        print("Found endpoint. Deleting it...")
        thanosChart['override']['querier.stores'].remove(endpoint)
        commit_msg = "delete thanos-sidecar endpoint from '{}' cluster".format(clusterName)
    else:
        print("The endpoint {} doesn't exist. Exiting script...".format(endpoint))
        sys.exit(0)
else:
    sys.exit("Wrong action type")
# Write the modified config back, then commit and push.
with open(siteFileNameFull, 'w') as f:
    ruamel.yaml.round_trip_dump(config, f)
diff = repo.git.diff(repo.head.commit.tree)
print(diff)
# Provide a list of the files to stage
repo.index.add([siteFileName])
# Provide a commit message
repo.index.commit(commit_msg)
res = repo.remotes.origin.push()[0]
# flag '256' means successful fast-forward
if res.flags != 256:
    print(res.summary)
    sys.exit("Push failed!")
print("Exiting updateEndpoint script..")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
17606,
198,
11748,
7422,
17983,
13,
88,
43695,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
4798,
7203,
17469,
278,
4296,
12915,
4122,
4226,
492,
494... | 2.617431 | 1,090 |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013--, biocore development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from os import getcwd, remove, rmdir, mkdir, path
from subprocess import Popen, PIPE, STDOUT
import tempfile
import shutil
from unittest import TestCase, main
from cogent.core.moltype import RNA, DNA
from cogent.util.misc import flatten
from bfillings.muscle_v38 import (Muscle, muscle_seqs, aln_tree_seqs,
align_unaligned_seqs, build_tree_from_alignment,
align_and_build_tree, add_seqs_to_alignment,
align_two_alignments)
class MuscleTests(GeneralSetUp):
    """Tests for the Muscle application controller"""
    def test_base_command(self):
        """Muscle BaseCommand should return the correct BaseCommand"""
        c = Muscle()
        self.assertEqual(c.BaseCommand,\
            ''.join(['cd "',getcwd(),'/"; ','muscle']))
        c.Parameters['-in'].on('seq.txt')
        self.assertEqual(c.BaseCommand,\
            ''.join(['cd "',getcwd(),'/"; ','muscle -in "seq.txt"']))
        c.Parameters['-cluster2'].on('neighborjoining')
        self.assertEqual(c.BaseCommand,\
            ''.join(['cd "',getcwd(),'/"; ','muscle -cluster2 neighborjoining' +
            ' -in "seq.txt"']))
    def test_maxmb(self):
        """maxmb option should not break Muscle"""
        # Capping memory must not change the resulting alignment.
        app = Muscle()
        app.Parameters['-maxmb'].on('250')
        outfile = tempfile.NamedTemporaryFile()
        app.Parameters['-out'].on(outfile.name)
        infile = tempfile.NamedTemporaryFile()
        infile.write(
            ">Seq1\nAAAGGGTTTCCCCT\n"
            ">Seq2\nAAAGGGGGTTTCCACT\n")
        infile.flush()
        result = app(infile.name)
        observed = result['MuscleOut'].read()
        expected = (
            ">Seq1\nAAA--GGGTTTCCCCT\n"
            ">Seq2\nAAAGGGGGTTTCCACT\n"
        )
        self.assertEqual(observed, expected)
    def test_changing_working_dir(self):
        """Muscle BaseCommand should change according to WorkingDir"""
        c = Muscle(WorkingDir='/tmp/muscle_test')
        self.assertEqual(c.BaseCommand,\
            ''.join(['cd "','/tmp/muscle_test','/"; ','muscle']))
        c = Muscle()
        c.WorkingDir = '/tmp/muscle_test2'
        self.assertEqual(c.BaseCommand,\
            ''.join(['cd "','/tmp/muscle_test2','/"; ','muscle']))
        #removing the dirs is proof that they were created at the same time
        #if the dirs are not there, an OSError will be raised
        rmdir('/tmp/muscle_test')
        rmdir('/tmp/muscle_test2')
    def test_aln_tree_seqs(self):
        "aln_tree_seqs returns the muscle alignment and tree from iteration2"
        tree, aln = aln_tree_seqs(path.join(self.temp_dir, 'seq1.txt'),
                                   tree_type="neighborjoining",
                                   WorkingDir=self.temp_dir,
                                   clean_up=True)
        self.assertEqual(str(tree), '((1:1.125,2:1.125):0.375,3:1.5);')
        self.assertEqual(len(aln), 6)
        self.assertEqual(aln[-2], '>3\n')
        self.assertEqual(aln[-1], 'GCGGCUAUUAGAUCGUA------\n')
    def test_aln_tree_seqs_spaces(self):
        "aln_tree_seqs should work on filename with spaces"
        try:
            #create sequence files
            f = open(path.join(self.temp_dir_spaces, 'muscle_test_seq1.txt'),'w')
            f.write('\n'.join(self.lines1))
            f.close()
        except OSError:
            pass
        tree, aln = aln_tree_seqs(path.join(self.temp_dir_spaces,\
                                        'muscle_test_seq1.txt'),
                                   tree_type="neighborjoining",
                                   WorkingDir=getcwd(),
                                   clean_up=True)
        self.assertEqual(str(tree), '((1:1.125,2:1.125):0.375,3:1.5);')
        self.assertEqual(len(aln), 6)
        self.assertEqual(aln[-2], '>3\n')
        self.assertEqual(aln[-1], 'GCGGCUAUUAGAUCGUA------\n')
        remove(self.temp_dir_spaces+'/muscle_test_seq1.txt')
    def test_align_unaligned_seqs(self):
        """align_unaligned_seqs should work as expected"""
        res = align_unaligned_seqs(self.seqs1, RNA)
        self.assertEqual(res.toFasta(), align1)
    def test_build_tree_from_alignment(self):
        """Muscle should return a tree built from the passed alignment"""
        tree_short = build_tree_from_alignment(build_tree_seqs_short, DNA)
        num_seqs = flatten(build_tree_seqs_short).count('>')
        self.assertEqual(len(tree_short.tips()), num_seqs)
        tree_long = build_tree_from_alignment(build_tree_seqs_long, DNA)
        # Every tip of the tree must correspond to an input sequence name.
        seq_names = []
        for line in build_tree_seqs_long.split('\n'):
            if line.startswith('>'):
                seq_names.append(line[1:])
        for node in tree_long.tips():
            if node.Name not in seq_names:
                self.fail()
    def test_align_and_build_tree(self):
        """Should align and build a tree from a set of sequences"""
        res = align_and_build_tree(self.seqs1, RNA)
        self.assertEqual(res['Align'].toFasta(), align1)
        tree = res['Tree']
        seq_names = []
        for line in align1.split('\n'):
            if line.startswith('>'):
                seq_names.append(line[1:])
        for node in tree.tips():
            if node.Name not in seq_names:
                self.fail()
    def test_add_seqs_to_alignment(self):
        """Should add sequences to an alignment"""
        res = add_seqs_to_alignment(seqs_to_add, align1)
        self.assertEqual(res.toFasta(), added_align_result)
    def test_align_two_alignments(self):
        """Should align to multiple sequence alignments"""
        res = align_two_alignments(align1, aln_to_merge)
        self.assertEqual(res.toFasta(), merged_align_result)
align1 = ">seq_0\nACUGCUAGCUAGUAGCGUACGUA\n>seq_1\n---GCUACGUAGCUAC-------\n>seq_2\nGCGGCUAUUAGAUCGUA------"
# for use in test_add_seqs_to_alignment()
seqs_to_add = ">foo\nGCUACGUAGCU\n>bar\nGCUACGUAGCC"
added_align_result = ">bar\n---GCUACGUAGCC---------\n>foo\n---GCUACGUAGCU---------\n>seq_0\nACUGCUAGCUAGUAGCGUACGUA\n>seq_1\n---GCUACGUAGCUAC-------\n>seq_2\nGCGGCUAUUAGAUCGUA------"
# for use in test_align_two_alignments()
aln_to_merge = ">foo\nGCUACGUAGCU\n>bar\n--UACGUAGCC"
merged_align_result = ">bar\n-----UACGUAGCC---------\n>foo\n---GCUACGUAGCU---------\n>seq_0\nACUGCUAGCUAGUAGCGUACGUA\n>seq_1\n---GCUACGUAGCUAC-------\n>seq_2\nGCGGCUAUUAGAUCGUA------"
build_tree_seqs_short = """>muscle_test_seqs_0
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
AGCTTTAAATCATGCCAGTG
>muscle_test_seqs_1
GACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
TGCTTTCAATAATGCCAGTG
>muscle_test_seqs_2
AACCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
TGCTTTGAATCATGCCAGTA
>muscle_test_seqs_3
AAACCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
TGCTTTACATCATGCAAGTG
>muscle_test_seqs_4
AACCGCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
TGCTTTAAATCATGCCAGTG
>muscle_test_seqs_5
AACCCCCGCGGTAGGTGCAACACGTCCCATACAACGGGTTGGAAGGTTAAGACACAACGCGTTAATTTTGTGTCAGGGCA
TGCTTTAAATCATGCCAGTT
>muscle_test_seqs_6
GACCCCCGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
TACTTTAGATCATGCCGGTG
>muscle_test_seqs_7
AACCCCCACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
TGCTTTAAATCATGCCAGTG
>muscle_test_seqs_8
AACCCCCACGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
TGCATTAAATCATGCCAGTG
>muscle_test_seqs_9
AAGCCCCACGGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA
TGCTTTAAATCCTGACAGCG
"""
build_tree_seqs_long = """>muscle_test_seqs_0
AACCCCCACGGTGGATGCCACACGCCCCATACAAAGGGTAGGATGCTTAAGACACATCGCGTCAGGTTTGTGTCAGGCCT
AGCTTTAAATCATGCCAGTG
>muscle_test_seqsaaaaaaaa_1
GACCCACACGGTGGATGCAACAGATCCCATACACCGAGTTGGATGCTTAAGACGCATCGCGTGAGTTTTGCGTCAAGGCT
TGCTTTCAATAATGCCAGTG
>muscle_test_seqsaaaaaaaa_2
AACCCCCACGGTGGCAGCAACACGTCACATACAACGGGTTGGATTCTAAAGACAAACCGCGTCAAAGTTGTGTCAGAACT
TGCTTTGAATCATGCCAGTA
>muscle_test_seqsaaaaaaaa_3
AAACCCCACGGTAGCTGCAACACGTCCCATACCACGGGTAGGATGCTAAAGACACATCGGGTCTGTTTTGTGTCAGGGCT
TGCTTTACATCATGCAAGTG
>muscle_test_seqsaaaaaaaa_4
AACCGCCACGGTGGGTACAACACGTCCACTACATCGGCTTGGAAGGTAAAGACACGTCGCGTCAGTATTGCGTCAGGGCT
TGCTTTAAATCATGCCAGTG
>muscle_test_seqsaaaaaaaa_5
AACCCCCGCGGTAGGTGCAACACGTCCCATACAACGGGTTGGAAGGTTAAGACACAACGCGTTAATTTTGTGTCAGGGCA
TGCTTTAAATCATGCCAGTT
>muscle_test_seqsaaaaaaaa_6
GACCCCCGCGGTGGCTGCAAGACGTCCCATACAACGGGTTGGATGCTTAAGACACATCGCAACAGTTTTGAGTCAGGGCT
TACTTTAGATCATGCCGGTG
>muscle_test_seqsaaaaaaaa_7
AACCCCCACGGTGGCTACAAGACGTCCCATCCAACGGGTTGGATACTTAAGGCACATCACGTCAGTTTTGTGTCAGAGCT
TGCTTTAAATCATGCCAGTG
>muscle_test_seqsaaaaaaaa_8
AACCCCCACGGTGGCTGCAACACGTGGCATACAACGGGTTGGATGCTTAAGACACATCGCCTCAGTTTTGTGTCAGGGCT
TGCATTAAATCATGCCAGTG
>muscle_test_seqsaaaaaaaa_9
AAGCCCCACGGTGGCTGAAACACATCCCATACAACGGGTTGGATGCTTAAGACACATCGCATCAGTTTTATGTCAGGGGA
TGCTTTAAATCCTGACAGCG
"""
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
2211,
438,
11,
3182,
420,
382,
2478,
1074,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
13789,
13,
... | 2.001699 | 4,710 |
""" STEP ONE """
import requests
def request_tg_code_get_random_hash(input_phone_number):
""" requests Login Code
and returns a random_hash
which is used in STEP TWO """
request_url = "https://my.telegram.org/auth/send_password"
request_data = {
"phone": input_phone_number
}
response_c = requests.post(request_url, data=request_data)
try:
json_response = True, response_c.json()
except:
json_response = False, response_c.text
return json_response
| [
37811,
49154,
16329,
37227,
201,
198,
201,
198,
11748,
7007,
201,
198,
201,
198,
201,
198,
4299,
2581,
62,
25297,
62,
8189,
62,
1136,
62,
25120,
62,
17831,
7,
15414,
62,
4862,
62,
17618,
2599,
201,
198,
220,
220,
220,
37227,
7007,
2... | 2.495327 | 214 |
for c in range (0,6):
print('oi')
for c in range (0,6):
print(c)
for c in range (6,0,-1):
print(c)
for c in range (0,20,2):
print(c)
n = int(input('Digite um número'))
for c in range (0,n+1):
print(c)
i = int(input('Digite um número inicial'))
f = int(input('Digite um número final'))
p = int(input('Digite a frequência'))
for c in range (i, f+1, p):
print(c)
s = 0
for c in range (0,4):
n = int(input('digite um número para ser somado'))
s += n
print (s) | [
1640,
269,
287,
2837,
357,
15,
11,
21,
2599,
198,
220,
220,
220,
3601,
10786,
23013,
11537,
198,
1640,
269,
287,
2837,
357,
15,
11,
21,
2599,
198,
220,
220,
220,
3601,
7,
66,
8,
198,
1640,
269,
287,
2837,
357,
21,
11,
15,
12095,... | 2.090129 | 233 |
# -*- coding: utf-8 -*-
from datetime import datetime
import uuid
import os
import json
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
334,
27112,
198,
11748,
28686,
198,
11748,
33918,
628,
628,
198
] | 2.787879 | 33 |
# cd_raw_collection.py
# "cd" stands for class discovery. This script is used by class discovery
# related scripts, and represents a raw input list file.
#
# Steven Lu 5/20/2019
from entity.cd_subject import CDSubject
from collection.raw_collection import RawCollection
# Overwrite parent's add_subject() function | [
2,
22927,
62,
1831,
62,
43681,
13,
9078,
198,
2,
366,
10210,
1,
6296,
329,
1398,
9412,
13,
770,
4226,
318,
973,
416,
1398,
9412,
198,
2,
3519,
14750,
11,
290,
6870,
257,
8246,
5128,
1351,
2393,
13,
198,
2,
198,
2,
8239,
6026,
64... | 3.678161 | 87 |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_translate 1'] = [
'ca',
"n't",
"'m",
"'s",
"'ve",
'ha',
'wo',
'atm',
'xmas',
"'ll",
'im'
]
snapshots['test_translate 2'] = [
'can',
'not',
'am',
'is',
'have',
'have',
'will',
'at',
'the',
'moment',
'Christmas',
'will',
'I',
'am'
]
snapshots['test_translate 3'] = [
'can',
'not',
'am',
'is',
'have',
'have',
'will',
'at',
'the',
'moment',
'Christmas',
'will',
'I',
'am'
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11495,
1477,
24879,
25,
410,
16,
532,
3740,
1378,
42469,
13,
4743,
14,
89,
34,
19,
88,
52,
66,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
19... | 1.84715 | 386 |
#!/usr/bin/env python
# coding: utf-8
# In[22]:
import cv2
import glob
import numpy as np
def findClickCoordinate(img,dot,dot_size = 5):
"""
Find coordinate of clicked location
Parameters:
-img: input image
-dot: amount of output dot
-dot_size: size of dot
Returns:
-circles: list of clicked coordinate [(x,y),...]
-img: output image
"""
#create window
cv2.namedWindow("Frame")
#set mouse call back
cv2.setMouseCallback("Frame", mouse_drawing)
#create lit to contain coordinate
circles = []
while True:
for center_position in circles:
cv2.circle(img, center_position, dot_size, (0, 0, 255), -1)
cv2.imshow("Frame", img)
if len(circles) == dot:
break
key = cv2.waitKey(30)
if key == 27:
print("esc")
break
elif key == ord("d"):
circles = []
cv2.destroyAllWindows()#test
return circles,img
# In[24]:
#=========USER START================
#folder path
path = 'RAW_FUNDUS_INPUT/*.jpg'
image_number = 2
#=========USER END================
#read image
image_list = []
for filename in glob.glob(path):
image_list.append(filename)
img = cv2.imread(image_list[image_number])
#find clicked coordinate
coor,out= findClickCoordinate(img,1,dot_size = 5)
#print coordinate
print(coor)
#show image
cv2.imshow("Output", out)
cv2.waitKey(0)#test
cv2.destroyAllWindows()#test
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
1828,
5974,
628,
198,
11748,
269,
85,
17,
198,
11748,
15095,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
1064,
8164,
7... | 2.339144 | 631 |
"""A ProFile "magic block" plugin for Cameo/Aphid system information.
Forfeited into the public domain with NO WARRANTY. Read LICENSE for details.
This plugin allows the Apple to obtain some basic system information from a
Cameo/Aphid.
By convention, this plugin is associated with block $FFFEFD. There's no reason
it can't be attached to different blocks, but for the following $FFFEFD will be
used as a shorthand for whatever "magic block" is in use.
Operations:
- ProFile reads to $FFFEFD: Retrieve information about the Cameo/Aphid. The
data returned by this plugin has the following format:
Bytes 0-9: DDDDHHMMSS ASCII uptime; days right-justified space padded
Bytes 10-24: ASCII right-aligned space-padded filesystem bytes free
Bytes 25-31: ASCII null-terminated 1-minute load average
Bytes 32-38: ASCII null-terminated 5-minute load average
Bytes 39-45: ASCII null-terminated 15-minute load average
Bytes 46-50: ASCII null-terminated number of processes running
Bytes 51-55: ASCII null-terminated number of total processes
- ProFile writes to $FFFEFD: do nothing at all.
"""
import logging
import os
from typing import Optional
import profile_plugins
PROFILE_READ = 0x00 # The ProFile protocol op byte that means "read a block"
class SystemInfoPlugin(profile_plugins.Plugin):
"""System information plugin.
See the file header comment for usage details.
"""
def __call__(
self,
op: int,
block: int,
retry_count: int,
sparing_threshold: int,
data: Optional[bytes],
) -> Optional[bytes]:
"""Implements the protocol described in the file header comment."""
# We simply log and ignore non-reads.
if op != PROFILE_READ:
logging.warning(
'System info plugin: ignoring non-read operation %02X', op)
return None
# Collect the information that this plugin returns. First, system uptime:
with open('/proc/uptime', 'r') as f:
seconds_left = round(float(f.read().split(' ')[0]))
u_days, seconds_left = divmod(seconds_left, 86400)
u_hours, seconds_left = divmod(seconds_left, 3600)
u_minutes, seconds_left = divmod(seconds_left, 60)
uptime = '{:4d}{:02d}{:02d}{:02d}'.format(
u_days, u_hours, u_minutes, seconds_left)
# Filesystem bytes free.
st_statvfs = os.statvfs('.')
bytes_free = '{:15d}'.format(st_statvfs.f_bsize * st_statvfs.f_bavail)
# System load.
with open('/proc/loadavg', 'r') as f:
l_1min, l_5min, l_15min, l_processes, _ = f.read().split(' ')
l_running, l_total = l_processes.split('/')
# Helper: convert to binary and zero-pad to the right.
data = b''.join([
uptime.encode(),
bytes_free.encode(),
encode_and_pad(l_1min, 7),
encode_and_pad(l_5min, 7),
encode_and_pad(l_15min, 7),
encode_and_pad(l_running, 5),
encode_and_pad(l_total, 5),
])
return data[:532] + bytes(max(0, 532 - len(data)))
# By calling plugin() within this module, the plugin service instantiates a
# new FilesystemOpsPlugin.
plugin = SystemInfoPlugin
| [
37811,
32,
1041,
8979,
366,
32707,
2512,
1,
13877,
329,
32653,
78,
14,
32,
746,
312,
1080,
1321,
13,
198,
198,
1890,
5036,
863,
656,
262,
1171,
7386,
351,
8005,
34764,
56,
13,
4149,
38559,
24290,
329,
3307,
13,
198,
198,
1212,
13877... | 2.693231 | 1,167 |
from django.test import TestCase
from django_hosts import reverse
from util.test_utils import CleanUpTempFilesTestMixin, Get, MOCK_JPG_FILE, assert_requesting_paths_succeeds
from ..models import Equipment
| [
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
62,
4774,
82,
1330,
9575,
198,
198,
6738,
7736,
13,
9288,
62,
26791,
1330,
5985,
4933,
30782,
25876,
14402,
35608,
259,
11,
3497,
11,
337,
11290,
62,
41,
6968,
... | 3.285714 | 63 |
# -*- coding: utf-8 -*-
"""`bottle_jwt.auth` module.
Main auth providers class implementation.
"""
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
from .compat import signature
__all__ = ["BaseAuthBackend", ]
@six.add_metaclass(abc.ABCMeta)
class BaseAuthBackend(object):
"""Auth Provider Backend Interface. Defines a standard API for implementation
in order to work with different backends (SQL, Redis, Filesystem-based, external
API services, etc.)
Notes:
It is not necessary to subclass `BaseAuthBackend` in order to make `bottle-jwt` plugin to
work, as long as you implement it's API. For example all the following examples are valid.
Examples:
>>> class DummyExampleBackend(object):
... credentials = ('admin', 'qwerty')
... user_id = 1
...
... def authenticate_user(self, username, password):
... if (username, password) == self.credentials
... return {'user': 'admin', 'id': 1}
... return None
...
... def get_user(self, user_id):
... return {'user': 'admin '} if user_id == self.user_id else None
...
>>> class SQLAlchemyExampleBackend(object):
... def __init__(self, some_orm_model):
... self.orm_model = some_orm_model
...
... def authenticate(self, user_uid, user_password):
... return self.orm_model.get(email=user_uid, password=user_password) or None
...
... def get_user(self, user_uid):
... return self.orm_model.get(id=user_uid) or None
"""
@abc.abstractmethod
def authenticate_user(self, username, password): # pragma: no cover
"""User authentication method. All subclasses must implement the
`authenticate_user` method with the following specs.
Args:
username (str): User identity for the backend (email/username).
password (str): User secret password.
Returns:
A dict representing User record if authentication is succesful else None.
Raises:
`bottle_jwt.error.JWTBackendError` if any exception occurs.
"""
pass
@abc.abstractmethod
def get_user(self, user_uid): # pragma: no cover
"""User data retrieval method. All subclasses must implement the
`get_user` method with the following specs.
Args:
user_uid (object): User identity in backend.
Returns:
User data (dict) if user exists or None.
Raises:
`bottle_jwt.error.JWTBackendError` if any exception occurs.
"""
pass
@classmethod
def __subclasshook__(cls, subclass):
"""Useful for checking interface for backends that don't inherit from
BaseAuthBackend.
"""
if cls is BaseAuthBackend:
try:
authenticate_user_signature = set(signature(subclass.authenticate_user).parameters)
get_user_signature = set(signature(subclass.get_user).parameters)
return authenticate_user_signature.issuperset({"username", "password"}) and \
get_user_signature.issuperset({"user_id"})
except AttributeError:
return False
return NotImplemented # pragma: no cover
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
63,
10985,
293,
62,
73,
46569,
13,
18439,
63,
8265,
13,
198,
198,
13383,
6284,
9549,
1398,
7822,
13,
198,
37811,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
... | 2.381379 | 1,450 |
import subprocess
import log
| [
11748,
850,
14681,
198,
11748,
2604,
628
] | 4.285714 | 7 |
from setuptools import setup, find_packages
import os
setup(name="jondis",
version="0.1",
description="Redis pool for HA redis clusters",
long_description=read('README.md'),
author="Jon Haddad",
author_email="jon@grapheffect.com",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Environment :: Plugins",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="redis",
install_requires=["redis"],
url="https://github.com/StartTheShift/jondis",
packages=find_packages(),
include_package_data=True
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
11748,
28686,
198,
198,
40406,
7,
3672,
2625,
73,
623,
271,
1600,
198,
220,
220,
220,
220,
220,
2196,
2625,
15,
13,
16,
1600,
198,
220,
220,
220,
220,
220,
6764,
2625,
... | 2.458667 | 375 |
import pytest
import sys
from snsql import metadata
from snsql.sql.privacy import Privacy
privacy = Privacy(alphas=[0.01, 0.05], epsilon=30.0, delta=0.1)
overrides = {'censor_dims': False} | [
11748,
12972,
9288,
198,
11748,
25064,
198,
6738,
3013,
25410,
1330,
20150,
198,
198,
6738,
3013,
25410,
13,
25410,
13,
13776,
1590,
1330,
16777,
198,
198,
13776,
1590,
796,
16777,
7,
282,
5902,
41888,
15,
13,
486,
11,
657,
13,
2713,
... | 2.652778 | 72 |
import sys
import os.path
import logging
import warnings
from . import PACKAGEDIR
from contextlib import contextmanager
from matplotlib.backends.backend_pdf import PdfPages
import copy
import numpy as np
import pandas as pd
import lightkurve as lk
import matplotlib.pyplot as plt
from lightkurve import MPLSTYLE
from astropy.table import Table
import corner
import pymc3 as pm
from fbpca import pca
import exoplanet as xo
import astropy.units as u
import theano.tensor as tt
from astropy.constants import G
from astropy.stats import sigma_clip
from astropy.convolution import convolve, Box1DKernel
from itertools import combinations_with_replacement as multichoose
| [
11748,
25064,
198,
11748,
28686,
13,
6978,
198,
11748,
18931,
198,
11748,
14601,
198,
6738,
764,
1330,
47035,
4760,
1961,
4663,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
2603,
29487,
8019,
13,
1891,
2412,
13,
1891,
437,
62,
1... | 3.34 | 200 |
# -*- coding:utf-8 -*-
"""
"""
import numpy as np
import pandas as pd
from pandas.util import hash_pandas_object
from hypernets.tabular.datasets.dsutils import load_bank
from . import if_dask_ready, is_dask_installed
from ..dask_transofromer_test import setup_dask
if is_dask_installed:
import dask.dataframe as dd
from hypernets.tabular.dask_ex import DaskToolBox
dd_selector = DaskToolBox.feature_selector_with_drift_detection
@if_dask_ready | [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
19798,
292,
13,
22602,
1330,
12234,
62,
79,
392,
292,
62,
15252... | 2.649425 | 174 |
import os
import sys
from os.path import join as pjoin
from loguru import logger
from pathlib import Path
from rich.logging import RichHandler
try:
from pyinspect import install_traceback
install_traceback()
except ImportError:
pass # fails in notebooks
from . import settings, actors
from .scene import Scene
from .video import VideoMaker, Animation
base_dir = Path(os.path.join(os.path.expanduser("~"), ".brainrender"))
base_dir.mkdir(exist_ok=True)
vedo_path = pjoin(os.environ['HOME'], 'Dropbox/git/vedo/vedo')
sys.path.insert(0, vedo_path)
import vedo
from vedo import Plotter
__version__ = "2.0.3.0rc"
# set logger level
def set_logging(level="INFO", path=None):
"""
Sets loguru to save all logs to a file i
brainrender's base directory and to print
to stdout only logs >= to a given level
"""
logger.remove()
# logger.add(sys.stdout, level=level)
path = path or str(base_dir / "log.log")
if Path(path).exists():
Path(path).unlink()
logger.add(path, level="DEBUG")
if level == "DEBUG":
logger.configure(
handlers=[
{
"sink": RichHandler(level="WARNING", markup=True),
"format": "{message}",
}
]
)
if not settings.DEBUG:
set_logging()
else:
set_logging(level="DEBUG")
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
28686,
13,
6978,
1330,
4654,
355,
279,
22179,
198,
6738,
2604,
14717,
1330,
49706,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
5527,
13,
6404,
2667,
1330,
3998,
25060,
198,
198,
28311,
25,
... | 2.442068 | 561 |
from unittest.mock import patch
from mau.parsers import nodes
from mau.parsers.main_parser import MainParser
from tests.helpers import init_parser_factory, parser_test_factory
init_parser = init_parser_factory(MainParser)
_test = parser_test_factory(MainParser)
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
@patch("mau.parsers.main_parser.header_anchor")
| [
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
198,
198,
6738,
285,
559,
13,
79,
945,
364,
1330,
13760,
198,
6738,
285,
559,
13,
79,
945,
364,
13,
12417,
62,
48610,
1330,
8774,
46677,
198,
198,
6738,
5254,
13,
16794,
364,
1330,
231... | 2.580645 | 279 |
from __future__ import unicode_literals
from mayan.apps.smart_settings.classes import NamespaceMigration
from .serialization import yaml_load
class CommonSettingMigration(NamespaceMigration):
"""
From version 0001 to 0002 backend arguments are no longer quoted
but YAML valid too. Changed in version 3.3.
"""
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
743,
272,
13,
18211,
13,
27004,
62,
33692,
13,
37724,
1330,
28531,
10223,
44,
4254,
198,
198,
6738,
764,
46911,
1634,
1330,
331,
43695,
62,
2220,
628,
198,
48... | 3.357143 | 98 |
import json
import numpy as np
# import torchtext
from torchtext.vocab import Vectors
from tqdm import tqdm
import torch
from torch.utils.data import TensorDataset, DataLoader
import nltk
nltk.download('punkt')
# def clean_text(text):
# text = re.sub(r"<.*?>", " ", text)
# text = re.sub(r"[^A-Za-z0-9(),!?\'`]", " ", text)
# text = re.sub(r"\s{2,}", " ", text)
# return text.strip().lower() | [
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
2,
1330,
28034,
5239,
198,
6738,
28034,
5239,
13,
18893,
397,
1330,
569,
478,
669,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
... | 2.372093 | 172 |
import unittest
import fix15
import io
import unittest.mock
import fix15.__main__
test_file = \
"""id,firstname,lastname,accountid
00300000053jhOc,john,smith,00100006gG70oPa
003000A694fjJ21,allison,brown,001000000043463
0030000bB09fQt9,thomas,tomlinson,001000004FfoA00
003000000044000,hannah,anderson,001000000000001
003000000npQ9vB,sarah,white,00100006gG70oPa"""
test_file_converted = \
"""id,firstname,lastname,accountid
00300000053jhOcAAI,john,smith,00100006gG70oPaAQI
003000A694fjJ21ACE,allison,brown,001000000043463AAA
0030000bB09fQt9AIE,thomas,tomlinson,001000004FfoA00AQE
003000000044000AAA,hannah,anderson,001000000000001AAA
003000000npQ9vBAAS,sarah,white,00100006gG70oPaAQI"""
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
11748,
4259,
1314,
198,
11748,
33245,
198,
11748,
555,
715,
395,
13,
76,
735,
198,
11748,
4259,
1314,
13,
834,
12417,
834,
198,
198,
9288,
62,
7753,
796,
3467,
198,
37811,
312,
11,
11085,
3672,
11,
12957,
... | 2.333333 | 315 |
"""
This name generator is an improved version of das' random syllable-based
name generator.
Original source at: https://codereview.stackexchange.com/q/156903
Improved by: Gustavo R. Rehermann (Gustavo6046)
"""
import random
vowels = 'aeiou'
consonants = 'bcdfghjklmnpqrstvwxyz'
pre_consonants = 'tspdkcmnlxrg'
post_consonants = 'rhpzkltg'
triple_consonants = ['str', 'spl', 'xpl']
ditongs = ["ae", "ai", "ou", "ao", "oe", "oi", "oy", "aeo", "eio", "ee", "oo"] | [
37811,
198,
1212,
1438,
17301,
318,
281,
6596,
2196,
286,
288,
292,
6,
4738,
27226,
540,
12,
3106,
198,
3672,
17301,
13,
198,
198,
20556,
2723,
379,
25,
3740,
1378,
19815,
567,
1177,
13,
301,
330,
365,
87,
3803,
13,
785,
14,
80,
1... | 2.398964 | 193 |
import pytest
from streamsets.testframework.decorators import stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'USER_PASS'}])
@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'LDAP'},
{'authentication_type': 'NONE'},
{'authentication_type': 'USER_PASS'}])
@stub
@stub
@stub
@stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'cursor_finalizer_enabled': False}, {'cursor_finalizer_enabled': True}])
@stub
@stub
@stub
@stub
@stub
@stub
@stub
@stub
@stub
@stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
{'on_record_error': 'STOP_PIPELINE'},
{'on_record_error': 'TO_ERROR'}])
@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'LDAP'}, {'authentication_type': 'USER_PASS'}])
@stub
@stub
@stub
@stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'socket_keep_alive': False}, {'socket_keep_alive': True}])
@stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'ssl_enabled': False}, {'ssl_enabled': True}])
@stub
@pytest.mark.parametrize('stage_attributes', [{'ssl_invalid_host_name_allowed': False},
{'ssl_invalid_host_name_allowed': True}])
@stub
@stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'upsert': False}, {'upsert': True}])
@stub
@pytest.mark.parametrize('stage_attributes', [{'authentication_type': 'LDAP'}, {'authentication_type': 'USER_PASS'}])
@stub
@pytest.mark.parametrize('stage_attributes', [{'write_concern': 'ACKNOWLEDGED'},
{'write_concern': 'FSYNCED'},
{'write_concern': 'FSYNC_SAFE'},
{'write_concern': 'JOURNALED'},
{'write_concern': 'JOURNAL_SAFE'},
{'write_concern': 'MAJORITY'},
{'write_concern': 'NORMAL'},
{'write_concern': 'REPLICAS_SAFE'},
{'write_concern': 'REPLICA_ACKNOWLEDGED'},
{'write_concern': 'SAFE'},
{'write_concern': 'UNACKNOWLEDGED'}])
| [
11748,
12972,
9288,
198,
198,
6738,
15190,
1039,
13,
9288,
30604,
13,
12501,
273,
2024,
1330,
17071,
628,
198,
31,
301,
549,
198,
31,
9078,
9288,
13,
4102,
13,
17143,
316,
380,
2736,
10786,
14247,
62,
1078,
7657,
3256,
685,
90,
6,
4... | 1.729766 | 1,495 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-05 16:59
from __future__ import unicode_literals
from django.db import migrations, models
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
319,
2177,
12,
3070,
12,
2713,
1467,
25,
3270,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,... | 2.767857 | 56 |
'''
Class for evaluating trained Classifier model
'''
from sklearn.metrics import accuracy_score
func_map = {
'accuracy_score': accuracy_score
}
class ModelEvaluator:
'''
Class for evaluating trained Classifier model
'''
def load_input(self, trained_classifier):
'''
Handles loading of trained classifier
Input:
trained_classifier: Classifier instance
Returns: nothing
'''
print('ModelEvaluator loading with ', type(trained_classifier))
self.input = trained_classifier
def configure(self, params):
'''
Configures metrics used in evaluation type
Input:
params: {'metrics': ['accuracy_score']}
Returns: nothing
'''
self.config = params
def execute(self):
'''
Pipeline execution method. Kicks off evaluation process
Input: none
Returns: tuple containing trained model and metric dict
'''
print('ModelEvaluator execution called')
metrics = {}
for metric in self.config['metrics']:
metrics[metric] = func_map[metric](self.input.y_train,
self.input.y_hats)
self.output = (self.input.trained_model, metrics)
return self.output
| [
198,
7061,
6,
198,
9487,
329,
22232,
8776,
5016,
7483,
2746,
198,
7061,
6,
198,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
9922,
62,
26675,
198,
198,
20786,
62,
8899,
796,
1391,
198,
220,
220,
220,
705,
4134,
23843,
62,
26675,
1... | 2.325744 | 571 |