hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
690c0c0baa72e71fb3302292e6f136edd4cafd27 | 3,594 | py | Python | deep_rl/utils/plot.py | JACKHAHA363/DeepRL | 5e91086c17fd6de85f4d53873fab17e049dd5df5 | [
"Apache-2.0"
] | null | null | null | deep_rl/utils/plot.py | JACKHAHA363/DeepRL | 5e91086c17fd6de85f4d53873fab17e049dd5df5 | [
"Apache-2.0"
] | null | null | null | deep_rl/utils/plot.py | JACKHAHA363/DeepRL | 5e91086c17fd6de85f4d53873fab17e049dd5df5 | [
"Apache-2.0"
] | null | null | null | # Adapted from https://github.com/openai/baselines/blob/master/baselines/results_plotter.py
from ..component.bench import load_monitor_log
import numpy as np
import os
import re
class Plotter:
    """Load and plot episode-reward curves from baselines-style monitor logs.

    Adapted from openai/baselines results_plotter.
    """

    COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
              'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
              'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
    # Supported x-axis modes.
    X_TIMESTEPS = 'timesteps'
    X_EPISODES = 'episodes'
    X_WALLTIME = 'walltime_hrs'

    def __init__(self):
        pass

    def rolling_window(self, a, window):
        """Return a zero-copy strided view of `a` with a trailing rolling-window axis."""
        shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
        strides = a.strides + (a.strides[-1],)
        return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

    def window_func(self, x, y, window, func):
        """Smooth `y` by applying `func` over a rolling window; truncate `x` to match."""
        yw = self.rolling_window(y, window)
        yw_func = func(yw, axis=-1)
        # The first (window - 1) points have no full window, so drop them from x.
        return x[window - 1:], yw_func

    def ts2xy(self, ts, xaxis):
        """Convert a monitor DataFrame `ts` into (x, y) arrays for the given axis."""
        if xaxis == Plotter.X_TIMESTEPS:
            x = np.cumsum(ts.l.values)
            y = ts.r.values
        elif xaxis == Plotter.X_EPISODES:
            x = np.arange(len(ts))
            y = ts.r.values
        elif xaxis == Plotter.X_WALLTIME:
            x = ts.t.values / 3600.
            y = ts.r.values
        else:
            raise NotImplementedError
        return x, y

    def load_results(self, dirs, max_timesteps=1e8, x_axis=X_TIMESTEPS, episode_window=100):
        """Load monitor logs from each directory and return smoothed (x, y) pairs."""
        tslist = []
        for log_dir in dirs:  # renamed from `dir`, which shadowed the builtin
            ts = load_monitor_log(log_dir)
            ts = ts[ts.l.cumsum() <= max_timesteps]
            tslist.append(ts)
        xy_list = [self.ts2xy(ts, x_axis) for ts in tslist]
        if episode_window:
            xy_list = [self.window_func(x, y, episode_window, np.mean) for x, y in xy_list]
        return xy_list

    def average(self, xy_list, bin, max_timesteps, top_k=0):
        """Resample every run onto a common grid with step `bin` and stack them.

        `bin` is kept as a name (despite shadowing the builtin) for caller
        compatibility. If `top_k` is set, only the runs with the highest
        peak y are retained.
        """
        if top_k:
            perf = [np.max(y) for _, y in xy_list]
            top_k_runs = np.argsort(perf)[-top_k:]
            xy_list = [(x, y) for r, (x, y) in enumerate(xy_list) if r in top_k_runs]
        new_x = np.arange(0, max_timesteps, bin)
        new_y = [np.interp(new_x, x, y) for x, y in xy_list]
        return new_x, np.asarray(new_y)

    def plot_results(self, dirs, max_timesteps=1e8, x_axis=X_TIMESTEPS, episode_window=100, title=None):
        """Plot one smoothed reward curve per directory in `dirs`."""
        import matplotlib.pyplot as plt
        plt.ticklabel_format(axis='x', style='sci', scilimits=(1, 1))
        xy_list = self.load_results(dirs, max_timesteps, x_axis, episode_window)
        for (i, (x, y)) in enumerate(xy_list):
            color = Plotter.COLORS[i]
            plt.plot(x, y, color=color)
        plt.xlabel(x_axis)
        plt.ylabel("Episode Rewards")
        if title is not None:
            plt.title(title)

    def load_log_dirs(self, pattern, negative_pattern=' ', root='./log', **kwargs):
        """Return the sorted leaf directories under `root` matching `pattern`."""
        dirs = [item[0] for item in os.walk(root)]
        # Keep only leaves: relies on os.walk ordering where a directory is
        # immediately followed by its children.
        leaf_dirs = []
        for i in range(len(dirs)):
            if i + 1 < len(dirs) and dirs[i + 1].startswith(dirs[i]):
                continue
            leaf_dirs.append(dirs[i])
        names = []
        p = re.compile(pattern)
        # BUG FIX: this local was named `np`, shadowing the numpy alias
        # imported at module level.
        neg_p = re.compile(negative_pattern)
        for log_dir in leaf_dirs:
            if p.match(log_dir) and not neg_p.match(log_dir):
                names.append(log_dir)
                print(log_dir)
        return sorted(names)
| 36.673469 | 104 | 0.569004 |
7b87bea272c714bc08d65bbd54ca8f0cafb83010 | 722 | py | Python | services/ec2/sample.py | rolandovillca/aws_samples_boto3_sd | 92cb404b96e252c15547593e273e0ab15aa74e14 | [
"MIT"
] | null | null | null | services/ec2/sample.py | rolandovillca/aws_samples_boto3_sd | 92cb404b96e252c15547593e273e0ab15aa74e14 | [
"MIT"
] | null | null | null | services/ec2/sample.py | rolandovillca/aws_samples_boto3_sd | 92cb404b96e252c15547593e273e0ab15aa74e14 | [
"MIT"
] | null | null | null | '''
====================
Boto 3 - EC2 Example
====================
This application implements the EC2 service that lets you gets
information from Amazon EC2. See the README for more details.
'''
# NOTE(review): this script used Python 2 print statements; modernized to
# print() calls so it runs under Python 3 as well.
import boto3
import json

# Load AWS credentials from the local config file. `with` ensures the file
# handle is closed (the original leaked it via open(...).read()).
with open('config/defaults.json') as config_file:
    config = json.loads(config_file.read())
credentials = config['credentials']
AWS_ACCESS_KEY_ID = credentials['aws_access_key_id']
AWS_SECRET_ACCESS_KEY = credentials['aws_secret_access_key']
REGION_NAME = 'us-west-1'  # hard-coded region; TODO: make configurable

ec2 = boto3.client('ec2',
                   aws_access_key_id=AWS_ACCESS_KEY_ID,
                   aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                   region_name=REGION_NAME)

# Print available regions and all instances for this account/region.
print(ec2.describe_regions())
print()
print(ec2.describe_instances())
print | 27.769231 | 64 | 0.691136 |
a465135aefbe385160c9287bd595183c71c2e124 | 4,826 | py | Python | tuun/probo/models/regression/transfer_regression.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 33 | 2020-08-30T16:22:35.000Z | 2022-02-26T13:48:32.000Z | tuun/probo/models/regression/transfer_regression.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2021-01-18T19:46:43.000Z | 2021-03-24T09:59:14.000Z | tuun/probo/models/regression/transfer_regression.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2020-08-25T17:02:15.000Z | 2021-04-21T16:40:44.000Z | import os
import shutil
import tempfile
import joblib
import tarfile
from urllib.request import urlopen
from pathlib import Path
import numpy as np
ENV_TUUN_CACHE = 'TUUN_CACHE'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'


def get_tuun_cache_dir():
    """Resolve the directory Tuun uses for cached models.

    Precedence: $TUUN_CACHE, else $XDG_CACHE_HOME/tuun, else ~/.cache/tuun.
    A leading '~' in the result is expanded.
    """
    xdg_base = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    fallback = os.path.join(xdg_base, 'tuun')
    return os.path.expanduser(os.getenv(ENV_TUUN_CACHE, fallback))
def listdir_nohidden(path):
    """Like os.listdir, but with hidden (dot-prefixed) entries filtered out."""
    visible = []
    for entry in os.listdir(path):
        if not entry.startswith('.'):
            visible.append(entry)
    return visible
class TransferRegression:
    # Wraps a set of pre-trained regression models (downloaded or local
    # tarball, cached on disk) used to predict a GP prior mean.

    def __init__(self, transfer_config, use_cached=True):
        """
        Generic regression model class for predicting the prior mean for a GP model

        Parameters
        ----------
        transfer_config : dict
            Config dictionary for tranfer regression model. Required keys:
            'task_name', 'model_type'; optional: 'output_mode',
            'remote_url' or 'local_path'.
        use_cached : bool
            Flag to use cached models. If False, the model archive is
            re-fetched and re-extracted even if a cache exists.
        """
        self.task_name = transfer_config['task_name']
        # NOTE(review): `model_types` is not referenced by any visible code
        # in this class — presumably the set of supported types; confirm.
        self.model_types = {'sklearn.linear_model.Ridge'}
        self.best_model = None
        self.model_type = transfer_config['model_type']
        self.output_mode = transfer_config.get('output_mode', 'val_accuracy')
        assert self.output_mode in ['val_accuracy', 'val_error']
        self.cache_dir = get_tuun_cache_dir()
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        self.model_dir = os.path.join(self.cache_dir, self.task_name)
        if not os.path.exists(self.model_dir) or not use_cached:
            Path(self.model_dir).mkdir(parents=True, exist_ok=True)
            tmp_file = tempfile.NamedTemporaryFile()
            if 'remote_url' in transfer_config:
                # Download file to a temp file first to avoid overwriting
                # the cache with a corrupted file in case the download fails.
                # NOTE(review): tarfile.open(tmp_file.name) reads the path
                # without an explicit flush/seek first — appears to rely on
                # copyfileobj having written everything; confirm on all OSes.
                with urlopen(transfer_config['remote_url']) as response:
                    shutil.copyfileobj(response, tmp_file, length=8192)
                filename = tmp_file.name
            else:
                filename = os.path.expanduser(transfer_config['local_path'])
            tarball = tarfile.open(filename)
            tarball.extractall(self.model_dir)
            tarball.close()
            tmp_file.close()
        # Get all non-hidden entries in self.model_dir
        self.model_fnames = listdir_nohidden(self.model_dir)
        # TODO: if extra files are in model_dir (e.g. accidentally/purposefully placed
        # by user, or automatically generated by OS), it breaks the following logic.
        if len(self.model_fnames) == 1:
            # Archive contained a single wrapper directory: descend into it.
            if not os.path.isfile(os.path.join(self.model_dir, self.model_fnames[0])):
                self.model_dir = os.path.join(self.model_dir, self.model_fnames[0])
                # Get all non-hidden entries in self.model_dir
                self.model_fnames = listdir_nohidden(self.model_dir)

    def get_model_prediction(self, model, X):
        """Return model prediction, based on self.model_type and config."""
        # Support custom regression models
        if 'sklearn' in self.model_type:
            # sklearn requires 2d numpy ndarray
            X = np.array(X)
            if len(X.shape) == 1:
                X = X.reshape(1, -1)
            # Make prediction
            prediction = model.predict(X)
        else:
            # Otherwise, assume model is callable
            prediction = model(X)
        # If val_error mode, return 1 - validation accuracy prediction
        if self.output_mode == 'val_error':
            prediction = 1 - prediction
        return prediction

    def evaluate_model(self, model_name, X):
        """Load the named model from self.model_dir and predict on X."""
        model_path = os.path.join(self.model_dir, model_name)
        model = self.load_model(model_path)
        prediction = self.get_model_prediction(model, X)
        return prediction

    def set_best_model(self, file_ind):
        """Select the model at index `file_ind` in self.model_fnames.

        An index of -1 clears the selection.
        """
        if file_ind == -1:
            self.best_model = None
        else:
            model_name = self.model_fnames[file_ind]
            model_path = os.path.join(self.model_dir, model_name)
            self.best_model = self.load_model(model_path)

    def load_model(self, model_path):
        """Deserialize a model from disk; only sklearn models are supported."""
        # TODO: Support custom regression models
        if 'sklearn' in self.model_type:
            model = joblib.load(model_path)
            return model
        else:
            raise NotImplementedError

    def __call__(self, X):
        """Predict with the currently selected best model.

        Raises
        ------
        ValueError
            If set_best_model() has not selected a model yet.
        """
        if self.best_model is None:
            raise ValueError("Regression model not assigned")
        prediction = self.get_model_prediction(self.best_model, X)
        return prediction
| 34.971014 | 86 | 0.631372 |
3217b00d6a8c355ea2b2ea7926ccd4a29f1b0506 | 603 | py | Python | h/migrations/versions/1a40e75a524d_add_normalised_username_index.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | null | null | null | h/migrations/versions/1a40e75a524d_add_normalised_username_index.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | null | null | null | h/migrations/versions/1a40e75a524d_add_normalised_username_index.py | y3g0r/h | a057144956fe25e669aeba5d0f0eb38f9dc09566 | [
"BSD-2-Clause"
] | null | null | null | """
Add normalised username index
Revision ID: 1a40e75a524d
Revises: 02db2fa6ea98
Create Date: 2017-03-02 13:55:24.290975
"""
from alembic import op
import sqlalchemy as sa
revision = "1a40e75a524d"
down_revision = "02db2fa6ea98"
def upgrade():
    """Create a functional index on the normalised username plus authority.

    The index lowercases the username and strips '.' characters so lookups
    can match usernames case- and dot-insensitively.
    """
    # Creating an index concurrently does not work inside a transaction, so
    # end the transaction Alembic opened before CREATE INDEX CONCURRENTLY.
    op.execute("COMMIT")
    op.create_index(
        op.f("ix__user__userid"),
        "user",
        [sa.text("lower(replace(username, '.', ''))"), "authority"],
        postgresql_concurrently=True,
    )
def downgrade():
    """Drop the normalised-username index created by upgrade()."""
    op.drop_index(op.f("ix__user__userid"), "user")
| 20.1 | 71 | 0.676617 |
245e39c83b02406f6ee86409a4188b30420705ac | 1,132 | py | Python | idt_oauth2/clients/slack.py | idonethis/idt_oauth2 | 1df13ff424a86e855663bd0d30a9a2679597db88 | [
"MIT"
] | null | null | null | idt_oauth2/clients/slack.py | idonethis/idt_oauth2 | 1df13ff424a86e855663bd0d30a9a2679597db88 | [
"MIT"
] | null | null | null | idt_oauth2/clients/slack.py | idonethis/idt_oauth2 | 1df13ff424a86e855663bd0d30a9a2679597db88 | [
"MIT"
] | null | null | null | import logging
import requests
from django.conf import settings
OAUTH2_AUTHORIZE_URL = '%s/oauth/authorize' % settings.SLACK_DOMAIN
OAUTH2_TOKEN_EXCHANGE_URL = '%s/api/oauth.access' % settings.SLACK_DOMAIN
def get_authorize_config(redirect_uri):
    """Return (authorize_url, query_params) for Slack's OAuth2 consent page.

    NOTE(review): `redirect_uri` is currently unused here — confirm whether
    it should be added to the query string.
    """
    params = {}
    params['client_id'] = settings.SLACK_OAUTH2_CLIENT_ID
    params['scope'] = 'read,post,identify'
    return OAUTH2_AUTHORIZE_URL, params
def token_exchange(code, redirect_uri):
    """Exchange an OAuth2 authorization code for a Slack access token.

    See: https://api.slack.com/docs/oauth and
    https://api.slack.com/methods/oauth.access
    """
    payload = {
        'client_id': settings.SLACK_OAUTH2_CLIENT_ID,
        'client_secret': settings.SLACK_OAUTH2_CLIENT_SECRET,
        'code': code,
        'redirect_uri': redirect_uri,
    }
    logging.info(
        'slack oauth2 token exchange post: %s',
        payload)
    response = requests.post(
        OAUTH2_TOKEN_EXCHANGE_URL, data=payload)
    logging.info(
        'slack oauth2 token exchange response status code: %s, content: %s',
        response.status_code, response.json())
    return response.json()
| 31.444444 | 90 | 0.689046 |
b8206d5bb41899ed9ef9d0c87a325410f03499c6 | 445 | py | Python | knx_stack/definition/knxnet_ip/core/connect/req.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 2 | 2021-07-28T07:42:28.000Z | 2022-01-25T18:56:05.000Z | knx_stack/definition/knxnet_ip/core/connect/req.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | 6 | 2021-07-25T21:36:01.000Z | 2022-02-20T21:11:31.000Z | knx_stack/definition/knxnet_ip/core/connect/req.py | majamassarini/knx-stack | 11a9baac6b7600649b5fbca43c93b200b23676b4 | [
"MIT"
] | null | null | null | from typing import NamedTuple
class Msg(NamedTuple):
    """KNXnet/IP CONNECT_REQUEST payload: a control and a data endpoint."""

    addr_control_endpoint: str
    port_control_endpoint: int
    addr_data_endpoint: str
    port_data_endpoint: int

    def __repr__(self):
        return (
            f"ConnectReq (control endpoint = "
            f"{self.addr_control_endpoint}:{self.port_control_endpoint} "
            f"data endpoint {self.addr_data_endpoint}:{self.port_data_endpoint})"
        )
| 26.176471 | 82 | 0.65618 |
3f0b7326cc64ecfb87374c43ab65ea8946843669 | 12,326 | py | Python | userbot/plugins/user_info.py | gamerfuckerofficial/userbot | 887332ab492a2deb6152257f0c169a895234eb7b | [
"MIT"
] | 3 | 2020-09-04T09:34:51.000Z | 2020-09-04T09:39:26.000Z | userbot/plugins/user_info.py | gamerfuckerofficial/userbot | 887332ab492a2deb6152257f0c169a895234eb7b | [
"MIT"
] | null | null | null | userbot/plugins/user_info.py | gamerfuckerofficial/userbot | 887332ab492a2deb6152257f0c169a895234eb7b | [
"MIT"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for getting info
about any user on Telegram(including you!). """
from telethon.events import NewMessage
from typing import Union
from userbot import CMD_HELP
from userbot.events import register
from re import findall, match
from typing import List
from telethon.tl.custom import Message
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.functions.messages import GetFullChatRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import (
MessageEntityMentionName,
ChannelParticipantsAdmins,
ChannelParticipantsBots,
MessageEntityMention,
InputPeerChannel,
InputPeerChat)
def parse_arguments(message: str, valid: List[str]) -> (dict, str):
    """Parse `.flag`/`!flag` booleans and `key:value` pairs from a message.

    Only names listed in `valid` are collected. Returns
    (options, remaining_message) with the recognised tokens stripped.
    """
    options = {}

    # Boolean flags: `.name` enables, `!name` disables.
    for opt in findall(r'([.!]\S+)', message):
        if opt[1:] in valid:
            if opt[0] == '.':
                options[opt[1:]] = True
            elif opt[0] == '!':
                options[opt[1:]] = False
            message = message.replace(opt, '')

    # key:value pairs; values may be double-quoted to include whitespace.
    for opt in findall(r'(\S+):(?:"([\S\s]+)"|(\S+))', message):
        key, val1, val2 = opt
        value = val2 or val1[1:-1]
        if key in valid:
            # BUG FIX: strip the pair from the message using its raw text
            # BEFORE type conversion — the original replaced after
            # converting, so e.g. 'x:true' (True) or 'n:007' (7) were
            # never removed from the message.
            message = message.replace(f"{key}:{value}", '')
            if value.isnumeric():
                value = int(value)
            elif match(r'[Tt]rue|[Ff]alse', value):
                # BUG FIX: the original computed this match but discarded
                # the result, leaving true/false values as strings.
                value = bool(match(r'[Tt]rue', value))
            options[key] = value
    return options, message.strip()
def freeze(d):
    """Recursively convert dicts/lists into hashable frozensets/tuples."""
    if isinstance(d, dict):
        frozen_items = ((key, freeze(value)) for key, value in d.items())
        return frozenset(frozen_items)
    if isinstance(d, list):
        return tuple(freeze(item) for item in d)
    return d
def extract_urls(message):
    """Return every http(s) URL found in the string form of *message*."""
    return list(findall(r'(https?://\S+)', str(message)))
async def get_user_from_id(user, event):
    """Resolve a Telegram entity from a user id (int or numeric string).

    On failure, edits the triggering message with the error text and
    returns None.
    """
    try:
        # BUG FIX: int() moved inside the try — a non-numeric string used
        # to raise an unhandled ValueError before the try was entered.
        if isinstance(user, str):
            user = int(user)
        user_obj = await event.client.get_entity(user)
    except (TypeError, ValueError) as err:
        await event.edit(str(err))
        return None
    return user_obj
async def get_user_from_event(event: NewMessage.Event, **kwargs):
    """ Get the user from argument or replied message.

    Resolution order: explicit `user` kwarg (id, @mention entity, or any
    string telethon can resolve), then the sender of a forwarded reply
    (when kwargs['forward'] is truthy), then the replied-to message's
    author, and finally the current (self) user.
    """
    reply_msg: Message = await event.get_reply_message()
    user = kwargs.get('user', None)
    if user:
        # First check for a user id
        if user.isnumeric():
            user = int(user)
        # Then check for a user mention (@username)
        elif event.message.entities is not None:
            probable_user_mention_entity = event.message.entities[0]
            if isinstance(probable_user_mention_entity,
                          MessageEntityMentionName):
                user_id = probable_user_mention_entity.user_id
                replied_user = await event.client(GetFullUserRequest(user_id))
                return replied_user
        # Fall through: let telethon resolve whatever string/id we have.
        try:
            user_object = await event.client.get_entity(user)
            replied_user = await event.client(
                GetFullUserRequest(user_object.id))
        except (TypeError, ValueError) as err:
            # NOTE(review): `err` is swallowed silently here — the caller
            # only sees None; confirm that is intended.
            return None
    # Check for a forwarded message
    # NOTE(review): kwargs['forward'] is accessed without a default and
    # would raise KeyError if absent — callers appear to always set it.
    elif (reply_msg and
          reply_msg.forward and
          reply_msg.forward.sender_id and
          kwargs['forward']):
        forward = reply_msg.forward
        replied_user = await event.client(GetFullUserRequest(forward.sender_id))
    # Check for a replied to message
    elif event.reply_to_msg_id:
        previous_message = await event.get_reply_message()
        replied_user = await event.client(GetFullUserRequest(previous_message.from_id))
    # Last case scenario is to get the current user
    else:
        self_user = await event.client.get_me()
        replied_user = await event.client(GetFullUserRequest(self_user.id))
    return replied_user
async def get_chat_from_event(event: NewMessage.Event, **kwargs):
    """Resolve the full chat/channel info from a `chat` kwarg or the event.

    Returns a GetFullChannelRequest/GetFullChatRequest result, or None when
    the `chat` argument cannot be resolved.
    """
    # NOTE(review): `reply_msg` is fetched but never used (the branch that
    # consumed it is commented out below).
    reply_msg: Message = await event.get_reply_message()
    chat = kwargs.get('chat', None)
    if chat:
        try:
            input_entity = await event.client.get_input_entity(chat)
            if isinstance(input_entity, InputPeerChannel):
                return await event.client(GetFullChannelRequest(input_entity.channel_id))
            elif isinstance(input_entity, InputPeerChat):
                return await event.client(GetFullChatRequest(input_entity.chat_id))
            else:
                return None
        except(TypeError, ValueError):
            return None
    # elif reply_msg and reply_msg.forward:
    #     return None
    else:
        # Default: full info for the chat the event was raised in.
        chat = await event.get_chat()
        return await event.client(GetFullChannelRequest(chat.id))
async def list_admins(event):
    """Return the chat's administrators, excluding bot accounts."""
    participants = await event.client.get_participants(
        event.chat, filter=ChannelParticipantsAdmins)
    return [admin for admin in participants if not admin.bot]
async def list_bots(event):
    """Return every bot participant of the current chat."""
    return await event.client.get_participants(
        event.chat, filter=ChannelParticipantsBots)
def make_mention(user):
    """Prefer '@username'; fall back to an inline text mention link."""
    return f"@{user.username}" if user.username else inline_mention(user)
def inline_mention(user):
    """Build a tg:// markdown mention, labelling nameless users 'No Name'."""
    label = user_full_name(user) or "No Name"
    return f"[{label}](tg://user?id={user.id})"
def user_full_name(user):
    """Join the user's first and last name, skipping missing parts."""
    parts = [name for name in (user.first_name, user.last_name) if name]
    return ' '.join(parts)
class FormattedBase:
    """Base class for markdown text fragments; subclasses populate `text`."""
    text: str

    def __add__(self, other: Union[str, 'FormattedBase']) -> str:
        # Concatenation always degrades to a plain string.
        return '{}{}'.format(self, other)

    def __repr__(self) -> str:
        return '{}({})'.format(type(self).__name__, self.text)

    def __str__(self) -> str:
        return self.text


class String(FormattedBase):
    """Plain, unformatted text."""

    def __init__(self, text: Union[str, int]) -> None:
        self.text = str(text)


class Bold(FormattedBase):
    """Bold markdown text (**text**)."""

    def __init__(self, text: Union[str, int]) -> None:
        self.text = '**{}**'.format(text)


class Italic(FormattedBase):
    """Italic markdown text (__text__)."""

    def __init__(self, text: Union[str, int]) -> None:
        self.text = '__{}__'.format(text)


class Code(FormattedBase):
    """Inline monospace markdown text (`text`)."""

    def __init__(self, text: Union[str, int]) -> None:
        self.text = '`{}`'.format(text)


class Pre(FormattedBase):
    """Preformatted markdown block (```text```)."""

    def __init__(self, text: Union[str, int]) -> None:
        self.text = '```{}```'.format(text)


class Link(FormattedBase):
    """Markdown hyperlink: [label](url)."""

    def __init__(self, label: String, url: str) -> None:
        self.text = '[{}]({})'.format(label, url)


class Mention(Link):
    """Inline user mention: a Link whose target is tg://user?id=<uid>."""

    def __init__(self, label: String, uid: int):
        super().__init__(label, 'tg://user?id={}'.format(uid))


class KeyValueItem(FormattedBase):
    """A 'key: value' line; the parts stay accessible as attributes."""

    def __init__(self, key: Union[str, FormattedBase],
                 value: Union[str, FormattedBase]) -> None:
        self.key = key
        self.value = value
        self.text = '{}: {}'.format(key, value)


class Item(FormattedBase):
    """A bare item rendered with str()."""

    def __init__(self, text: Union[str, int]) -> None:
        self.text = str(text)


class Section:
    """A header plus indented items, joined with configurable spacing."""

    def __init__(self,
                 *args: Union[String,
                              'FormattedBase'],
                 spacing: int = 1,
                 indent: int = 4) -> None:
        self.header = args[0]
        self.items = list(args[1:])
        self.indent = indent
        self.spacing = spacing

    def __add__(self, other: Union[String, 'FormattedBase']) -> str:
        return str(self) + '\n\n' + str(other)

    def __str__(self) -> str:
        pad = ' ' * self.indent
        rendered = [str(self.header)]
        for item in self.items:
            if item is not None:  # None items are silently skipped
                rendered.append(pad + str(item))
        return ('\n' * self.spacing).join(rendered)


class SubSection(Section):
    """A Section rendered one indent level deeper (8 spaces)."""

    def __init__(self,
                 *args: Union[String,
                              'SubSubSection'],
                 indent: int = 8) -> None:
        super().__init__(*args, indent=indent)


class SubSubSection(SubSection):
    """A SubSection rendered one further indent level deeper (12 spaces)."""

    def __init__(self, *args: String, indent: int = 12) -> None:
        super().__init__(*args, indent=indent)


class TGDoc:
    """A whole message: sections separated by blank lines."""

    def __init__(self, *args: Union[String, 'Section']) -> None:
        self.sections = args

    def __str__(self) -> str:
        return '\n\n'.join(str(section) for section in self.sections)
@register(pattern=r"^\.u(?:ser)?(\s+[\S\s]+|$)", outgoing=True)
async def who(event: NewMessage.Event):
    """ For .user command, get info about a user.

    Parses option flags from the command text, resolves the target user
    (argument, reply, or self), and edits the message with the rendered
    info document.
    """
    if event.fwd_from:
        return
    args, user = parse_arguments(event.pattern_match.group(1), [
        'id', 'forward', 'general', 'bot', 'misc', 'all', 'mention'
    ])
    # Follow forwarded messages by default.
    args['forward'] = args.get('forward', True)
    args['user'] = user
    replied_user = await get_user_from_event(event, **args)
    if not replied_user:
        await event.edit("**Failed to get information for user**")
        return
    user_info = await fetch_info(replied_user, **args)
    # NOTE(review): this branch is a no-op — `message_id_to_reply` is read
    # but never used; looks like leftover scaffolding.
    message_id_to_reply = event.message.reply_to_msg_id
    if not message_id_to_reply:
        pass
    await event.edit(str(user_info), parse_mode="markdown")
async def fetch_info(replied_user, **kwargs):
    """ Get details from the User object.

    Builds a TGDoc-style Section from a GetFullUserRequest result. Flags in
    kwargs (`id`, `general`, `bot`, `misc`, `all`, `mention`) select which
    sub-sections are rendered; `all` forces general+bot+misc on.
    """
    user = replied_user.user
    id_only = kwargs.get('id', False)
    show_general = kwargs.get('general', True)
    show_bot = kwargs.get('bot', False)
    show_misc = kwargs.get('misc', False)
    show_all = kwargs.get('all', False)
    mention_name = kwargs.get('mention', False)

    if show_all:
        show_general = True
        show_bot = True
        show_misc = True

    full_name = str(user.first_name + ' ' + (user.last_name or ''))

    if mention_name:
        title = Link(full_name, f'tg://user?id={user.id}')
    else:
        title = Bold(full_name)

    # Short-circuit: only the id line is wanted.
    if id_only:
        return KeyValueItem(title, Code(user.id))

    # NOTE: the odd line wrapping below appears to be autopep8 output;
    # each call is just SubSection(Bold(header), KeyValueItem(...), ...).
    general = SubSection(
        Bold('general'), KeyValueItem(
            'id', Code(
                user.id)), KeyValueItem(
            'first_name', Code(
                user.first_name)), KeyValueItem(
            'last_name', Code(
                user.last_name)), KeyValueItem(
            'username', Code(
                user.username)), KeyValueItem(
            'mutual_contact', Code(
                user.mutual_contact)), KeyValueItem(
            'common groups', Code(
                replied_user.common_chats_count)))
    bot = SubSection(Bold('bot'),
                     KeyValueItem('bot', Code(user.bot)),
                     KeyValueItem('bot_chat_history', Code(user.bot_chat_history)),
                     KeyValueItem('bot_info_version', Code(user.bot_info_version)),
                     KeyValueItem('bot_inline_geo', Code(user.bot_inline_geo)),
                     KeyValueItem('bot_inline_placeholder',
                                  Code(user.bot_inline_placeholder)),
                     KeyValueItem('bot_nochats', Code(user.bot_nochats)))
    misc = SubSection(
        Bold('misc'), KeyValueItem(
            'restricted', Code(
                user.restricted)), KeyValueItem(
            'restriction_reason', Code(
                user.restriction_reason)), KeyValueItem(
            'deleted', Code(
                user.deleted)), KeyValueItem(
            'verified', Code(
                user.verified)), KeyValueItem(
            'min', Code(
                user.min)), KeyValueItem(
            'lang_code', Code(
                user.lang_code)))

    return Section(title,
                   general if show_general else None,
                   misc if show_misc else None,
                   bot if show_bot else None)
# NOTE(review): the help key is "android" although this module is user_info —
# presumably a copy/paste leftover; also the concatenated string literals
# have no "\n" separators, so the help text renders as one run-on line.
# Confirm before changing: CMD_HELP keys may be looked up elsewhere.
CMD_HELP.update({
    "android":
    "`.u(ser) [options] (username|id)`"
    "Or, in response to a message"
    "`.u(ser) [options]`"
    "Options:"
    "`.id`: Show only the user's ID"
    "`.general`: Show general user info"
    "`.bot`: Show bot related info"
    "`.misc`: Show miscelanious info"
    "`.all`: Show all info (overrides other options)"
    "`.mention`: Inline mention the user"
    "`.forward`: Follow forwarded message"
})
| 29.417661 | 92 | 0.601331 |
6df3b8b6c75246abe657f148c97f1d731ac74fa3 | 848 | py | Python | examples/example.py | pgraafstra/pastas | c065059e1df5b6c8e4afeb5278de2ef70fdf726c | [
"MIT"
] | null | null | null | examples/example.py | pgraafstra/pastas | c065059e1df5b6c8e4afeb5278de2ef70fdf726c | [
"MIT"
] | null | null | null | examples/example.py | pgraafstra/pastas | c065059e1df5b6c8e4afeb5278de2ef70fdf726c | [
"MIT"
] | null | null | null | """
This test file is meant for developing purposes. Providing an easy method to
test the functioning of PASTAS during development.
"""
import pandas as pd
import pastas as ps

ps.set_log_level("ERROR")

# Read observations and create the time series model.
# FIX: read_csv(..., squeeze=True) was removed in pandas 2.0; use
# DataFrame.squeeze("columns") to obtain a Series instead.
obs = pd.read_csv("data/head_nb1.csv", index_col=0,
                  parse_dates=True).squeeze("columns")

# Create the time series model
ml = ps.Model(obs, name="head")

# Read weather data (precipitation and evaporation stresses).
rain = pd.read_csv("data/rain_nb1.csv", index_col=0,
                   parse_dates=True).squeeze("columns")
evap = pd.read_csv("data/evap_nb1.csv", index_col=0,
                   parse_dates=True).squeeze("columns")

# Create a linear recharge stress with an exponential response function.
sm = ps.RechargeModel(prec=rain, evap=evap, rfunc=ps.Exponential,
                      recharge="Linear", name='recharge')
ml.add_stressmodel(sm)

# Solve and plot
ml.solve()
ml.plot()
| 25.69697 | 76 | 0.683962 |
b858eee43e266d8c44c741a7f664b31d1f3fe383 | 4,858 | py | Python | tests/test_update.py | timgates42/blitzdb | 36191579be14fbc2d7a47ede099bcdf31297a9fa | [
"MIT"
] | 252 | 2015-01-02T13:05:12.000Z | 2021-12-29T13:36:47.000Z | tests/test_update.py | epatters/blitzdb | 4b459e0bcde9e1f6224dd4e3bea74194586864b0 | [
"MIT"
] | 33 | 2015-01-09T20:05:10.000Z | 2019-11-08T15:48:34.000Z | tests/test_update.py | epatters/blitzdb | 4b459e0bcde9e1f6224dd4e3bea74194586864b0 | [
"MIT"
] | 39 | 2015-01-20T01:15:04.000Z | 2022-03-26T01:01:15.000Z | from __future__ import absolute_import
import pytest
from blitzdb.backends.file import Backend as FileBackend
from .helpers.movie_data import Actor
def test_update_by_list(backend):
    """update() with a field tuple persists only the listed fields."""
    # NOTE(review): skipped for FileBackend — presumably it lacks partial
    # update support; confirm.
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    actor.name = 'Patrick Stewart'
    actor.age = 50
    backend.update(actor, ('name',))
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 0
    assert len(backend.filter(Actor, {'name': 'Patrick Stewart'})) == 1
    #we did not update the age field...
    assert backend.get(Actor, {'name': 'Patrick Stewart'}).age == 54
def test_update_non_indexed_field(backend):
    """Updating an indexed and a non-indexed field together persists both."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    actor.name = 'Patrick Stewart'
    actor.age = 50
    backend.update(actor, ('name','age'))
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 0
    assert len(backend.filter(Actor, {'name': 'Patrick Stewart'})) == 1
    assert backend.get(Actor, {'name': 'Patrick Stewart'}).age == 50
def test_multiple_updates(backend):
    """Two successive update() calls before one commit both take effect."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    actor.name = 'Patrick Stewart'
    actor.age = 50
    backend.update(actor, ('name',))
    backend.update(actor, ('age',))
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 0
    assert len(backend.filter(Actor, {'name': 'Patrick Stewart'})) == 1
    assert backend.get(Actor, {'name' : 'Patrick Stewart'}).age == 50
def test_update_on_deleted_document_fails(backend):
    """update() on a deleted document raises DoesNotExist."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    backend.delete(actor)
    actor.name = 'Patrick Stewart'
    actor.age = 50
    with pytest.raises(actor.DoesNotExist):
        backend.update(actor, ('name',))
def test_update_with_dict(backend):
    """update() with a dict sets fields; update_obj=False leaves the
    in-memory object untouched while still persisting the change."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    backend.update(actor, {'name': 'Ian McKellan'})
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Ian McKellan'})) == 1
    assert actor.name == 'Ian McKellan'
    backend.update(actor, {'name': 'Roger Moore'}, update_obj=False)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Roger Moore'})) == 1
    assert actor.name == 'Ian McKellan'
def test_update_unset(backend):
    """unset_fields removes the field from the stored document."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    backend.update(actor, unset_fields=['name'])
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Ian McKellan'})) == 0
    recovered_actor = backend.get(Actor, {'pk': actor.pk})
    assert recovered_actor.get('name') is None
def test_update_set_then_unset(backend):
    """A set followed by an unset of the same field ends with it removed."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    backend.update(actor, set_fields={'name': 'Patrick Stewart'})
    backend.update(actor, unset_fields=['name'])
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Patrick Stewart'})) == 0
    recovered_actor = backend.get(Actor, {'pk': actor.pk})
    assert recovered_actor.get('name') is None
def test_update_unset_then_set(backend):
    """An unset followed by a set of the same field ends with the new value."""
    if isinstance(backend,FileBackend):
        return
    actor = Actor({'name': 'Robert de Niro', 'age': 54})
    backend.save(actor)
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Robert de Niro'})) == 1
    backend.update(actor, unset_fields=['name'])
    backend.update(actor, set_fields={'name': 'Patrick Stewart'})
    backend.commit()
    assert len(backend.filter(Actor, {'name': 'Patrick Stewart'})) == 1
    recovered_actor = backend.get(Actor, {'pk': actor.pk})
    assert recovered_actor.name == 'Patrick Stewart'
| 23.582524 | 71 | 0.64224 |
0d1e849673ca2ac8ab79f66f816f4f88131662f5 | 908 | py | Python | ch03/q3-02.py | iamnicoj/ctci | f71f995cb3d3257d3d58f1f167fcab8eaf84d457 | [
"MIT"
] | null | null | null | ch03/q3-02.py | iamnicoj/ctci | f71f995cb3d3257d3d58f1f167fcab8eaf84d457 | [
"MIT"
] | 3 | 2021-03-19T14:35:27.000Z | 2021-03-20T16:12:34.000Z | ch03/q3-02.py | iamnicoj/ctci | f71f995cb3d3257d3d58f1f167fcab8eaf84d457 | [
"MIT"
] | null | null | null | from stack import stack
class min_stack(stack):
    """A stack that also tracks its minimum element (CtCI 3.2).

    `minima` is a parallel stack of indices into the underlying `self.list`;
    its top always points at the current minimum, so min() is O(1).
    """

    def __init__(self):
        super().__init__()
        self.minima = stack()  # stack of indices of successive minima

    def min(self):
        """Return the current minimum, or None when the stack is empty."""
        if len(self.list) > 0:
            return self.list[self.minima.peek()]
        return None

    def push(self, item):
        """Push `item`, recording its index if it is a new minimum."""
        super().push(item)
        if len(self.minima) == 0:
            self.minima.push(0)
            return
        minvalue = self.min()
        if item < minvalue:
            # After super().push, the new item's index equals the old size,
            # which is exactly len(self.minima) before this push.
            self.minima.push(len(self.minima))
        else:
            # Minimum unchanged: repeat the previous minimum's index.
            self.minima.push(self.minima.peek())

    def pop(self):
        """Pop and return the top item, keeping `minima` in lockstep."""
        self.minima.pop()
        return super().pop()
#############################
# Quick manual check of min_stack behaviour.
mystack = min_stack()
mystack.push(2)
mystack.push(1)
mystack.push(33)
mystack.push(5)
mystack.push(0)
print(mystack)
print(mystack.pop())  # pops 0, the current minimum
print(mystack)
print(mystack.min())  # minimum should now be 1 again
| 19.73913 | 68 | 0.54185 |
803e294015eca47af25e6366107a407ac778e8ca | 3,903 | py | Python | ui/dialogs/ui_addseq.py | dongniu/cadnano2 | 6805fe2af856c59b06373c0ee0142ad6bc286262 | [
"Unlicense"
] | 17 | 2015-02-07T03:46:49.000Z | 2021-09-25T09:23:41.000Z | ui/dialogs/ui_addseq.py | scholer/cadnano2 | 0b8bba1ab3277ac9859ef78615890d351561784c | [
"Unlicense"
] | 2 | 2017-08-22T03:17:16.000Z | 2021-07-03T14:42:41.000Z | ui/dialogs/ui_addseq.py | scholer/cadnano2 | 0b8bba1ab3277ac9859ef78615890d351561784c | [
"Unlicense"
] | 9 | 2015-09-06T22:41:38.000Z | 2022-03-27T13:57:37.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dialogs/addseq.ui'
#
# Created: Thu Jul 21 17:35:26 2011
# by: PyQt4 UI code generator snapshot-4.8.3-fbc8b1362812
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2 / Python 3 builds have no QString; plain str is
    # already unicode, so fall back to the identity function.
    _fromUtf8 = lambda s: s
class Ui_AddSeqDialog(object):
    # Generated by pyuic4 from dialogs/addseq.ui (see header warning):
    # do not hand-edit widget logic here -- regenerate from the .ui file.
    def setupUi(self, AddSeqDialog):
        # Build the dialog's widget tree: a tab widget with a "Standard"
        # tab (canned sequences) and a "Custom" tab (free-text editor),
        # plus an Apply/Cancel button box.
        AddSeqDialog.setObjectName(_fromUtf8("AddSeqDialog"))
        AddSeqDialog.resize(500, 500)
        AddSeqDialog.setModal(True)
        self.dialogGridLayout = QtGui.QGridLayout(AddSeqDialog)
        self.dialogGridLayout.setObjectName(_fromUtf8("dialogGridLayout"))
        self.tabWidget = QtGui.QTabWidget(AddSeqDialog)
        self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
        self.tabStandard = QtGui.QWidget()
        self.tabStandard.setObjectName(_fromUtf8("tabStandard"))
        self.standardTabGridLayout = QtGui.QGridLayout(self.tabStandard)
        self.standardTabGridLayout.setObjectName(_fromUtf8("standardTabGridLayout"))
        self.groupBox = QtGui.QGroupBox(self.tabStandard)
        self.groupBox.setTitle(_fromUtf8(""))
        self.groupBox.setFlat(True)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.verticalLayout = QtGui.QVBoxLayout(self.groupBox)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.standardTabGridLayout.addWidget(self.groupBox, 0, 1, 1, 1)
        # Horizontal spacers keep the group box centred in the tab.
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.standardTabGridLayout.addItem(spacerItem, 0, 2, 1, 1)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.standardTabGridLayout.addItem(spacerItem1, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tabStandard, _fromUtf8(""))
        self.tabCustom = QtGui.QWidget()
        self.tabCustom.setObjectName(_fromUtf8("tabCustom"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.tabCustom)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.seqTextEdit = QtGui.QTextEdit(self.tabCustom)
        self.seqTextEdit.setObjectName(_fromUtf8("seqTextEdit"))
        self.verticalLayout_2.addWidget(self.seqTextEdit)
        self.tabWidget.addTab(self.tabCustom, _fromUtf8(""))
        self.dialogGridLayout.addWidget(self.tabWidget, 0, 0, 1, 1)
        self.customButtonBox = QtGui.QDialogButtonBox(AddSeqDialog)
        self.customButtonBox.setStandardButtons(QtGui.QDialogButtonBox.Apply|QtGui.QDialogButtonBox.Cancel)
        self.customButtonBox.setCenterButtons(True)
        self.customButtonBox.setObjectName(_fromUtf8("customButtonBox"))
        self.dialogGridLayout.addWidget(self.customButtonBox, 1, 0, 1, 1)
        self.retranslateUi(AddSeqDialog)
        self.tabWidget.setCurrentIndex(0)
        # Wire both Cancel and Apply; note Apply triggers accept via the
        # generic clicked(QAbstractButton*) signal.
        QtCore.QObject.connect(self.customButtonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), AddSeqDialog.reject)
        QtCore.QObject.connect(self.customButtonBox, QtCore.SIGNAL(_fromUtf8("clicked(QAbstractButton*)")), AddSeqDialog.accept)
        QtCore.QMetaObject.connectSlotsByName(AddSeqDialog)
        AddSeqDialog.setTabOrder(self.customButtonBox, self.tabWidget)
        AddSeqDialog.setTabOrder(self.tabWidget, self.seqTextEdit)
    def retranslateUi(self, AddSeqDialog):
        # Apply the translatable UI strings (window title and tab labels).
        AddSeqDialog.setWindowTitle(QtGui.QApplication.translate("AddSeqDialog", "Choose a sequence", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabStandard), QtGui.QApplication.translate("AddSeqDialog", "Standard", None, QtGui.QApplication.UnicodeUTF8))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabCustom), QtGui.QApplication.translate("AddSeqDialog", "Custom", None, QtGui.QApplication.UnicodeUTF8))
| 55.757143 | 171 | 0.738663 |
3a90056e723e3b566c5fdc79a6d16015248e3a03 | 1,781 | py | Python | universalTranslator.py | GHawk1124/Universal-Translator | 0acb0059f994912c813adfb755a27662f3dd80fd | [
"MIT"
] | null | null | null | universalTranslator.py | GHawk1124/Universal-Translator | 0acb0059f994912c813adfb755a27662f3dd80fd | [
"MIT"
] | null | null | null | universalTranslator.py | GHawk1124/Universal-Translator | 0acb0059f994912c813adfb755a27662f3dd80fd | [
"MIT"
] | null | null | null | """Universal Translator.
Usage:
universalTranslator.py (-h | --help)
universalTranslator.py --version
universalTranslator.py [-rc | --record] [-u | --upload] [-rg | --recognize] [-t | --translate] [-s | --say] [--config] [--noise-reduce]
universalTranslator.py (-a | -all) [-u | --upload]
universalTranslator.py (--no-rec)
Options:
-h --help Show this screen.
--version Show version.
-rc --record Record the audio again.
-u --upload Upload File to Google Storage.
-rg --recognize Recognize Audio Speech.
-t --translate Translate the Transcript.
-s --say Say the Transcript.
-a --all Record, Recognize, Translate, and Say.
--no-rec Recognize, Translate, and Say.
--config Record a Sample for Noise Reduction.
--noise-reduce Reduce Noise in the Audio.
"""
from docopt import docopt
from utUtils import *
if __name__ == '__main__':
    # CLI driver: dispatch to the utUtils pipeline steps named by the
    # docopt flags parsed from the module docstring.
    # NOTE(review): the usage line "(-a | -all)" in the docstring almost
    # certainly means "--all" (as declared in Options) -- confirm against
    # docopt's usage-pattern parsing.
    arguments = docopt(__doc__, version='universalTranslator v0.0.1')
    rec = False
    upl = False
    if arguments['--all']:
        # --all: record (optionally upload) and run the whole pipeline.
        rec = True
        if arguments['--upload']:
            upl = True
        Main(rec, upl)
        sys.exit()
    if arguments['--config']: config()
    if arguments['--no-rec']:
        # Run the full pipeline on the existing recording.
        rec = False
        Main(rec, upl)
        sys.exit()
    if arguments['--record']: record(RecordOptions)
    if arguments['--upload']: upload(RecordOptions['Filename'])
    if arguments['--noise-reduce']: noiseCancel(NO_NOISE)
    # Fixed: 'text' and 'translation' were referenced by later steps even
    # when the producing step was not requested, raising NameError
    # (e.g. --say without --translate). Each step now only runs when its
    # input is available.
    text = None
    translation = None
    if arguments['--recognize']:
        text = recognize(RecordOptions['Filename'])
    if arguments['--translate'] and text is not None:
        translation = translate(text, NATIVE_LANGUAGE)
    if arguments['--say'] and translation is not None:
        say(translation)
| 35.62 | 138 | 0.586188 |
b0d7c038fc4a60ecdb6b3ed2aa8e1d0c18be22c0 | 14,530 | py | Python | exchangelib/items/contact.py | mishmashclone/ecederstrand-exchangelib | 1bbae0e527dc82a45bf3b5946b438d69de96c20f | [
"BSD-2-Clause"
] | null | null | null | exchangelib/items/contact.py | mishmashclone/ecederstrand-exchangelib | 1bbae0e527dc82a45bf3b5946b438d69de96c20f | [
"BSD-2-Clause"
] | null | null | null | exchangelib/items/contact.py | mishmashclone/ecederstrand-exchangelib | 1bbae0e527dc82a45bf3b5946b438d69de96c20f | [
"BSD-2-Clause"
] | null | null | null | import datetime
import logging
from .item import Item
from ..fields import BooleanField, Base64Field, TextField, ChoiceField, URIField, DateTimeBackedDateField, \
PhoneNumberField, EmailAddressesField, PhysicalAddressField, Choice, MemberListField, CharField, TextListField, \
EmailAddressField, IdElementField, EWSElementField, DateTimeField, EWSElementListField, \
BodyContentAttributedValueField, StringAttributedValueField, PhoneNumberAttributedValueField, \
PersonaPhoneNumberField, EmailAddressAttributedValueField, PostalAddressAttributedValueField, MailboxField, \
MailboxListField
from ..properties import PersonaId, IdChangeKeyMixIn, CompleteName, Attribution, EmailAddress, Address, FolderId
from ..util import TNS
from ..version import EXCHANGE_2010, EXCHANGE_2010_SP2
log = logging.getLogger(__name__)
class Contact(Item):
    """An EWS Contact item: a declarative table of EWS fields.

    MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/contact

    Field declaration order mirrors the EWS schema and must be preserved.
    """
    ELEMENT_NAME = 'Contact'
    file_as = TextField(field_uri='contacts:FileAs')
    file_as_mapping = ChoiceField(field_uri='contacts:FileAsMapping', choices={
        Choice('None'), Choice('LastCommaFirst'), Choice('FirstSpaceLast'), Choice('Company'),
        Choice('LastCommaFirstCompany'), Choice('CompanyLastFirst'), Choice('LastFirst'),
        Choice('LastFirstCompany'), Choice('CompanyLastCommaFirst'), Choice('LastFirstSuffix'),
        Choice('LastSpaceFirstCompany'), Choice('CompanyLastSpaceFirst'), Choice('LastSpaceFirst'),
        Choice('DisplayName'), Choice('FirstName'), Choice('LastFirstMiddleSuffix'), Choice('LastName'),
        Choice('Empty'),
    })
    display_name = TextField(field_uri='contacts:DisplayName', is_required=True)
    given_name = CharField(field_uri='contacts:GivenName')
    initials = TextField(field_uri='contacts:Initials')
    middle_name = CharField(field_uri='contacts:MiddleName')
    nickname = TextField(field_uri='contacts:Nickname')
    complete_name = EWSElementField(field_uri='contacts:CompleteName', value_cls=CompleteName, is_read_only=True)
    company_name = TextField(field_uri='contacts:CompanyName')
    email_addresses = EmailAddressesField(field_uri='contacts:EmailAddress')
    physical_addresses = PhysicalAddressField(field_uri='contacts:PhysicalAddress')
    phone_numbers = PhoneNumberField(field_uri='contacts:PhoneNumber')
    assistant_name = TextField(field_uri='contacts:AssistantName')
    # Dates are date-only in the API but stored as datetimes server-side;
    # 11:59 is the conventional placeholder time used by Outlook.
    birthday = DateTimeBackedDateField(field_uri='contacts:Birthday', default_time=datetime.time(11, 59))
    business_homepage = URIField(field_uri='contacts:BusinessHomePage')
    children = TextListField(field_uri='contacts:Children')
    companies = TextListField(field_uri='contacts:Companies', is_searchable=False)
    contact_source = ChoiceField(field_uri='contacts:ContactSource', choices={
        Choice('Store'), Choice('ActiveDirectory')
    }, is_read_only=True)
    department = TextField(field_uri='contacts:Department')
    generation = TextField(field_uri='contacts:Generation')
    im_addresses = CharField(field_uri='contacts:ImAddresses', is_read_only=True)
    job_title = TextField(field_uri='contacts:JobTitle')
    manager = TextField(field_uri='contacts:Manager')
    mileage = TextField(field_uri='contacts:Mileage')
    office = TextField(field_uri='contacts:OfficeLocation')
    postal_address_index = ChoiceField(field_uri='contacts:PostalAddressIndex', choices={
        Choice('Business'), Choice('Home'), Choice('Other'), Choice('None')
    }, default='None', is_required_after_save=True)
    profession = TextField(field_uri='contacts:Profession')
    spouse_name = TextField(field_uri='contacts:SpouseName')
    surname = CharField(field_uri='contacts:Surname')
    wedding_anniversary = DateTimeBackedDateField(field_uri='contacts:WeddingAnniversary',
                                                  default_time=datetime.time(11, 59))
    # Fields below require newer Exchange versions (see supported_from).
    has_picture = BooleanField(field_uri='contacts:HasPicture', supported_from=EXCHANGE_2010, is_read_only=True)
    phonetic_full_name = TextField(field_uri='contacts:PhoneticFullName', supported_from=EXCHANGE_2010_SP2,
                                   is_read_only=True)
    phonetic_first_name = TextField(field_uri='contacts:PhoneticFirstName', supported_from=EXCHANGE_2010_SP2,
                                    is_read_only=True)
    phonetic_last_name = TextField(field_uri='contacts:PhoneticLastName', supported_from=EXCHANGE_2010_SP2,
                                   is_read_only=True)
    email_alias = EmailAddressField(field_uri='contacts:Alias', is_read_only=True,
                                    supported_from=EXCHANGE_2010_SP2)
    # 'notes' is documented in MSDN but apparently unused. Writing to it raises ErrorInvalidPropertyRequest. OWA
    # put entries into the 'notes' form field into the 'body' field.
    notes = CharField(field_uri='contacts:Notes', supported_from=EXCHANGE_2010_SP2, is_read_only=True)
    # 'photo' is documented in MSDN but apparently unused. Writing to it raises ErrorInvalidPropertyRequest. OWA
    # adds photos as FileAttachments on the contact item (with 'is_contact_photo=True'), which automatically flips
    # the 'has_picture' field.
    photo = Base64Field(field_uri='contacts:Photo', supported_from=EXCHANGE_2010_SP2, is_read_only=True)
    user_smime_certificate = Base64Field(field_uri='contacts:UserSMIMECertificate', supported_from=EXCHANGE_2010_SP2,
                                         is_read_only=True)
    ms_exchange_certificate = Base64Field(field_uri='contacts:MSExchangeCertificate', supported_from=EXCHANGE_2010_SP2,
                                          is_read_only=True)
    directory_id = TextField(field_uri='contacts:DirectoryId', supported_from=EXCHANGE_2010_SP2, is_read_only=True)
    manager_mailbox = MailboxField(field_uri='contacts:ManagerMailbox', supported_from=EXCHANGE_2010_SP2,
                                   is_read_only=True)
    direct_reports = MailboxListField(field_uri='contacts:DirectReports', supported_from=EXCHANGE_2010_SP2,
                                      is_read_only=True)
class Persona(IdChangeKeyMixIn):
    """An EWS Persona: an aggregated view of a person across sources.

    MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/persona

    Declarative EWS field table; declaration order mirrors the schema and
    must be preserved. The plural "*AttributedValue" fields carry one value
    per contributing source (see 'attributions').
    """
    ELEMENT_NAME = 'Persona'
    ID_ELEMENT_CLS = PersonaId
    _id = IdElementField(field_uri='persona:PersonaId', value_cls=ID_ELEMENT_CLS, namespace=TNS)
    persona_type = CharField(field_uri='persona:PersonaType')
    persona_object_type = TextField(field_uri='persona:PersonaObjectStatus')
    creation_time = DateTimeField(field_uri='persona:CreationTime')
    bodies = BodyContentAttributedValueField(field_uri='persona:Bodies')
    # Sort keys precomputed by the server for the various display orders.
    display_name_first_last_sort_key = TextField(field_uri='persona:DisplayNameFirstLastSortKey')
    display_name_last_first_sort_key = TextField(field_uri='persona:DisplayNameLastFirstSortKey')
    company_sort_key = TextField(field_uri='persona:CompanyNameSortKey')
    home_sort_key = TextField(field_uri='persona:HomeCitySortKey')
    work_city_sort_key = TextField(field_uri='persona:WorkCitySortKey')
    display_name_first_last_header = CharField(field_uri='persona:DisplayNameFirstLastHeader')
    display_name_last_first_header = CharField(field_uri='persona:DisplayNameLastFirstHeader')
    file_as_header = TextField(field_uri='persona:FileAsHeader')
    display_name = CharField(field_uri='persona:DisplayName')
    display_name_first_last = CharField(field_uri='persona:DisplayNameFirstLast')
    display_name_last_first = CharField(field_uri='persona:DisplayNameLastFirst')
    file_as = CharField(field_uri='persona:FileAs')
    file_as_id = TextField(field_uri='persona:FileAsId')
    display_name_prefix = CharField(field_uri='persona:DisplayNamePrefix')
    given_name = CharField(field_uri='persona:GivenName')
    middle_name = CharField(field_uri='persona:MiddleName')
    surname = CharField(field_uri='persona:Surname')
    generation = CharField(field_uri='persona:Generation')
    nickname = TextField(field_uri='persona:Nickname')
    yomi_company_name = TextField(field_uri='persona:YomiCompanyName')
    yomi_first_name = TextField(field_uri='persona:YomiFirstName')
    yomi_last_name = TextField(field_uri='persona:YomiLastName')
    title = CharField(field_uri='persona:Title')
    department = TextField(field_uri='persona:Department')
    company_name = CharField(field_uri='persona:CompanyName')
    email_address = EWSElementField(field_uri='persona:EmailAddress', value_cls=EmailAddress)
    email_addresses = EWSElementListField(field_uri='persona:EmailAddresses', value_cls=Address)
    # NOTE: attribute name is PascalCase (inconsistent with the rest of the
    # class) but is part of the public interface, so it is kept as-is.
    PhoneNumber = PersonaPhoneNumberField(field_uri='persona:PhoneNumber')
    im_address = CharField(field_uri='persona:ImAddress')
    home_city = CharField(field_uri='persona:HomeCity')
    work_city = CharField(field_uri='persona:WorkCity')
    relevance_score = CharField(field_uri='persona:RelevanceScore')
    folder_ids = EWSElementListField(field_uri='persona:FolderIds', value_cls=FolderId)
    attributions = EWSElementListField(field_uri='persona:Attributions', value_cls=Attribution)
    # Per-source ("attributed") variants of the scalar fields above.
    display_names = StringAttributedValueField(field_uri='persona:DisplayNames')
    file_ases = StringAttributedValueField(field_uri='persona:FileAses')
    file_as_ids = StringAttributedValueField(field_uri='persona:FileAsIds')
    display_name_prefixes = StringAttributedValueField(field_uri='persona:DisplayNamePrefixes')
    given_names = StringAttributedValueField(field_uri='persona:GivenNames')
    middle_names = StringAttributedValueField(field_uri='persona:MiddleNames')
    surnames = StringAttributedValueField(field_uri='persona:Surnames')
    generations = StringAttributedValueField(field_uri='persona:Generations')
    nicknames = StringAttributedValueField(field_uri='persona:Nicknames')
    initials = StringAttributedValueField(field_uri='persona:Initials')
    yomi_company_names = StringAttributedValueField(field_uri='persona:YomiCompanyNames')
    yomi_first_names = StringAttributedValueField(field_uri='persona:YomiFirstNames')
    yomi_last_names = StringAttributedValueField(field_uri='persona:YomiLastNames')
    business_phone_numbers = PhoneNumberAttributedValueField(field_uri='persona:BusinessPhoneNumbers')
    business_phone_numbers2 = PhoneNumberAttributedValueField(field_uri='persona:BusinessPhoneNumbers2')
    home_phones = PhoneNumberAttributedValueField(field_uri='persona:HomePhones')
    home_phones2 = PhoneNumberAttributedValueField(field_uri='persona:HomePhones2')
    mobile_phones = PhoneNumberAttributedValueField(field_uri='persona:MobilePhones')
    mobile_phones2 = PhoneNumberAttributedValueField(field_uri='persona:MobilePhones2')
    assistant_phone_numbers = PhoneNumberAttributedValueField(field_uri='persona:AssistantPhoneNumbers')
    callback_phones = PhoneNumberAttributedValueField(field_uri='persona:CallbackPhones')
    car_phones = PhoneNumberAttributedValueField(field_uri='persona:CarPhones')
    home_faxes = PhoneNumberAttributedValueField(field_uri='persona:HomeFaxes')
    organization_main_phones = PhoneNumberAttributedValueField(field_uri='persona:OrganizationMainPhones')
    other_faxes = PhoneNumberAttributedValueField(field_uri='persona:OtherFaxes')
    other_telephones = PhoneNumberAttributedValueField(field_uri='persona:OtherTelephones')
    other_phones2 = PhoneNumberAttributedValueField(field_uri='persona:OtherPhones2')
    pagers = PhoneNumberAttributedValueField(field_uri='persona:Pagers')
    radio_phones = PhoneNumberAttributedValueField(field_uri='persona:RadioPhones')
    telex_numbers = PhoneNumberAttributedValueField(field_uri='persona:TelexNumbers')
    tty_tdd_phone_numbers = PhoneNumberAttributedValueField(field_uri='persona:TTYTDDPhoneNumbers')
    work_faxes = PhoneNumberAttributedValueField(field_uri='persona:WorkFaxes')
    emails1 = EmailAddressAttributedValueField(field_uri='persona:Emails1')
    emails2 = EmailAddressAttributedValueField(field_uri='persona:Emails2')
    emails3 = EmailAddressAttributedValueField(field_uri='persona:Emails3')
    business_home_pages = StringAttributedValueField(field_uri='persona:BusinessHomePages')
    personal_home_pages = StringAttributedValueField(field_uri='persona:PersonalHomePages')
    office_locations = StringAttributedValueField(field_uri='persona:OfficeLocations')
    im_addresses = StringAttributedValueField(field_uri='persona:ImAddresses')
    im_addresses2 = StringAttributedValueField(field_uri='persona:ImAddresses2')
    im_addresses3 = StringAttributedValueField(field_uri='persona:ImAddresses3')
    business_addresses = PostalAddressAttributedValueField(field_uri='persona:BusinessAddresses')
    home_addresses = PostalAddressAttributedValueField(field_uri='persona:HomeAddresses')
    other_addresses = PostalAddressAttributedValueField(field_uri='persona:OtherAddresses')
    titles = StringAttributedValueField(field_uri='persona:Titles')
    departments = StringAttributedValueField(field_uri='persona:Departments')
    company_names = StringAttributedValueField(field_uri='persona:CompanyNames')
    managers = StringAttributedValueField(field_uri='persona:Managers')
    assistant_names = StringAttributedValueField(field_uri='persona:AssistantNames')
    professions = StringAttributedValueField(field_uri='persona:Professions')
    spouse_names = StringAttributedValueField(field_uri='persona:SpouseNames')
    children = StringAttributedValueField(field_uri='persona:Children')
    schools = StringAttributedValueField(field_uri='persona:Schools')
    hobbies = StringAttributedValueField(field_uri='persona:Hobbies')
    wedding_anniversaries = StringAttributedValueField(field_uri='persona:WeddingAnniversaries')
    birthdays = StringAttributedValueField(field_uri='persona:Birthdays')
    locations = StringAttributedValueField(field_uri='persona:Locations')
    # ExtendedPropertyAttributedValueField('extended_properties', field_uri='persona:ExtendedProperties')
class DistributionList(Item):
    """An EWS distribution list (a named group of mailboxes).

    MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/distributionlist
    """
    ELEMENT_NAME = 'DistributionList'
    display_name = CharField(field_uri='contacts:DisplayName', is_required=True)
    file_as = CharField(field_uri='contacts:FileAs', is_read_only=True)
    contact_source = ChoiceField(field_uri='contacts:ContactSource', choices={
        Choice('Store'), Choice('ActiveDirectory')
    }, is_read_only=True)
    # The mailboxes belonging to this list.
    members = MemberListField(field_uri='distributionlist:Members')
| 69.855769 | 119 | 0.783345 |
b1238a20fa74c4357c4a183f0972f544f912ad9f | 7,465 | py | Python | plugins/filters.py | binamracode/Auto-Filter-Bot-V2 | bec828192628d22c64c5d13110656ede766a099a | [
"MIT"
] | null | null | null | plugins/filters.py | binamracode/Auto-Filter-Bot-V2 | bec828192628d22c64c5d13110656ede766a099a | [
"MIT"
] | null | null | null | plugins/filters.py | binamracode/Auto-Filter-Bot-V2 | bec828192628d22c64c5d13110656ede766a099a | [
"MIT"
] | 1 | 2021-11-08T09:53:14.000Z | 2021-11-08T09:53:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @trojanzhex
import re
import pyrogram
from pyrogram import (
filters,
Client
)
from pyrogram.types import (
InlineKeyboardButton,
InlineKeyboardMarkup,
Message,
CallbackQuery
)
from bot import Bot
from script import script
from database.mdb import searchquery
from plugins.channel import deleteallfilters
from config import AUTH_USERS
BUTTONS = {}
@Client.on_message(filters.group & filters.text)
async def filter(client: Bot, message: Message):
    """Auto-filter handler.

    Looks the typed text up in the group's filter database and replies with
    inline buttons linking to the stored files, paginated 10 per page.
    Pagination state is kept in the module-level BUTTONS dict, keyed by
    "<chat_id>-<message_id>", and consumed by cb_handler.
    """
    #if re.findall("((^\/|^,|^!|^\.|^[\U0001F600-\U000E007F]).*)", message.text):
        #return
    if 2 < len(message.text) < 50:
        btn = []
        group_id = message.chat.id
        name = message.text
        filenames, links = await searchquery(group_id, name)
        if filenames and links:
            for filename, link in zip(filenames, links):
                # Fixed: the button label was the constant "(unknown)"
                # and the filename from the DB was ignored; show the
                # stored filename instead.
                btn.append(
                    [InlineKeyboardButton(text=f"{filename}", url=f"{link}")]
                )
        else:
            return

        if not btn:
            return

        if len(btn) > 10:
            # More than one page: split into pages of 10 and remember them
            # for the NEXT/BACK pagination callbacks.
            btns = list(split_list(btn, 10))
            keyword = f"{message.chat.id}-{message.message_id}"
            BUTTONS[keyword] = {
                "total" : len(btns),
                "buttons" : btns
            }
        else:
            # Single page: no pagination state needed.
            buttons = btn
            buttons.append(
                [InlineKeyboardButton(text="📃 Pages 1/1", callback_data="pages")]
            )
            await message.reply_text(
                f"<b> Here is the result for {message.text}</b>",
                reply_markup=InlineKeyboardMarkup(buttons)
            )
            return

        # First page of a multi-page result set.
        data = BUTTONS[keyword]
        buttons = data['buttons'][0].copy()
        buttons.append(
            [InlineKeyboardButton(text="NEXT ⏩", callback_data=f"next_0_{keyword}")]
        )
        buttons.append(
            [InlineKeyboardButton(text=f"📃 Pages 1/{data['total']}", callback_data="pages")]
        )
        await message.reply_text(
            f"<b> Here is the result for {message.text}</b>",
            reply_markup=InlineKeyboardMarkup(buttons)
        )
@Client.on_callback_query()
async def cb_handler(client: Bot, query: CallbackQuery):
    """Dispatch all inline-button callbacks.

    Handles result pagination (next_/back_ callbacks using the BUTTONS
    state built by filter()), the start/help/about menu, and the
    delete-all-filters confirmation. Only the user who typed the original
    query, or an AUTH_USERS member, may press the buttons.
    """
    clicked = query.from_user.id
    # NOTE(review): assumes the bot's reply is always a reply to the
    # triggering user's message; reply_to_message may be None otherwise.
    typed = query.message.reply_to_message.from_user.id
    if (clicked == typed) or (clicked in AUTH_USERS):
        if query.data.startswith("next"):
            await query.answer()
            # Callback data format: "next_<page-index>_<chat>-<msg>".
            # NOTE(review): BUTTONS is in-memory only -- a bot restart
            # makes this lookup raise KeyError for old messages.
            ident, index, keyword = query.data.split("_")
            data = BUTTONS[keyword]
            if int(index) == int(data["total"]) - 2:
                # Landing on the last page: only a BACK button.
                buttons = data['buttons'][int(index)+1].copy()
                buttons.append(
                    [InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)+1}_{keyword}")]
                )
                buttons.append(
                    [InlineKeyboardButton(f"📃 Pages {int(index)+2}/{data['total']}", callback_data="pages")]
                )
                await query.edit_message_reply_markup(
                    reply_markup=InlineKeyboardMarkup(buttons)
                )
                return
            else:
                # Middle page: both BACK and NEXT buttons.
                buttons = data['buttons'][int(index)+1].copy()
                buttons.append(
                    [InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)+1}_{keyword}"),InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)+1}_{keyword}")]
                )
                buttons.append(
                    [InlineKeyboardButton(f"📃 Pages {int(index)+2}/{data['total']}", callback_data="pages")]
                )
                await query.edit_message_reply_markup(
                    reply_markup=InlineKeyboardMarkup(buttons)
                )
                return
        elif query.data.startswith("back"):
            await query.answer()
            ident, index, keyword = query.data.split("_")
            data = BUTTONS[keyword]
            if int(index) == 1:
                # Landing back on the first page: only a NEXT button.
                buttons = data['buttons'][int(index)-1].copy()
                buttons.append(
                    [InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)-1}_{keyword}")]
                )
                buttons.append(
                    [InlineKeyboardButton(f"📃 Pages {int(index)}/{data['total']}", callback_data="pages")]
                )
                await query.edit_message_reply_markup(
                    reply_markup=InlineKeyboardMarkup(buttons)
                )
                return
            else:
                buttons = data['buttons'][int(index)-1].copy()
                buttons.append(
                    [InlineKeyboardButton("⏪ BACK", callback_data=f"back_{int(index)-1}_{keyword}"),InlineKeyboardButton("NEXT ⏩", callback_data=f"next_{int(index)-1}_{keyword}")]
                )
                buttons.append(
                    [InlineKeyboardButton(f"📃 Pages {int(index)}/{data['total']}", callback_data="pages")]
                )
                await query.edit_message_reply_markup(
                    reply_markup=InlineKeyboardMarkup(buttons)
                )
                return
        elif query.data == "pages":
            # Page indicator button: acknowledge with no action.
            await query.answer()
        elif query.data == "start_data":
            await query.answer()
            keyboard = InlineKeyboardMarkup([
                [InlineKeyboardButton("HELP", callback_data="help_data"),
                InlineKeyboardButton("ABOUT", callback_data="about_data")],
                [InlineKeyboardButton("⭕️ JOIN OUR GROUP ⭕️", url="https://t.me/akimaxmovies")]
            ])
            await query.message.edit_text(
                script.START_MSG.format(query.from_user.mention),
                reply_markup=keyboard,
                disable_web_page_preview=True
            )
        elif query.data == "help_data":
            await query.answer()
            keyboard = InlineKeyboardMarkup([
                [InlineKeyboardButton("BACK", callback_data="start_data"),
                InlineKeyboardButton("ABOUT", callback_data="about_data")],
                [InlineKeyboardButton("⭕️ SUPPORT ⭕️", url="https://t.me/kodilearn")]
            ])
            await query.message.edit_text(
                script.HELP_MSG,
                reply_markup=keyboard,
                disable_web_page_preview=True
            )
        elif query.data == "about_data":
            await query.answer()
            keyboard = InlineKeyboardMarkup([
                [InlineKeyboardButton("BACK", callback_data="help_data"),
                InlineKeyboardButton("START", callback_data="start_data")],
                [InlineKeyboardButton("SOURCE CODE", url="https://github.com/binamracode/Auto-Filter-Bot-V2")]
            ])
            await query.message.edit_text(
                script.ABOUT_MSG,
                reply_markup=keyboard,
                disable_web_page_preview=True
            )
        elif query.data == "delallconfirm":
            # Confirmed: remove the prompt and delete all filters.
            await query.message.delete()
            await deleteallfilters(client, query.message)
        elif query.data == "delallcancel":
            # Cancelled: clean up both the command and the prompt.
            await query.message.reply_to_message.delete()
            await query.message.delete()
    else:
        await query.answer("Thats not for you!!",show_alert=True)
def split_list(l, n):
    """Yield successive slices of *l* of length *n* (last may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
| 32.885463 | 179 | 0.538513 |
28b495ef33070f67096abebea4a9e2af97c9d58d | 826 | py | Python | tests/binarycodec/test_field_id_codec.py | mDuo13/xrpl-py | 70f927dcd2dbb8644b3e210b0a8de2a214e71e3d | [
"0BSD"
] | null | null | null | tests/binarycodec/test_field_id_codec.py | mDuo13/xrpl-py | 70f927dcd2dbb8644b3e210b0a8de2a214e71e3d | [
"0BSD"
] | null | null | null | tests/binarycodec/test_field_id_codec.py | mDuo13/xrpl-py | 70f927dcd2dbb8644b3e210b0a8de2a214e71e3d | [
"0BSD"
] | null | null | null | from unittest import TestCase
import xrpl.core.binarycodec.field_id_codec as field_id_codec
from tests.binarycodec.fixtures import data_driven_fixtures
class TestFieldIDCodec(TestCase):
    """`See FieldIDs <https://xrpl.org/serialization.html#field-ids>`_."""

    def setUp(self):
        self.field_tests = data_driven_fixtures.get_field_tests()

    def test_encode(self):
        for case in self.field_tests:
            encoded = field_id_codec.encode(case.name)
            # Fixture hex strings are upper-case; format the bytes to match.
            self.assertEqual(case.expected_hex, encoded.hex().upper())

    def test_decode(self):
        for case in self.field_tests:
            decoded = field_id_codec.decode(case.expected_hex)
            self.assertEqual(case.name, decoded)
| 34.416667 | 81 | 0.688862 |
7302d41cefc2c103341a34d8bfe8f8c8c4613d5c | 13,322 | py | Python | tests/test_band_mapper.py | whatnick/datacube-ows | 408dd7617b800d4fa0424462936e54782f402b1b | [
"Apache-2.0"
] | null | null | null | tests/test_band_mapper.py | whatnick/datacube-ows | 408dd7617b800d4fa0424462936e54782f402b1b | [
"Apache-2.0"
] | null | null | null | tests/test_band_mapper.py | whatnick/datacube-ows | 408dd7617b800d4fa0424462936e54782f402b1b | [
"Apache-2.0"
] | null | null | null | import datetime
import datacube_ows.band_mapper as bm
from datacube_ows.band_mapper import StyleDef
from datacube_ows.ogc_utils import DataCollection, DatasetCollection
from datacube_ows.ows_configuration import BandIndex, OWSProductLayer
from xarray import DataArray, Dataset
from unittest.mock import patch
import pytest
import numpy as np
@pytest.fixture
def product_layer():
    """Bare OWSProductLayer with a hand-built band index for style tests.

    Built with __new__ to skip the real constructors (which need a full
    OWS config); only the attributes the style code reads are set.
    """
    product_layer = OWSProductLayer.__new__(OWSProductLayer)
    product_layer.name = "test_product"
    product_layer.pq_band = "test_band"
    product_layer.always_fetch_bands = ["red", "green", "blue"]
    product_layer.band_idx = BandIndex.__new__(BandIndex)
    product_layer.band_idx.band_cfg = {
        "red": [ "crimson", "foo", ],
        "green": [ ],
        "blue": [ "azure" ],
        "fake": []
    }
    product_layer.band_idx._idx = {
        "red": "red",
        "crimson": "red",
        "foo": "red",
        "green": "green",
        "blue": "blue",
        # Fixed: band_cfg declares "azure" as an alias of "blue", but the
        # index previously resolved it to "red" -- inconsistent fixture.
        "azure": "blue",
        "fake": "fake",
    }
    return product_layer
@pytest.fixture
def style_cfg_lin():
    """Minimal linear (RGB component) style configuration."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["red", "green", "blue"],
        "scale_factor": 1.0,
        "scale_range": [1, 2],
        # Identity mapping: each output channel is its own band.
        "components": {
            "red": {"red": 1.0},
            "green": {"green": 1.0},
            "blue": {"blue": 1.0}
        }
    }
    return cfg
@pytest.fixture
def style_cfg_map():
    """Value-map style config: colours chosen by flag combinations."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["foo"],
        "value_map": {
            "foo": [
                # Black when bar set and baz clear (implicit AND).
                {
                    "title": "Invalid",
                    "abstract": "An Invalid Value",
                    "flags": {
                        "bar": True,
                        "baz": False
                    },
                    "color": "#000000"
                },
                # White when either x or y is set (explicit OR).
                {
                    "title": "Valid",
                    "abstract": "A Valid Value",
                    "flags": {
                        "or": {
                            "x": True,
                            "y": True
                        }
                    },
                    "color": "#FFFFFF"
                }
            ]
        }
    }
    return cfg
@pytest.fixture
def product_layer_alpha_map():
    """Single-band OWSProductLayer for the alpha value-map tests."""
    product_layer = OWSProductLayer.__new__(OWSProductLayer)
    product_layer.name = "test_product"
    product_layer.pq_band = "test_band"
    product_layer.always_fetch_bands = ["foo"]
    product_layer.band_idx = BandIndex.__new__(BandIndex)
    product_layer.band_idx.band_cfg = {
        "foo": ["foo"]
    }
    product_layer.band_idx._idx = {
        "foo": "foo"
    }
    return product_layer
@pytest.fixture
def style_cfg_map_alpha_1():
    """Value-map style whose only rule is fully transparent (alpha 0.0)."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["foo"],
        "value_map": {
            "foo": [
                {
                    "title": "Transparent",
                    "abstract": "A Transparent Value",
                    "flags": {
                        "bar": True,
                    },
                    "color": "#000000",
                    "alpha": 0.0
                }
            ]
        }
    }
    return cfg
@pytest.fixture
def style_cfg_map_alpha_2():
    """Value-map style whose only rule is half transparent (alpha 0.5)."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["foo"],
        "value_map": {
            "foo": [
                {
                    "title": "Semi-Transparent",
                    "abstract": "A Semi-Transparent Value",
                    "flags": {
                        "bar": False,
                    },
                    "color": "#000000",
                    "alpha": 0.5
                }
            ]
        }
    }
    return cfg
@pytest.fixture
def style_cfg_map_alpha_3():
    """Value-map style with no explicit alpha (expected to be opaque)."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["foo"],
        "value_map": {
            "foo": [
                {
                    "title": "Non-Transparent",
                    "abstract": "A Non-Transparent Value",
                    "flags": {
                        "bar": False,
                    },
                    "color": "#000000",
                }
            ]
        }
    }
    return cfg
@pytest.fixture
def style_cfg_ramp():
    """Colour-ramp style config driven by a constant index function."""
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["foo"],
        "index_function": {
            "function": "datacube_ows.band_utils.constant",
            "pass_product_cfg": True,
            "kwargs": {
                # NOTE(review): the constant is a *string* "0.1", not a
                # float -- presumably the band_utils helper coerces it;
                # confirm against datacube_ows.band_utils.constant.
                "const": "0.1"
            }
        },
        # Linear ramp: transparent white at 0.0 to opaque black at 1.0.
        "color_ramp": [
            {"value": 0.0, "color": "#FFFFFF", "alpha": 0.0},
            {"value": 1.0, "color": "#000000", "alpha": 1.0}
        ]
    }
    return cfg
def test_correct_style_hybrid(product_layer, style_cfg_lin):
    """Adding ratio/range/index_function makes a linear config hybrid."""
    hybrid_extras = {
        "component_ratio": 1.0,
        "range": [1, 2],
        "index_function": {
            "function": "datacube_ows.band_utils.constant",
            "pass_product_cfg": True,
            "kwargs": {"const": "0.1"},
        },
    }
    style_cfg_lin.update(hybrid_extras)
    style = StyleDef(product_layer, style_cfg_lin)
    assert isinstance(style, bm.HybridStyleDef)
def test_correct_style_linear(product_layer, style_cfg_lin):
    """An unmodified component config builds a LinearStyleDef."""
    style = StyleDef(product_layer, style_cfg_lin)
    assert isinstance(style, bm.LinearStyleDef)
def test_correct_style_map(product_layer, style_cfg_map):
    """A value_map config builds an RGBAMappedStyleDef."""
    style = StyleDef(product_layer, style_cfg_map)
    assert isinstance(style, bm.RGBAMappedStyleDef)
def test_alpha_style_map(
        product_layer_alpha_map,
        style_cfg_map_alpha_1,
        style_cfg_map_alpha_2,
        style_cfg_map_alpha_3):
    """Alpha 0.0 / 0.5 / default map to channel values 0 / 127 / 255."""
    # Stand-in for datacube's make_mask: pass the boolean data through so
    # every pixel matches the rule's flags.
    def fake_make_mask(data, **kwargs):
        return data
    band = np.array([True, True, True])
    time = datetime.date.today()
    da = DataArray(band, name='foo')
    dsc = DatasetCollection()
    ds = Dataset(data_vars={'foo': da})
    dsc.add_time(time, ds)
    with patch('datacube_ows.band_mapper.make_mask', new_callable=lambda: fake_make_mask) as fmm:
        # alpha 0.0 -> fully transparent output.
        style_def = StyleDef(product_layer_alpha_map, style_cfg_map_alpha_1)
        result = style_def.transform_data(dsc, None, None)
        alpha_channel = result["alpha"].values
        assert (alpha_channel == 0).all()
        # alpha 0.5 -> 127 (floor of 0.5 * 255).
        style_def = StyleDef(product_layer_alpha_map, style_cfg_map_alpha_2)
        result = style_def.transform_data(dsc, None, None)
        alpha_channel = result["alpha"].values
        assert (alpha_channel == 127).all()
        # no alpha key -> fully opaque by default.
        style_def = StyleDef(product_layer_alpha_map, style_cfg_map_alpha_3)
        result = style_def.transform_data(dsc, None, None)
        alpha_channel = result["alpha"].values
        assert (alpha_channel == 255).all()
def test_correct_style_ramp(product_layer, style_cfg_ramp):
    """A color_ramp config builds an RgbaColorRampDef."""
    built = StyleDef(product_layer, style_cfg_ramp)
    assert isinstance(built, bm.RgbaColorRampDef)
def test_dynamic_range_compression_scale_range(product_layer, style_cfg_lin):
    """compress_band maps [scale_min, scale_max] linearly onto [0, 255]."""
    style_cfg_lin["scale_range"] = [-3000, 3000]
    style_def = StyleDef(product_layer, style_cfg_lin)
    assert style_def.scale_min == -3000
    assert style_def.scale_max == 3000

    band = np.array([-3000.0, 0.0, 3000.0])
    compressed = style_def.compress_band("red", band)
    assert compressed[0] == 0
    assert compressed[1] == 255 / 2
    assert compressed[2] == 255
def test_dynamic_range_compression_scale_range_clip(product_layer, style_cfg_lin):
    """Values outside [scale_min, scale_max] are clipped to 0 and 255."""
    style_cfg_lin["scale_range"] = [-3000, 3000]
    style_def = StyleDef(product_layer, style_cfg_lin)
    assert style_def.scale_min == -3000
    assert style_def.scale_max == 3000

    band = np.array([-3001.0, 0.0, 3001.0])
    compressed = style_def.compress_band("red", band)
    assert compressed[0] == 0
    assert compressed[1] == 255 / 2
    assert compressed[2] == 255
def test_dynamic_range_compression_scale_factor(product_layer, style_cfg_lin):
    """A scale_factor config implies scale_range [0, 255 * factor] and clips outside it.

    The original version of this test built ``band`` but never exercised
    ``compress_band`` with it, so the compression behaviour was untested.
    """
    del style_cfg_lin["scale_range"]
    style_cfg_lin["scale_factor"] = 2.5
    style_def = StyleDef(product_layer, style_cfg_lin)
    assert style_def.scale_min == 0.0
    assert style_def.scale_max == 637.5  # 255 * 2.5

    band = np.zeros(3)
    band[0] = -3000
    band[1] = 0
    band[2] = 3000
    compressed = style_def.compress_band("red", band)
    # -3000 and 0 both land at (or below) scale_min; 3000 is above scale_max.
    assert compressed[0] == 0
    assert compressed[1] == 0
    assert compressed[2] == 255
@pytest.fixture
def product_layer_mask_map():
    """A minimal OWSProductLayer with a single 'foo' band and no PQ band."""
    # __new__ bypasses the real constructor (which needs a datacube index);
    # only the attributes the tests read are populated.
    layer = OWSProductLayer.__new__(OWSProductLayer)
    layer.name = "test_product"
    layer.pq_band = None
    layer.always_fetch_bands = ["foo"]
    layer.band_idx = BandIndex.__new__(BandIndex)
    layer.band_idx.band_cfg = {"foo": ["foo"]}
    layer.band_idx._idx = {"foo": "foo"}
    return layer
@pytest.fixture
def style_cfg_map_mask():
    """A value_map style config whose first rule carries ``"mask": True``.

    Three rules on band 'foo': two match flag ``bar == 1`` (the first is
    masked, i.e. rendered fully transparent), one matches ``bar == 2``.
    """
    cfg = {
        "name": "test_style",
        "title": "Test Style",
        "abstract": "This is a Test Style for Datacube WMS",
        "needed_bands": ["foo"],
        "value_map": {
            "foo": [
                # Masked rule: matching pixels become fully transparent.
                {
                    "title": "Non-Transparent",
                    "abstract": "A Non-Transparent Value",
                    "flags": {
                        "bar": 1,
                    },
                    "color": "#111111",
                    "mask": True
                },
                # Unmasked white rule for bar == 2.
                {
                    "title": "Non-Transparent",
                    "abstract": "A Non-Transparent Value",
                    "flags": {
                        "bar": 2,
                    },
                    "color": "#FFFFFF",
                },
                # Duplicate bar == 1 rule without a mask; the masked rule
                # above takes precedence because it appears first.
                {
                    "title": "Non-Transparent",
                    "abstract": "A Non-Transparent Value",
                    "flags": {
                        "bar": 1,
                    },
                    "color": "#111111",
                }
            ]
        }
    }
    return cfg
def test_RBGAMapped_Masking(product_layer_mask_map, style_cfg_map_mask):
    """Pixels matching a rule with "mask": True come out as transparent black."""
    def fake_make_mask(data, **kwargs):
        return data == kwargs["bar"]

    band = np.array([0, 0, 1, 1, 2, 2])
    ds = DatasetCollection()
    ds.add_time(datetime.date.today(), Dataset(data_vars={'foo': DataArray(band, name='foo')}))

    with patch('datacube_ows.band_mapper.make_mask', new_callable=lambda: fake_make_mask) as fmm:
        style_def = StyleDef(product_layer_mask_map, style_cfg_map_mask)
        data = style_def.transform_data(ds, None, None)

    for channel in ("red", "green", "blue", "alpha"):
        # bar == 1 hits the masked rule -> all channels zero ...
        assert (data[channel][2:3:1] == 0)
        # ... while bar == 2 hits the unmasked white rule -> all channels 255.
        assert (data[channel][4:5:1] == 255)
def test_reint():
    """reint converts float arrays/DataArrays to integer kind and is idempotent."""
    from datacube_ows.band_mapper import RGBAMappedStyleDef

    floats = np.array([0., 0., 1., 1., 2., 2.])
    assert floats.dtype.kind == "f"
    as_int = RGBAMappedStyleDef.reint(floats)
    assert as_int.dtype.kind == "i"

    da = DataArray(floats, name='foo')
    assert da.dtype.kind == "f"
    da_int = RGBAMappedStyleDef.reint(da)
    assert da_int.dtype.kind == "i"
    # Re-applying to already-integer data keeps the integer kind.
    assert RGBAMappedStyleDef.reint(da_int).dtype.kind == "i"
def test_createcolordata():
    """Full alpha + all-true mask: every channel equals the colour component (1.0 for white)."""
    from datacube_ows.band_mapper import RGBAMappedStyleDef
    from colour import Color

    band = np.array([0, 0, 1, 1, 2, 2])
    white = Color("#FFFFFF")
    data = RGBAMappedStyleDef.create_colordata(DataArray(band, name='foo'), white, 1.0, band >= 0)
    assert (data == 1.0).all()
def test_createcolordata_alpha():
    """An alpha of 0.0 produces an all-zero alpha channel."""
    from datacube_ows.band_mapper import RGBAMappedStyleDef
    from colour import Color

    band = np.array([0, 0, 1, 1, 2, 2])
    white = Color("#FFFFFF")
    data = RGBAMappedStyleDef.create_colordata(DataArray(band, name='foo'), white, 0.0, band >= 0)
    assert (data["alpha"] == 0).all()
def test_createcolordata_mask():
    """Pixels excluded by the mask become NaN; included pixels stay finite."""
    from datacube_ows.band_mapper import RGBAMappedStyleDef
    from colour import Color

    band = np.array([0, 0, 1, 1, 2, 2])
    white = Color("#FFFFFF")
    # Mask out the zeros (band > 0 is False at indices 0 and 1).
    data = RGBAMappedStyleDef.create_colordata(DataArray(band, name='foo'), white, 0.0, band > 0)
    assert (np.isnan(data["red"][0:1:1])).all()
    assert (np.isfinite(data["red"][2:5:1])).all()
def test_createcolordata_remask():
    """NaNs already present in the input stay NaN even when the mask is all-true."""
    from datacube_ows.band_mapper import RGBAMappedStyleDef
    from colour import Color

    band = np.array([0, 0, 1, 1, np.nan, np.nan])
    white = Color("#FFFFFF")
    all_true = np.array([True] * 6)
    data = RGBAMappedStyleDef.create_colordata(DataArray(band, name='foo'), white, 0.0, all_true)
    assert (np.isfinite(data["red"][0:3:1])).all()
    assert (np.isnan(data["red"][4:5:1])).all()
| 28.711207 | 108 | 0.54759 |
59baef8dd36ba5e191eaf5cf703b757fa6dfaba9 | 2,214 | py | Python | api/environments/identities/traits/tests/test_models.py | btasker/flagsmith | f8fab5a6e8f8b649e56e16443267d8cdb645a49b | [
"BSD-3-Clause"
] | null | null | null | api/environments/identities/traits/tests/test_models.py | btasker/flagsmith | f8fab5a6e8f8b649e56e16443267d8cdb645a49b | [
"BSD-3-Clause"
] | 7 | 2021-10-01T01:17:49.000Z | 2021-10-12T15:44:48.000Z | api/environments/identities/traits/tests/test_models.py | admariner/flagsmith | cb5d9fe163fa6974b3e322c2237776b5ef09acf9 | [
"BSD-3-Clause"
] | null | null | null | from unittest import mock
import pytest
from flag_engine.identities.builders import build_identity_dict
from environments.identities.traits.models import Trait
@pytest.mark.parametrize(
    "value, expected_data",
    [
        (1, {"value_type": "int", "integer_value": 1}),
        (0, {"value_type": "int", "integer_value": 0}),
        ("my_string", {"value_type": "unicode", "string_value": "my_string"}),
        (True, {"value_type": "bool", "boolean_value": True}),
        (False, {"value_type": "bool", "boolean_value": False}),
        (123.4, {"value_type": "float", "float_value": 123.4}),
    ],
)
def test_generate_trait_value_data_for_value(value, expected_data):
    """Raw Python values map to the corresponding typed trait-value fields."""
    generated = Trait.generate_trait_value_data(value)
    assert generated == expected_data
@pytest.mark.parametrize(
    "deserialized_data, expected_data",
    [
        ({"type": "int", "value": 1}, {"value_type": "int", "integer_value": 1}),
        ({"type": "int", "value": 0}, {"value_type": "int", "integer_value": 0}),
        (
            {"type": "unicode", "value": "my_string"},
            {"value_type": "unicode", "string_value": "my_string"},
        ),
        (
            {"type": "bool", "value": True},
            {"value_type": "bool", "boolean_value": True},
        ),
        (
            {"type": "bool", "value": False},
            {"value_type": "bool", "boolean_value": False},
        ),
        (
            {"type": "float", "value": 123.4},
            {"value_type": "float", "float_value": 123.4},
        ),
    ],
)
def test_generate_trait_value_data_for_deserialized_data(
    deserialized_data, expected_data
):
    """Serializer-shaped {'type', 'value'} dicts map to typed trait-value fields."""
    generated = Trait.generate_trait_value_data(deserialized_data)
    assert generated == expected_data
def test_trait_bulk_create_create_objects(identity):
    """bulk_create persists every supplied trait for the identity."""
    # Given
    trait_keys = ["key1", "key2"]
    traits = [Trait(identity=identity, trait_key=key) for key in trait_keys]

    # When
    Trait.objects.bulk_create(traits)

    # Then
    assert Trait.objects.filter(identity=identity).count() == len(trait_keys)
def test_trait_bulk_delete_deletes_objects(trait):
    """Deleting a trait queryset removes the traits from the database."""
    # When
    Trait.objects.filter(identity=trait.identity).delete()

    # Then
    # The original version evaluated this comparison without `assert`, so
    # the expression was discarded and the test could never fail here.
    assert Trait.objects.filter(identity=trait.identity).count() == 0
f17325881bf9952dd30a1fe08e672af390423ebb | 2,460 | py | Python | source/utils.py | vihaton/calendar-helper | 535b50a24972f6e1b5894aa2e681809805d790ef | [
"MIT"
] | 1 | 2021-01-20T22:52:32.000Z | 2021-01-20T22:52:32.000Z | source/utils.py | vihaton/calendar-helper | 535b50a24972f6e1b5894aa2e681809805d790ef | [
"MIT"
] | null | null | null | source/utils.py | vihaton/calendar-helper | 535b50a24972f6e1b5894aa2e681809805d790ef | [
"MIT"
] | null | null | null | from __future__ import print_function
import datetime
import pickle
import os.path
from google import auth
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
SECRETS = '../secrets/'
TOKEN = SECRETS + 'token.pickle'
CREDS = SECRETS + 'credentials.json'
def authenticate(verbose=False):
    '''
    Obtain Google API credentials, preferring the cached token.pickle and
    falling back to the OAuth flow driven by credentials.json.
    return credentials
    '''
    if verbose:
        print(f'expect to find token in {TOKEN} and credentials in {CREDS}')

    creds = None
    # Reuse the token cached by a previous run, if present.
    # NOTE(review): pickle.load is safe only because TOKEN is a locally
    # written cache file; never point it at untrusted input.
    if os.path.exists(TOKEN):
        with open(TOKEN, 'rb') as token:
            creds = pickle.load(token)

    if creds and creds.valid:
        return creds

    # No usable credentials: refresh an expired token, or run the full
    # browser-based login flow.
    if creds and creds.expired and creds.refresh_token:
        creds.refresh(Request())
    else:
        flow = InstalledAppFlow.from_client_secrets_file(CREDS, SCOPES)
        creds = flow.run_local_server(port=0)

    # Persist the (new or refreshed) credentials for the next run.
    with open(TOKEN, 'wb') as token:
        pickle.dump(creds, token)
    return creds
def main():
    """Shows basic usage of the Google Calendar API.

    Prints the start and name of the next 10 events on the user's calendar.
    """
    service = build('calendar', 'v3', credentials=authenticate())

    # Call the Calendar API; 'Z' indicates UTC time.
    now = datetime.datetime.utcnow().isoformat() + 'Z'
    print('Getting the upcoming 10 events')
    events_result = service.events().list(
        calendarId='primary', timeMin=now, maxResults=10,
        singleEvents=True, orderBy='startTime'
    ).execute()
    events = events_result.get('items', [])

    if not events:
        print('No upcoming events found.')
    for event in events:
        start = event['start'].get('dateTime', event['start'].get('date'))
        print(start, event['summary'])


if __name__ == '__main__':
    main()
| 33.243243 | 104 | 0.653252 |
a8cc014c1a4f9df8fbe893c1e90620db84c31446 | 2,037 | py | Python | clean.py | opendxl/opendxl-tie-client-python | 652261be44986c513caed6b62112f935dcf1e295 | [
"Apache-2.0"
] | 18 | 2016-12-19T18:19:39.000Z | 2020-07-16T17:01:36.000Z | clean.py | opendxl/opendxl-tie-client-python | 652261be44986c513caed6b62112f935dcf1e295 | [
"Apache-2.0"
] | 17 | 2017-03-30T11:51:27.000Z | 2021-11-12T10:17:20.000Z | clean.py | opendxl/opendxl-tie-client-python | 652261be44986c513caed6b62112f935dcf1e295 | [
"Apache-2.0"
] | 13 | 2016-12-19T18:19:41.000Z | 2018-07-11T22:09:56.000Z | from __future__ import absolute_import
from __future__ import print_function
import os
# pylint: disable=no-name-in-module, import-error
from distutils.dir_util import remove_tree
from shutil import copyfile
def clean_dir(src_dir, directory):
    """Remove non-Python files from `directory`, then mirror the non-Python
    payload files from `src_dir` into it (``.py``/``.pyc`` files are skipped
    in both directions)."""
    if os.path.exists(directory):
        print("Cleaning directory: " + directory + "\n")
        for name in os.listdir(directory):
            target = os.path.join(directory, name)
            if not os.path.isdir(target) and not name.lower().endswith(".py"):
                os.remove(target)

    if os.path.exists(src_dir):
        for name in os.listdir(src_dir):
            source = os.path.join(src_dir, name)
            # endswith accepts a tuple of suffixes, replacing the `or` chain.
            if not os.path.isdir(source) and \
                    not name.lower().endswith((".py", ".pyc")):
                copyfile(source, os.path.join(directory, name))
print("Starting clean.\n")
# Root of the repository: the directory containing this script.
DIST_PY_FILE_LOCATION = os.path.dirname(os.path.realpath(__file__))
# Build-output and payload directories to be cleaned/refreshed.
DIST_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dist")
CONFIG_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "config")
SAMPLE_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "sample")
# Canonical sources whose non-Python files are mirrored into the
# top-level config/sample directories after cleaning.
CONFIG_SRC_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dxltieclient",
                                    "_config", "app")
SAMPLE_SRC_DIRECTORY = os.path.join(DIST_PY_FILE_LOCATION, "dxltieclient",
                                    "_config", "sample")
# Remove the dist directory if it exists
if os.path.exists(DIST_DIRECTORY):
    print("Removing dist directory: " + DIST_DIRECTORY + "\n")
    remove_tree(DIST_DIRECTORY, verbose=1)
# Clean the config directory
clean_dir(CONFIG_SRC_DIRECTORY, CONFIG_DIRECTORY)
# Clean the samples directory
clean_dir(SAMPLE_SRC_DIRECTORY, SAMPLE_DIRECTORY)
# Clean .pyc files (recursively, across the whole repository)
print("Cleaning .pyc files")
for root, dirs, files in os.walk(DIST_PY_FILE_LOCATION):
    for source_file in files:
        full_path = os.path.join(root, source_file)
        if full_path.lower().endswith(".pyc"):
            os.remove(full_path)
| 39.173077 | 81 | 0.682376 |
2e684fa432d93fc7a51f04a33900aaa652f7a910 | 40,232 | py | Python | canvasapi/canvas.py | phaustin/canvasapi | a488a974ef9d7037d615320802387e7d51279d65 | [
"MIT"
] | 1 | 2018-11-20T17:17:50.000Z | 2018-11-20T17:17:50.000Z | canvasapi/canvas.py | PennState/canvasapi | 077cbd51516484a5c44834c8aa3d0c4425e4ffcf | [
"MIT"
] | null | null | null | canvasapi/canvas.py | PennState/canvasapi | 077cbd51516484a5c44834c8aa3d0c4425e4ffcf | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
from canvasapi.account import Account
from canvasapi.course import Course
from canvasapi.current_user import CurrentUser
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.file import File
from canvasapi.folder import Folder
from canvasapi.group import Group, GroupCategory
from canvasapi.paginated_list import PaginatedList
from canvasapi.requester import Requester
from canvasapi.section import Section
from canvasapi.user import User
from canvasapi.util import combine_kwargs, get_institution_url, obj_or_id
warnings.simplefilter('always', DeprecationWarning)
class Canvas(object):
"""
The main class to be instantiated to provide access to Canvas's API.
"""
def __init__(self, base_url, access_token):
"""
:param base_url: The base URL of the Canvas instance's API.
:type base_url: str
:param access_token: The API key to authenticate requests with.
:type access_token: str
"""
new_url = get_institution_url(base_url)
if 'api/v1' in base_url:
warnings.warn(
"`base_url` no longer requires an API version be specified. "
"Rewriting `base_url` to {}".format(new_url),
DeprecationWarning
)
if 'http://' in base_url:
warnings.warn(
"Canvas may respond unexpectedly when making requests to HTTP "
"URLs. If possible, please use HTTPS.",
UserWarning
)
base_url = new_url + '/api/v1/'
self.__requester = Requester(base_url, access_token)
def create_account(self, **kwargs):
"""
Create a new root account.
:calls: `POST /api/v1/accounts \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.create>`_
:rtype: :class:`canvasapi.account.Account`
"""
response = self.__requester.request(
'POST',
'accounts',
_kwargs=combine_kwargs(**kwargs)
)
return Account(self.__requester, response.json())
def get_account(self, account, use_sis_id=False, **kwargs):
"""
Retrieve information on an individual account.
:calls: `GET /api/v1/accounts/:id \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show>`_
:param account: The object or ID of the account to retrieve.
:type account: int, str or :class:`canvasapi.account.Account`
:param use_sis_id: Whether or not account_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.account.Account`
"""
if use_sis_id:
account_id = account
uri_str = 'accounts/sis_account_id:{}'
else:
account_id = obj_or_id(account, "account", (Account,))
uri_str = 'accounts/{}'
response = self.__requester.request(
'GET',
uri_str.format(account_id),
_kwargs=combine_kwargs(**kwargs)
)
return Account(self.__requester, response.json())
def get_accounts(self, **kwargs):
"""
List accounts that the current user can view or manage.
Typically, students and teachers will get an empty list in
response. Only account admins can view the accounts that they
are in.
:calls: `GET /api/v1/accounts \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.account.Account`
"""
return PaginatedList(
Account,
self.__requester,
'GET',
'accounts',
_kwargs=combine_kwargs(**kwargs)
)
def get_course_accounts(self):
"""
List accounts that the current user can view through their
admin course enrollments (Teacher, TA or designer enrollments).
Only returns `id`, `name`, `workflow_state`, `root_account_id`
and `parent_account_id`.
:calls: `GET /api/v1/course_accounts \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.course_accounts>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.account.Account`
"""
return PaginatedList(
Account,
self.__requester,
'GET',
'course_accounts',
)
def get_course(self, course, use_sis_id=False, **kwargs):
"""
Retrieve a course by its ID.
:calls: `GET /api/v1/courses/:id \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.show>`_
:param course: The object or ID of the course to retrieve.
:type course: int, str or :class:`canvasapi.course.Course`
:param use_sis_id: Whether or not course_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.course.Course`
"""
if use_sis_id:
course_id = course
uri_str = 'courses/sis_course_id:{}'
else:
course_id = obj_or_id(course, "course", (Course,))
uri_str = 'courses/{}'
response = self.__requester.request(
'GET',
uri_str.format(course_id),
_kwargs=combine_kwargs(**kwargs)
)
return Course(self.__requester, response.json())
def get_user(self, user, id_type=None):
"""
Retrieve a user by their ID. `id_type` denotes which endpoint to try as there are
several different IDs that can pull the same user record from Canvas.
Refer to API documentation's
`User <https://canvas.instructure.com/doc/api/users.html#User>`_
example to see the ID types a user can be retrieved with.
:calls: `GET /api/v1/users/:id \
<https://canvas.instructure.com/doc/api/users.html#method.users.api_show>`_
:param user: The user's object or ID.
:type user: :class:`canvasapi.user.User` or int
:param id_type: The ID type.
:type id_type: str
:rtype: :class:`canvasapi.user.User`
"""
if id_type:
uri = 'users/{}:{}'.format(id_type, user)
elif user == 'self':
uri = 'users/self'
else:
user_id = obj_or_id(user, "user", (User,))
uri = 'users/{}'.format(user_id)
response = self.__requester.request(
'GET',
uri
)
return User(self.__requester, response.json())
    def get_current_user(self):
        """
        Return a :class:`canvasapi.current_user.CurrentUser` bound to this
        instance's requester, representing the owner of the access token.

        :rtype: :class:`canvasapi.current_user.CurrentUser`
        """
        return CurrentUser(self.__requester)
def get_courses(self, **kwargs):
"""
Return a list of active courses for the current user.
:calls: `GET /api/v1/courses \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.Course`
"""
return PaginatedList(
Course,
self.__requester,
'GET',
'courses',
_kwargs=combine_kwargs(**kwargs)
)
def get_activity_stream_summary(self):
"""
Return a summary of the current user's global activity stream.
:calls: `GET /api/v1/users/self/activity_stream/summary \
<https://canvas.instructure.com/doc/api/users.html#method.users.activity_stream_summary>`_
:rtype: dict
"""
response = self.__requester.request(
'GET',
'users/self/activity_stream/summary'
)
return response.json()
def get_todo_items(self):
"""
Return the current user's list of todo items, as seen on the user dashboard.
:calls: `GET /api/v1/users/self/todo \
<https://canvas.instructure.com/doc/api/users.html#method.users.todo_items>`_
:rtype: dict
"""
response = self.__requester.request(
'GET',
'users/self/todo'
)
return response.json()
def get_upcoming_events(self):
"""
Return the current user's upcoming events, i.e. the same things shown
in the dashboard 'Coming Up' sidebar.
:calls: `GET /api/v1/users/self/upcoming_events \
<https://canvas.instructure.com/doc/api/users.html#method.users.upcoming_events>`_
:rtype: dict
"""
response = self.__requester.request(
'GET',
'users/self/upcoming_events'
)
return response.json()
def get_course_nicknames(self):
"""
Return all course nicknames set by the current account.
:calls: `GET /api/v1/users/self/course_nicknames \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.CourseNickname`
"""
from canvasapi.course import CourseNickname
return PaginatedList(
CourseNickname,
self.__requester,
'GET',
'users/self/course_nicknames'
)
def get_course_nickname(self, course):
"""
Return the nickname for the given course.
:calls: `GET /api/v1/users/self/course_nicknames/:course_id \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.show>`_
:param course: The object or ID of the course.
:type course: :class:`canvasapi.course.Course` or int
:rtype: :class:`canvasapi.course.CourseNickname`
"""
from canvasapi.course import CourseNickname
course_id = obj_or_id(course, "course", (Course,))
response = self.__requester.request(
'GET',
'users/self/course_nicknames/{}'.format(course_id)
)
return CourseNickname(self.__requester, response.json())
def get_section(self, section, use_sis_id=False, **kwargs):
"""
Get details about a specific section.
:calls: `GET /api/v1/sections/:id \
<https://canvas.instructure.com/doc/api/sections.html#method.sections.show>`_
:param section: The object or ID of the section to get.
:type section: :class:`canvasapi.section.Section` or int
:param use_sis_id: Whether or not section_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.section.Section`
"""
if use_sis_id:
section_id = section
uri_str = 'sections/sis_section_id:{}'
else:
section_id = obj_or_id(section, "section", (Section,))
uri_str = 'sections/{}'
response = self.__requester.request(
'GET',
uri_str.format(section_id),
_kwargs=combine_kwargs(**kwargs)
)
return Section(self.__requester, response.json())
def set_course_nickname(self, course, nickname):
"""
Set a nickname for the given course. This will replace the
course's name in the output of subsequent API calls, as
well as in selected places in the Canvas web user interface.
:calls: `PUT /api/v1/users/self/course_nicknames/:course_id \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.update>`_
:param course: The ID of the course.
:type course: :class:`canvasapi.course.Course` or int
:param nickname: The nickname for the course.
:type nickname: str
:rtype: :class:`canvasapi.course.CourseNickname`
"""
from canvasapi.course import CourseNickname
course_id = obj_or_id(course, "course", (Course,))
response = self.__requester.request(
'PUT',
'users/self/course_nicknames/{}'.format(course_id),
nickname=nickname
)
return CourseNickname(self.__requester, response.json())
def clear_course_nicknames(self):
"""
Remove all stored course nicknames.
:calls: `DELETE /api/v1/users/self/course_nicknames \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.clear>`_
:returns: True if the nicknames were cleared, False otherwise.
:rtype: bool
"""
response = self.__requester.request(
'DELETE',
'users/self/course_nicknames'
)
return response.json().get('message') == 'OK'
def search_accounts(self, **kwargs):
"""
Return a list of up to 5 matching account domains. Partial matches on
name and domain are supported.
:calls: `GET /api/v1/accounts/search \
<https://canvas.instructure.com/doc/api/account_domain_lookups.html#method.account_domain_lookups.search>`_
:rtype: dict
"""
response = self.__requester.request(
'GET',
'accounts/search',
_kwargs=combine_kwargs(**kwargs)
)
return response.json()
def create_group(self, **kwargs):
"""
Create a group
:calls: `POST /api/v1/groups/ \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.create>`_
:rtype: :class:`canvasapi.group.Group`
"""
response = self.__requester.request(
'POST',
'groups',
_kwargs=combine_kwargs(**kwargs)
)
return Group(self.__requester, response.json())
def get_group(self, group, use_sis_id=False, **kwargs):
"""
Return the data for a single group. If the caller does not
have permission to view the group a 401 will be returned.
:calls: `GET /api/v1/groups/:group_id \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.show>`_
:param group: The object or ID of the group to get.
:type group: :class:`canvasapi.group.Group` or int
:param use_sis_id: Whether or not group_id is an sis ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.group.Group`
"""
if use_sis_id:
group_id = group
uri_str = 'groups/sis_group_id:{}'
else:
group_id = obj_or_id(group, "group", (Group,))
uri_str = 'groups/{}'
response = self.__requester.request(
'GET',
uri_str.format(group_id),
_kwargs=combine_kwargs(**kwargs)
)
return Group(self.__requester, response.json())
def get_group_category(self, category):
"""
Get a single group category.
:calls: `GET /api/v1/group_categories/:group_category_id \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.show>`_
:param category: The object or ID of the category.
:type category: :class:`canvasapi.group.GroupCategory` or int
:rtype: :class:`canvasapi.group.GroupCategory`
"""
category_id = obj_or_id(category, "category", (GroupCategory,))
response = self.__requester.request(
'GET',
'group_categories/{}'.format(category_id)
)
return GroupCategory(self.__requester, response.json())
def create_conversation(self, recipients, body, **kwargs):
"""
Create a new Conversation.
:calls: `POST /api/v1/conversations \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.create>`_
:param recipients: An array of recipient ids.
These may be user ids or course/group ids prefixed
with 'course\\_' or 'group\\_' respectively,
e.g. recipients=['1', '2', 'course_3']
:type recipients: `list` of `str`
:param body: The body of the message being added.
:type body: `str`
:rtype: list of :class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
kwargs['recipients'] = recipients
kwargs['body'] = body
response = self.__requester.request(
'POST',
'conversations',
_kwargs=combine_kwargs(**kwargs)
)
return [Conversation(self.__requester, convo) for convo in response.json()]
def get_conversation(self, conversation, **kwargs):
"""
Return single Conversation
:calls: `GET /api/v1/conversations/:id \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.show>`_
:param conversation: The object or ID of the conversation.
:type conversation: :class:`canvasapi.conversation.Conversation` or int
:rtype: :class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
conversation_id = obj_or_id(conversation, "conversation", (Conversation,))
response = self.__requester.request(
'GET',
'conversations/{}'.format(conversation_id),
_kwargs=combine_kwargs(**kwargs)
)
return Conversation(self.__requester, response.json())
def get_conversations(self, **kwargs):
"""
Return list of conversations for the current user, most resent ones first.
:calls: `GET /api/v1/conversations \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of \
:class:`canvasapi.conversation.Conversation`
"""
from canvasapi.conversation import Conversation
return PaginatedList(
Conversation,
self.__requester,
'GET',
'conversations',
_kwargs=combine_kwargs(**kwargs)
)
def conversations_mark_all_as_read(self):
"""
Mark all conversations as read.
:calls: `POST /api/v1/conversations/mark_all_as_read \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.mark_all_as_read>`_
:rtype: `bool`
"""
response = self.__requester.request(
'POST',
'conversations/mark_all_as_read'
)
return response.json() == {}
def conversations_unread_count(self):
"""
Get the number of unread conversations for the current user
:calls: `GET /api/v1/conversations/unread_count \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.unread_count>`_
:returns: simple object with unread_count, example: {'unread_count': '7'}
:rtype: `dict`
"""
response = self.__requester.request(
'GET',
'conversations/unread_count'
)
return response.json()
def conversations_get_running_batches(self):
"""
Returns any currently running conversation batches for the current user.
Conversation batches are created when a bulk private message is sent
asynchronously.
:calls: `GET /api/v1/conversations/batches \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.batches>`_
:returns: dict with list of batch objects - not currently a Class
:rtype: `dict`
"""
response = self.__requester.request(
'GET',
'conversations/batches'
)
return response.json()
def conversations_batch_update(self, conversation_ids, event):
"""
:calls: `PUT /api/v1/conversations \
<https://canvas.instructure.com/doc/api/conversations.html#method.conversations.batch_update>`_
:param conversation_ids: List of conversations to update. Limited to 500 conversations.
:type conversation_ids: `list` of `str`
:param event: The action to take on each conversation.
:type event: `str`
:rtype: :class:`canvasapi.progress.Progress`
"""
from canvasapi.progress import Progress
ALLOWED_EVENTS = [
'mark_as_read',
'mark_as_unread',
'star',
'unstar',
'archive',
'destroy'
]
try:
if event not in ALLOWED_EVENTS:
raise ValueError(
'{} is not a valid action. Please use one of the following: {}'.format(
event,
','.join(ALLOWED_EVENTS)
)
)
if len(conversation_ids) > 500:
raise ValueError(
'You have requested {} updates, which exceeds the limit of 500'.format(
len(conversation_ids)
)
)
response = self.__requester.request(
'PUT',
'conversations',
event=event,
**{"conversation_ids[]": conversation_ids}
)
return_progress = Progress(self.__requester, response.json())
return return_progress
except ValueError as e:
return e
def create_calendar_event(self, calendar_event, **kwargs):
"""
Create a new Calendar Event.
:calls: `POST /api/v1/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.create>`_
:param calendar_event: The attributes of the calendar event.
:type calendar_event: `dict`
:rtype: :class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
if isinstance(calendar_event, dict) and 'context_code' in calendar_event:
kwargs['calendar_event'] = calendar_event
else:
raise RequiredFieldMissing(
"Dictionary with key 'context_codes' is required."
)
response = self.__requester.request(
'POST',
'calendar_events',
_kwargs=combine_kwargs(**kwargs)
)
return CalendarEvent(self.__requester, response.json())
def list_calendar_events(self, **kwargs):
"""
List calendar events.
.. warning::
.. deprecated:: 0.10.0
Use :func:`canvasapi.canvas.Canvas.get_calendar_events` instead.
:calls: `GET /api/v1/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.calendar_event.CalendarEvent`
"""
warnings.warn(
"`list_calendar_events` is being deprecated and will be removed "
"in a future version. Use `get_calendar_events` instead",
DeprecationWarning
)
return self.get_calendar_events(**kwargs)
def get_calendar_events(self, **kwargs):
"""
List calendar events.
:calls: `GET /api/v1/calendar_events \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
return PaginatedList(
CalendarEvent,
self.__requester,
'GET',
'calendar_events',
_kwargs=combine_kwargs(**kwargs)
)
def get_calendar_event(self, calendar_event):
"""
Return single Calendar Event by id
:calls: `GET /api/v1/calendar_events/:id \
<https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.show>`_
:param calendar_event: The object or ID of the calendar event.
:type calendar_event: :class:`canvasapi.calendar_event.CalendarEvent` or int
:rtype: :class:`canvasapi.calendar_event.CalendarEvent`
"""
from canvasapi.calendar_event import CalendarEvent
calendar_event_id = obj_or_id(calendar_event, "calendar_event", (CalendarEvent,))
response = self.__requester.request(
'GET',
'calendar_events/{}'.format(calendar_event_id)
)
return CalendarEvent(self.__requester, response.json())
    def reserve_time_slot(self, calendar_event, participant_id=None, **kwargs):
        """
        Reserve a time slot on the given appointment-style calendar event.
        (The previous docstring, "Return single Calendar Event by id", was a
        copy-paste from :func:`get_calendar_event`.)
        :calls: `POST /api/v1/calendar_events/:id/reservations \
        <https://canvas.instructure.com/doc/api/calendar_events.html#method.calendar_events_api.reserve>`_
        :param calendar_event: The object or ID of the calendar event.
        :type calendar_event: :class:`canvasapi.calendar_event.CalendarEvent` or int
        :param participant_id: The ID of the participant, if given.
        :type participant_id: str
        :rtype: :class:`canvasapi.calendar_event.CalendarEvent`
        """
        from canvasapi.calendar_event import CalendarEvent
        calendar_event_id = obj_or_id(calendar_event, "calendar_event", (CalendarEvent,))
        # Only include the participant segment in the URI when one was given;
        # otherwise the API reserves on behalf of the current user.
        if participant_id:
            uri = 'calendar_events/{}/reservations/{}'.format(
                calendar_event_id, participant_id
            )
        else:
            uri = 'calendar_events/{}/reservations'.format(calendar_event_id)
        response = self.__requester.request(
            'POST',
            uri,
            _kwargs=combine_kwargs(**kwargs)
        )
        return CalendarEvent(self.__requester, response.json())
    def list_appointment_groups(self, **kwargs):
        """
        List appointment groups.
        .. warning::
            .. deprecated:: 0.10.0
                Use :func:`canvasapi.canvas.Canvas.get_appointment_groups` instead.
        :calls: `GET /api/v1/appointment_groups \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.index>`_
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.appointment_group.AppointmentGroup`
        """
        # Deprecated shim: warn, then delegate to the canonical method.
        warnings.warn(
            "`list_appointment_groups` is being deprecated and will be removed"
            " in a future version. Use `get_appointment_groups` instead.",
            DeprecationWarning
        )
        return self.get_appointment_groups(**kwargs)
    def get_appointment_groups(self, **kwargs):
        """
        List appointment groups.
        :calls: `GET /api/v1/appointment_groups \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.index>`_
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.appointment_group.AppointmentGroup`
        """
        # Imported lazily to avoid a circular import with canvasapi.canvas.
        from canvasapi.appointment_group import AppointmentGroup
        return PaginatedList(
            AppointmentGroup,
            self.__requester,
            'GET',
            'appointment_groups',
            _kwargs=combine_kwargs(**kwargs)
        )
def get_appointment_group(self, appointment_group):
"""
Return single Appointment Group by id
:calls: `GET /api/v1/appointment_groups/:id \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.show>`_
:param appointment_group: The ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
"""
from canvasapi.appointment_group import AppointmentGroup
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
response = self.__requester.request(
'GET',
'appointment_groups/{}'.format(appointment_group_id)
)
return AppointmentGroup(self.__requester, response.json())
    def create_appointment_group(self, appointment_group, **kwargs):
        """
        Create a new Appointment Group.
        :calls: `POST /api/v1/appointment_groups \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.create>`_
        :param appointment_group: The attributes of the appointment group;
            must contain the keys ``'context_codes'`` and ``'title'``.
        :type appointment_group: `dict`
        :param title: The title of the appointment group.
        :type title: `str`
        :raises RequiredFieldMissing: if `appointment_group` is a dict missing
            ``'context_codes'`` or ``'title'``.
        :rtype: :class:`canvasapi.appointment_group.AppointmentGroup`
        """
        from canvasapi.appointment_group import AppointmentGroup
        # Validate the payload: a dict with both required keys is forwarded;
        # a dict missing a key raises (context_codes checked first).
        # NOTE(review): a non-dict value falls through all branches and the
        # request is sent without an 'appointment_group' kwarg — presumably
        # intentional upstream behavior; confirm before tightening.
        if (
            isinstance(appointment_group, dict) and
            'context_codes' in appointment_group and
            'title' in appointment_group
        ):
            kwargs['appointment_group'] = appointment_group
        elif (
            isinstance(appointment_group, dict) and
            'context_codes' not in appointment_group
        ):
            raise RequiredFieldMissing(
                "Dictionary with key 'context_codes' is missing."
            )
        elif isinstance(appointment_group, dict) and 'title' not in appointment_group:
            raise RequiredFieldMissing("Dictionary with key 'title' is missing.")
        response = self.__requester.request(
            'POST',
            'appointment_groups',
            _kwargs=combine_kwargs(**kwargs)
        )
        return AppointmentGroup(self.__requester, response.json())
    def list_user_participants(self, appointment_group, **kwargs):
        """
        List user participants in this appointment group.
        .. warning::
            .. deprecated:: 0.10.0
                Use :func:`canvasapi.canvas.Canvas.get_user_participants` instead.
        :calls: `GET /api/v1/appointment_groups/:id/users \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_
        :param appointment_group: The object or ID of the appointment group.
        :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User`
        """
        # Deprecated shim: warn, then delegate to the canonical method.
        warnings.warn(
            "`list_user_participants` is being deprecated and will be removed in a future version."
            " Use `get_user_participants` instead",
            DeprecationWarning
        )
        return self.get_user_participants(appointment_group, **kwargs)
    def get_user_participants(self, appointment_group, **kwargs):
        """
        List user participants in this appointment group.
        :calls: `GET /api/v1/appointment_groups/:id/users \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.users>`_
        :param appointment_group: The object or ID of the appointment group.
        :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User`
        """
        # Lazy imports avoid circular dependencies with canvasapi.canvas.
        from canvasapi.appointment_group import AppointmentGroup
        from canvasapi.user import User
        appointment_group_id = obj_or_id(
            appointment_group, "appointment_group", (AppointmentGroup,)
        )
        return PaginatedList(
            User,
            self.__requester,
            'GET',
            'appointment_groups/{}/users'.format(appointment_group_id),
            _kwargs=combine_kwargs(**kwargs)
        )
    def list_group_participants(self, appointment_group, **kwargs):
        """
        List student group participants in this appointment group.
        .. warning::
            .. deprecated:: 0.10.0
                Use :func:`canvasapi.canvas.Canvas.get_group_participants` instead.
        :calls: `GET /api/v1/appointment_groups/:id/groups \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
        :param appointment_group: The object or ID of the appointment group.
        :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
        """
        # Deprecated shim: warn, then delegate to the canonical method.
        warnings.warn(
            "`list_group_participants` is being deprecated and will be removed "
            "in a future version. Use `get_group_participants` instead",
            DeprecationWarning
        )
        return self.get_group_participants(appointment_group, **kwargs)
    def get_group_participants(self, appointment_group, **kwargs):
        """
        List student group participants in this appointment group.
        :calls: `GET /api/v1/appointment_groups/:id/groups \
        <https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
        :param appointment_group: The object or ID of the appointment group.
        :type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
        """
        # Lazy imports avoid circular dependencies with canvasapi.canvas.
        from canvasapi.appointment_group import AppointmentGroup
        from canvasapi.group import Group
        appointment_group_id = obj_or_id(
            appointment_group, "appointment_group", (AppointmentGroup,)
        )
        return PaginatedList(
            Group,
            self.__requester,
            'GET',
            'appointment_groups/{}/groups'.format(appointment_group_id),
            _kwargs=combine_kwargs(**kwargs)
        )
def get_file(self, file, **kwargs):
"""
Return the standard attachment json object for a file.
:calls: `GET /api/v1/files/:id \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_
:param file: The object or ID of the file to retrieve.
:type file: :class:`canvasapi.file.File` or int
:rtype: :class:`canvasapi.file.File`
"""
file_id = obj_or_id(file, "file", (File,))
response = self.__requester.request(
'GET',
'files/{}'.format(file_id),
_kwargs=combine_kwargs(**kwargs)
)
return File(self.__requester, response.json())
def get_folder(self, folder):
"""
Return the details for a folder
:calls: `GET /api/v1/folders/:id \
<https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_
:param folder: The object or ID of the folder to retrieve.
:type folder: :class:`canvasapi.folder.Folder` or int
:rtype: :class:`canvasapi.folder.Folder`
"""
folder_id = obj_or_id(folder, "folder", (Folder,))
response = self.__requester.request(
'GET',
'folders/{}'.format(folder_id)
)
return Folder(self.__requester, response.json())
    def search_recipients(self, **kwargs):
        """
        Find valid recipients (users, courses and groups) that the current user
        can send messages to.
        Returns a list of mixed data types.
        :calls: `GET /api/v1/search/recipients \
        <https://canvas.instructure.com/doc/api/search.html#method.search.recipients>`_
        :rtype: `list`
        """
        # The endpoint requires a 'search' parameter; a single space acts as a
        # match-everything default when the caller supplies none.
        if 'search' not in kwargs:
            kwargs['search'] = ' '
        response = self.__requester.request(
            'GET',
            'search/recipients',
            _kwargs=combine_kwargs(**kwargs)
        )
        return response.json()
    def search_all_courses(self, **kwargs):
        """
        List all the courses visible in the public index.
        Returns a list of dicts, each containing a single course.
        :calls: `GET /api/v1/search/all_courses \
        <https://canvas.instructure.com/doc/api/search.html#method.search.all_courses>`_
        :rtype: `list`
        """
        response = self.__requester.request(
            'GET',
            'search/all_courses',
            _kwargs=combine_kwargs(**kwargs)
        )
        return response.json()
    def get_outcome(self, outcome):
        """
        Returns the details of the outcome with the given id.
        :calls: `GET /api/v1/outcomes/:id \
        <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
        :param outcome: The outcome object or ID to return.
        :type outcome: :class:`canvasapi.outcome.Outcome` or int
        :returns: An Outcome object.
        :rtype: :class:`canvasapi.outcome.Outcome`
        """
        # Lazy import avoids a circular dependency with canvasapi.canvas.
        from canvasapi.outcome import Outcome
        outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
        response = self.__requester.request(
            'GET',
            'outcomes/{}'.format(outcome_id)
        )
        return Outcome(self.__requester, response.json())
    def get_root_outcome_group(self):
        """
        Redirect to root outcome group for context.
        :calls: `GET /api/v1/global/root_outcome_group \
        <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.redirect>`_
        :returns: The OutcomeGroup of the context.
        :rtype: :class:`canvasapi.outcome.OutcomeGroup`
        """
        from canvasapi.outcome import OutcomeGroup
        response = self.__requester.request(
            'GET',
            'global/root_outcome_group'
        )
        return OutcomeGroup(self.__requester, response.json())
    def get_outcome_group(self, group):
        """
        Returns the details of the Outcome Group with the given id.
        :calls: `GET /api/v1/global/outcome_groups/:id \
        <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_
        :param group: The outcome group object or ID to return.
        :type group: :class:`canvasapi.outcome.OutcomeGroup` or int
        :returns: An outcome group object.
        :rtype: :class:`canvasapi.outcome.OutcomeGroup`
        """
        # Lazy import avoids a circular dependency with canvasapi.canvas.
        from canvasapi.outcome import OutcomeGroup
        outcome_group_id = obj_or_id(group, "group", (OutcomeGroup,))
        response = self.__requester.request(
            'GET',
            'global/outcome_groups/{}'.format(outcome_group_id)
        )
        return OutcomeGroup(self.__requester, response.json())
    def get_progress(self, progress, **kwargs):
        """
        Get a specific progress.
        :calls: `GET /api/v1/progress/:id
        <https://canvas.instructure.com/doc/api/progress.html#method.progress.show>`_
        :param progress: The object or ID of the progress to retrieve.
        :type progress: int, str or :class:`canvasapi.progress.Progress`
        :rtype: :class:`canvasapi.progress.Progress`
        """
        from canvasapi.progress import Progress
        progress_id = obj_or_id(progress, "progress", (Progress,))
        response = self.__requester.request(
            'GET',
            'progress/{}'.format(progress_id),
            _kwargs=combine_kwargs(**kwargs)
        )
        return Progress(self.__requester, response.json())
def get_announcements(self, **kwargs):
"""
List announcements.
:calls: `GET /api/v1/announcements \
<https://canvas.instructure.com/doc/api/announcements.html#method.announcements_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.discussion_topic.DiscussionTopic`
"""
from canvasapi.discussion_topic import DiscussionTopic
return PaginatedList(
DiscussionTopic,
self.__requester,
'GET',
'announcements',
_kwargs=combine_kwargs(**kwargs)
)
| 34.563574 | 115 | 0.616872 |
fa3aae5b700387bb3d175ada45f015beb71c916d | 16,463 | py | Python | patches/kaggle_gcp.py | rpatil524/docker-python | 97ab4b8de203f4856cf5bcb7960419ad2b982c39 | [
"Apache-2.0"
] | 2 | 2020-12-16T20:47:59.000Z | 2021-06-03T16:32:33.000Z | patches/kaggle_gcp.py | aniketmaurya/docker-python | 5be3502c23f2212e5c53fca33586cc212936f366 | [
"Apache-2.0"
] | null | null | null | patches/kaggle_gcp.py | aniketmaurya/docker-python | 5be3502c23f2212e5c53fca33586cc212936f366 | [
"Apache-2.0"
] | null | null | null | import os
import inspect
from google.auth import credentials, environment_vars
from google.auth.exceptions import RefreshError
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud import bigquery
from google.cloud.exceptions import Forbidden
from google.cloud.bigquery._http import Connection
from kaggle_secrets import GcpTarget, UserSecretsClient
from log import Log
KAGGLE_GCP_CLIENT_USER_AGENT="kaggle-gcp-client/1.0"
def get_integrations():
    """Parse KAGGLE_KERNEL_INTEGRATIONS (colon-separated names) into a
    KernelIntegrations object; unknown names are logged and skipped."""
    result = KernelIntegrations()
    raw_value = os.getenv("KAGGLE_KERNEL_INTEGRATIONS")
    if raw_value is None:
        return result
    for integration in raw_value.split(':'):
        try:
            result.add_integration(GcpTarget[integration.upper()])
        except KeyError:
            Log.error(f"Unknown integration target: {integration.upper()}")
    return result
class KernelIntegrations():
    """Tracks which GCP integrations are attached to the current kernel."""
    def __init__(self):
        # Dict used as an ordered set of enabled GcpTarget values.
        self.integrations = {}
    def add_integration(self, target):
        """Mark *target* as enabled."""
        self.integrations[target] = True
    def has_integration(self, target):
        """Return True if *target* was previously added."""
        return target in self.integrations
    def has_bigquery(self):
        """True when the BigQuery integration is enabled."""
        return GcpTarget.BIGQUERY in self.integrations
    def has_gcs(self):
        """True when the Cloud Storage integration is enabled."""
        return GcpTarget.GCS in self.integrations
    def has_cloudai(self):
        """True when either the Cloud AI or the AutoML integration is enabled."""
        return (GcpTarget.CLOUDAI in self.integrations
                or GcpTarget.AUTOML in self.integrations)
class KaggleKernelCredentials(credentials.Credentials):
    """Custom Credentials used to authenticate using the Kernel's connected OAuth account.
    Example usage:
    client = bigquery.Client(project='ANOTHER_PROJECT',
    credentials=KaggleKernelCredentials())
    """
    def __init__(self, target=GcpTarget.BIGQUERY):
        super().__init__()
        # Which GCP service these credentials authorize (default: BigQuery).
        self.target = target
    def refresh(self, request):
        """Fetch a fresh access token for self.target from the Kaggle user
        secrets service, storing it in self.token / self.expiry.
        Raises RefreshError on any failure."""
        try:
            client = UserSecretsClient()
            # Each target uses a different token endpoint on the secrets service.
            if self.target == GcpTarget.BIGQUERY:
                self.token, self.expiry = client.get_bigquery_access_token()
            elif self.target == GcpTarget.GCS:
                self.token, self.expiry = client._get_gcs_access_token()
            elif self.target == GcpTarget.CLOUDAI:
                self.token, self.expiry = client._get_cloudai_access_token()
        except ConnectionError as e:
            # Network off is a common kernel configuration; give an actionable hint.
            Log.error(f"Connection error trying to refresh access token: {e}")
            print("There was a connection error trying to fetch the access token. "
                  f"Please ensure internet is on in order to use the {self.target.service} Integration.")
            raise RefreshError('Unable to refresh access token due to connection error.') from e
        except Exception as e:
            Log.error(f"Error trying to refresh access token: {e}")
            # Most failures here are a missing integration; check and hint.
            if (not get_integrations().has_integration(self.target)):
                Log.error(f"No {self.target.service} integration found.")
                print(
                    f"Please ensure you have selected a {self.target.service} account in the Notebook Add-ons menu.")
            raise RefreshError('Unable to refresh access token.') from e
class KaggleKernelWithProjetCredentials(KaggleKernelCredentials):
    """Kaggle credentials carrying an explicit quota (billing) project id.
    NOTE: the class name misspells "Project"; it is kept as-is because it is
    part of the module's public surface.
    """
    def __init__(self, parentCredential=None, quota_project_id=None):
        # Inherit the service target from the wrapped credential.
        super().__init__(target=parentCredential.target)
        self._quota_project_id=quota_project_id
class _DataProxyConnection(Connection):
    """Custom Connection class used to proxy the BigQuery client to Kaggle's data proxy."""
    def __init__(self, client, **kwargs):
        super().__init__(client, **kwargs)
        # The proxy authenticates requests via this header token.
        self.extra_headers["X-KAGGLE-PROXY-DATA"] = os.getenv(
            "KAGGLE_DATA_PROXY_TOKEN")
    def api_request(self, *args, **kwargs):
        """Wrap Connection.api_request in order to handle errors gracefully.
        """
        try:
            return super().api_request(*args, **kwargs)
        except Forbidden as e:
            # A 403 from the proxy usually means the user meant to use their
            # own BigQuery account rather than the public integration.
            msg = ("Permission denied using Kaggle's public BigQuery integration. "
                   "Did you mean to select a BigQuery account in the Notebook Add-ons menu?")
            print(msg)
            Log.info(msg)
            raise e
class PublicBigqueryClient(bigquery.client.Client):
    """A modified BigQuery client that routes requests using Kaggle's Data Proxy to provide free access to Public Datasets.
    Example usage:
    from kaggle import PublicBigqueryClient
    client = PublicBigqueryClient()
    """
    def __init__(self, *args, **kwargs):
        data_proxy_project = os.getenv("KAGGLE_DATA_PROXY_PROJECT")
        default_api_endpoint = os.getenv("KAGGLE_DATA_PROXY_URL")
        # The proxy authenticates via header token, so the Google client
        # itself uses anonymous credentials with a no-op refresh.
        anon_credentials = credentials.AnonymousCredentials()
        anon_credentials.refresh = lambda *args: None
        super().__init__(
            project=data_proxy_project, credentials=anon_credentials, *args, **kwargs
        )
        # TODO: Remove this once https://github.com/googleapis/google-cloud-python/issues/7122 is implemented.
        self._connection = _DataProxyConnection(self, api_endpoint=default_api_endpoint)
def has_been_monkeypatched(method):
    """Return True if *method* is defined in this module, i.e. has already
    been wrapped by one of the monkeypatch helpers."""
    # inspect.getsourcefile can return None for objects with no source file;
    # the original `"kaggle_gcp" in inspect.getsourcefile(method)` would then
    # raise TypeError. Guard against that.
    source_file = inspect.getsourcefile(method)
    return source_file is not None and "kaggle_gcp" in source_file
def is_user_secrets_token_set():
    """True when the kernel has a user-secrets token in the environment."""
    return "KAGGLE_USER_SECRETS_TOKEN" in os.environ
def is_proxy_token_set():
    """True when the BigQuery data-proxy token is in the environment."""
    return "KAGGLE_DATA_PROXY_TOKEN" in os.environ
def init_bigquery():
    """Monkeypatch google.cloud.bigquery.Client so kernel code transparently
    uses either Kaggle's public data proxy or the user's connected BigQuery
    account. Returns the (possibly patched) bigquery module."""
    from google.cloud import bigquery
    # Nothing to patch if neither a proxy token nor user secrets are present.
    if not (is_proxy_token_set() or is_user_secrets_token_set()):
        return bigquery
    # If this Notebook has bigquery integration on startup, preload the Kaggle Credentials
    # object for magics to work.
    if get_integrations().has_bigquery():
        from google.cloud.bigquery import magics
        magics.context.credentials = KaggleKernelCredentials()
    def monkeypatch_bq(bq_client, *args, **kwargs):
        # Replacement factory: decides at call time whether to build the
        # proxy-backed public client or a normal client with Kaggle creds.
        from kaggle_gcp import get_integrations, PublicBigqueryClient, KaggleKernelCredentials
        specified_credentials = kwargs.get('credentials')
        has_bigquery = get_integrations().has_bigquery()
        # Prioritize passed in project id, but if it is missing look for env var.
        arg_project = kwargs.get('project')
        explicit_project_id = arg_project or os.environ.get(environment_vars.PROJECT)
        # This is a hack to get around the bug in google-cloud library.
        # Remove these two lines once this is resolved:
        # https://github.com/googleapis/google-cloud-python/issues/8108
        if explicit_project_id:
            Log.info(f"Explicit project set to {explicit_project_id}")
            kwargs['project'] = explicit_project_id
        if explicit_project_id is None and specified_credentials is None and not has_bigquery:
            msg = "Using Kaggle's public dataset BigQuery integration."
            Log.info(msg)
            print(msg)
            return PublicBigqueryClient(*args, **kwargs)
        else:
            if specified_credentials is None:
                Log.info("No credentials specified, using KaggleKernelCredentials.")
                kwargs['credentials'] = KaggleKernelCredentials()
                if (not has_bigquery):
                    Log.info("No bigquery integration found, creating client anyways.")
                    print('Please ensure you have selected a BigQuery '
                          'account in the Notebook Add-ons menu.')
            if explicit_project_id is None:
                Log.info("No project specified while using the unmodified client.")
                print('Please ensure you specify a project id when creating the client'
                      ' in order to use your BigQuery account.')
            kwargs['client_info'] = set_kaggle_user_agent(kwargs.get('client_info'))
            return bq_client(*args, **kwargs)
    # Monkey patches BigQuery client creation to use proxy or user-connected GCP account.
    # Deprecated in favor of Kaggle.DataProxyClient().
    # TODO: Remove this once uses have migrated to that new interface.
    bq_client = bigquery.Client
    # Guard against double-patching (e.g. init() called twice).
    if (not has_been_monkeypatched(bigquery.Client)):
        bigquery.Client = lambda *args, **kwargs: monkeypatch_bq(
            bq_client, *args, **kwargs)
    return bigquery
# Monkey patch for aiplatform init
# eg
# from google.cloud import aiplatform
# aiplatform.init(args)
def monkeypatch_aiplatform_init(aiplatform_klass, kaggle_kernel_credentials):
    """Patch aiplatform.init so it defaults to Kaggle kernel credentials
    when the caller does not supply any."""
    aiplatform_init = aiplatform_klass.init
    def patched_init(self, *args, **kwargs):
        specified_credentials = kwargs.get('credentials')
        if specified_credentials is None:
            Log.info("No credentials specified, using KaggleKernelCredentials.")
            kwargs['credentials'] = kaggle_kernel_credentials
        return aiplatform_init(self, *args, **kwargs)
    # Guard against double-patching (e.g. init() called twice).
    if (not has_been_monkeypatched(aiplatform_klass.init)):
        aiplatform_klass.init = patched_init
        Log.info("aiplatform.init patched")
def monkeypatch_client(client_klass, kaggle_kernel_credentials):
    """Patch a google-cloud client class's __init__ so missing credentials
    default to the given Kaggle kernel credentials and the Kaggle user agent
    is attached for usage attribution."""
    client_init = client_klass.__init__
    def patched_init(self, *args, **kwargs):
        specified_credentials = kwargs.get('credentials')
        if specified_credentials is None:
            Log.info("No credentials specified, using KaggleKernelCredentials.")
            # Some GCP services demand the billing and target project must be the same.
            # To avoid using default service account based credential as caller credential
            # user need to provide ClientOptions with quota_project_id:
            # srv.Client(client_options=client_options.ClientOptions(quota_project_id="YOUR PROJECT"))
            client_options=kwargs.get('client_options')
            if client_options != None and client_options.quota_project_id != None:
                kwargs['credentials'] = KaggleKernelWithProjetCredentials(
                    parentCredential = kaggle_kernel_credentials,
                    quota_project_id = client_options.quota_project_id)
            else:
                kwargs['credentials'] = kaggle_kernel_credentials
            kwargs['client_info'] = set_kaggle_user_agent(kwargs.get('client_info'))
        return client_init(self, *args, **kwargs)
    # Guard against double-patching (e.g. init() called twice).
    if (not has_been_monkeypatched(client_klass.__init__)):
        client_klass.__init__ = patched_init
        Log.info(f"Client patched: {client_klass}")
def set_kaggle_user_agent(client_info: ClientInfo):
    """Return a ClientInfo whose user agent attributes usage to Kaggle,
    creating one if *client_info* is None."""
    # Add kaggle client user agent in order to attribute usage.
    if client_info is None:
        client_info = ClientInfo(user_agent=KAGGLE_GCP_CLIENT_USER_AGENT)
    else:
        client_info.user_agent = KAGGLE_GCP_CLIENT_USER_AGENT
    return client_info
def init_gcs():
    """Patch google.cloud.storage.Client to default to Kaggle GCS credentials
    when the kernel has the GCS integration; returns the storage module."""
    from google.cloud import storage
    if not is_user_secrets_token_set():
        return storage
    from kaggle_gcp import get_integrations
    if not get_integrations().has_gcs():
        return storage
    from kaggle_secrets import GcpTarget
    from kaggle_gcp import KaggleKernelCredentials
    monkeypatch_client(
        storage.Client,
        KaggleKernelCredentials(target=GcpTarget.GCS))
    return storage
def init_automl():
    """Patch the AutoML client classes (GA and v1beta1) to default to Kaggle
    Cloud AI credentials when the kernel has a Cloud AI integration."""
    from google.cloud import automl, automl_v1beta1
    if not is_user_secrets_token_set():
        return
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return
    from kaggle_secrets import GcpTarget
    from kaggle_gcp import KaggleKernelCredentials
    kaggle_kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    # Patch the 2 GA clients: AutoMlClient and PreditionServiceClient
    monkeypatch_client(automl.AutoMlClient, kaggle_kernel_credentials)
    monkeypatch_client(automl.PredictionServiceClient, kaggle_kernel_credentials)
    # The AutoML client library exposes 3 different client classes (AutoMlClient,
    # TablesClient, PredictionServiceClient), so patch each of them.
    # The same KaggleKernelCredentials are passed to all of them.
    # The GcsClient class is only used internally by TablesClient.
    # The beta version of the clients that are now GA are included here for now.
    # They are deprecated and will be removed by 1 May 2020.
    monkeypatch_client(automl_v1beta1.AutoMlClient, kaggle_kernel_credentials)
    monkeypatch_client(automl_v1beta1.PredictionServiceClient, kaggle_kernel_credentials)
    # The TablesClient is still in beta, so this will not be deprecated until
    # the TablesClient is GA.
    monkeypatch_client(automl_v1beta1.TablesClient, kaggle_kernel_credentials)
def init_translation_v2():
    """Patch the Translation v2 client to default to Kaggle Cloud AI
    credentials; returns the translate_v2 module."""
    from google.cloud import translate_v2
    if not is_user_secrets_token_set():
        return translate_v2
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return translate_v2
    from kaggle_secrets import GcpTarget
    kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    monkeypatch_client(translate_v2.Client, kernel_credentials)
    return translate_v2
def init_translation_v3():
    """Patch the Translation v3 client (a different class than v2) to default
    to Kaggle Cloud AI credentials; returns the translate_v3 module."""
    # Translate v3 exposes different client than translate v2.
    from google.cloud import translate_v3
    if not is_user_secrets_token_set():
        return translate_v3
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return translate_v3
    from kaggle_secrets import GcpTarget
    kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    monkeypatch_client(translate_v3.TranslationServiceClient, kernel_credentials)
    return translate_v3
def init_natural_language():
    """Patch the Natural Language clients (sync and async) to default to
    Kaggle Cloud AI credentials; returns the language module."""
    from google.cloud import language
    if not is_user_secrets_token_set():
        return language
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return language
    from kaggle_secrets import GcpTarget
    kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    monkeypatch_client(language.LanguageServiceClient, kernel_credentials)
    monkeypatch_client(language.LanguageServiceAsyncClient, kernel_credentials)
    return language
def init_ucaip():
    """Patch aiplatform.init (which flows to all uCAIP services) to default to
    Kaggle Cloud AI credentials."""
    from google.cloud import aiplatform
    if not is_user_secrets_token_set():
        return
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return
    from kaggle_secrets import GcpTarget
    from kaggle_gcp import KaggleKernelCredentials
    kaggle_kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    # Patch the ucaip init method, this flows down to all ucaip services
    monkeypatch_aiplatform_init(aiplatform, kaggle_kernel_credentials)
def init_video_intelligence():
    """Patch the Video Intelligence clients (sync and async) to default to
    Kaggle Cloud AI credentials; returns the videointelligence module."""
    from google.cloud import videointelligence
    if not is_user_secrets_token_set():
        return videointelligence
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return videointelligence
    from kaggle_secrets import GcpTarget
    kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    monkeypatch_client(
        videointelligence.VideoIntelligenceServiceClient,
        kernel_credentials)
    monkeypatch_client(
        videointelligence.VideoIntelligenceServiceAsyncClient,
        kernel_credentials)
    return videointelligence
def init_vision():
    """Patch the Vision image-annotator clients (sync and async) to default to
    Kaggle Cloud AI credentials; returns the vision module."""
    from google.cloud import vision
    if not is_user_secrets_token_set():
        return vision
    from kaggle_gcp import get_integrations
    if not get_integrations().has_cloudai():
        return vision
    from kaggle_secrets import GcpTarget
    kernel_credentials = KaggleKernelCredentials(target=GcpTarget.CLOUDAI)
    monkeypatch_client(vision.ImageAnnotatorClient, kernel_credentials)
    monkeypatch_client(vision.ImageAnnotatorAsyncClient, kernel_credentials)
    return vision
def init():
    """Apply all GCP client monkeypatches; safe to call more than once since
    each patcher checks has_been_monkeypatched first."""
    init_bigquery()
    init_gcs()
    init_automl()
    init_translation_v2()
    init_translation_v3()
    init_natural_language()
    init_video_intelligence()
    init_vision()
    init_ucaip()
# We need to initialize the monkeypatching of the client libraries
# here since there is a circular dependency between our import hook version
# google.cloud.* and kaggle_gcp. By calling init here, we guarantee
# that regardless of the original import that caused google.cloud.* to be
# loaded, the monkeypatching will be done.
init()
| 41.573232 | 123 | 0.717974 |
d0e98972adad372c61ede004d625adce31eac249 | 22,105 | py | Python | python/ray/tests/test_basic.py | FarzanT/ray | d747071dd9e883bd4ceefe80d0344f287630c4e5 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_basic.py | FarzanT/ray | d747071dd9e883bd4ceefe80d0344f287630c4e5 | [
"Apache-2.0"
] | null | null | null | python/ray/tests/test_basic.py | FarzanT/ray | d747071dd9e883bd4ceefe80d0344f287630c4e5 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import logging
import os
import pickle
import sys
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
import ray.test_utils
logger = logging.getLogger(__name__)
# https://github.com/ray-project/ray/issues/6662
def test_ignore_http_proxy(shutdown_only):
    """Remote calls must still work when HTTP(S) proxy env vars are set."""
    ray.init(num_cpus=1)
    # Set the proxy vars after init so workers see them at task time.
    os.environ["http_proxy"] = "http://example.com"
    os.environ["https_proxy"] = "http://example.com"
    @ray.remote
    def f():
        return 1
    assert ray.get(f.remote()) == 1
# https://github.com/ray-project/ray/issues/7263
def test_grpc_message_size(shutdown_only):
    """A task spec larger than the default gRPC message limit must succeed."""
    ray.init(num_cpus=1)
    @ray.remote
    def bar(*a):
        return
    # 50KiB, not enough to spill to plasma, but will be inlined.
    def f():
        return np.zeros(50000, dtype=np.uint8)
    # Executes a 10MiB task spec
    ray.get(bar.remote(*[f() for _ in range(200)]))
# https://github.com/ray-project/ray/issues/7287
def test_omp_threads_set(shutdown_only):
    """ray.init should pin OMP_NUM_THREADS to 1 automatically."""
    ray.init(num_cpus=1)
    # Should have been auto set by ray init.
    assert os.environ["OMP_NUM_THREADS"] == "1"
def test_submit_api(shutdown_only):
    """Exercise the low-level _remote() submit API for tasks and actors:
    num_returns variants, resource requests, and infeasible placement."""
    ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
    @ray.remote
    def f(n):
        return list(range(n))
    @ray.remote
    def g():
        return ray.get_gpu_ids()
    # num_returns=0 yields no object refs at all.
    assert f._remote([0], num_returns=0) is None
    id1 = f._remote(args=[1], num_returns=1)
    assert ray.get(id1) == [0]
    id1, id2 = f._remote(args=[2], num_returns=2)
    assert ray.get([id1, id2]) == [0, 1]
    id1, id2, id3 = f._remote(args=[3], num_returns=3)
    assert ray.get([id1, id2, id3]) == [0, 1, 2]
    assert ray.get(
        g._remote(args=[], num_cpus=1, num_gpus=1,
                  resources={"Custom": 1})) == [0]
    # A task demanding a nonexistent resource must never become ready.
    infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
    assert ray.get(g._remote()) == []
    ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
    assert len(ready_ids) == 0
    assert len(remaining_ids) == 1
    @ray.remote
    class Actor:
        def __init__(self, x, y=0):
            self.x = x
            self.y = y
        def method(self, a, b=0):
            return self.x, self.y, a, b
        def gpu_ids(self):
            return ray.get_gpu_ids()
    @ray.remote
    class Actor2:
        def __init__(self):
            pass
        def method(self):
            pass
    # Actor creation via _remote with positional args, kwargs, and resources.
    a = Actor._remote(
        args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
    a2 = Actor2._remote()
    ray.get(a2.method._remote())
    # Actor method call with multiple return values.
    id1, id2, id3, id4 = a.method._remote(
        args=["test"], kwargs={"b": 2}, num_returns=4)
    assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_invalid_arguments(shutdown_only):
    """Each @ray.remote option must reject out-of-range values (negative
    numbers outside the allowed sentinel, and non-integers)."""
    ray.init(num_cpus=2)
    # num_returns: only 0 or a positive integer is valid.
    for opt in [np.random.randint(-100, -1), np.random.uniform(0, 1)]:
        with pytest.raises(
                ValueError,
                match="The keyword 'num_returns' only accepts 0 or a"
                " positive integer"):
            @ray.remote(num_returns=opt)
            def g1():
                return 1
    # max_retries: -1 (infinite) is additionally allowed.
    for opt in [np.random.randint(-100, -2), np.random.uniform(0, 1)]:
        with pytest.raises(
                ValueError,
                match="The keyword 'max_retries' only accepts 0, -1 or a"
                " positive integer"):
            @ray.remote(max_retries=opt)
            def g2():
                return 1
    # max_calls: only 0 or a positive integer is valid.
    for opt in [np.random.randint(-100, -1), np.random.uniform(0, 1)]:
        with pytest.raises(
                ValueError,
                match="The keyword 'max_calls' only accepts 0 or a positive"
                " integer"):
            @ray.remote(max_calls=opt)
            def g3():
                return 1
    # max_restarts: -1 (infinite) is additionally allowed for actors.
    for opt in [np.random.randint(-100, -2), np.random.uniform(0, 1)]:
        with pytest.raises(
                ValueError,
                match="The keyword 'max_restarts' only accepts -1, 0 or a"
                " positive integer"):
            @ray.remote(max_restarts=opt)
            class A1:
                x = 1
    # max_task_retries: -1 (infinite) is additionally allowed for actors.
    for opt in [np.random.randint(-100, -2), np.random.uniform(0, 1)]:
        with pytest.raises(
                ValueError,
                match="The keyword 'max_task_retries' only accepts -1, 0 or a"
                " positive integer"):
            @ray.remote(max_task_retries=opt)
            class A2:
                x = 1
def test_many_fractional_resources(shutdown_only):
    """Randomized check that fractional CPU/GPU/custom resource requests are
    assigned exactly (truncated to 4 decimal places) and fully released."""
    ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
    @ray.remote
    def g():
        return 1
    @ray.remote
    def f(block, accepted_resources):
        # Compare the resources the worker actually received against the
        # resources the caller requested.
        true_resources = {
            resource: value[0][1]
            for resource, value in ray.get_resource_ids().items()
        }
        if block:
            ray.get(g.remote())
        return ray.test_utils.dicts_equal(true_resources, accepted_resources)
    # Check that the resource are assigned correctly.
    result_ids = []
    for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
        # Resource fractions are truncated to 4 decimal places by Ray.
        resource_set = {"CPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
        resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
        result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
        resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
        result_ids.append(
            f._remote([False, resource_set], resources={"Custom": rand1}))
        resource_set = {
            "CPU": int(rand1 * 10000) / 10000,
            "GPU": int(rand2 * 10000) / 10000,
            "Custom": int(rand3 * 10000) / 10000
        }
        result_ids.append(
            f._remote(
                [False, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
        # block=True forces a nested task, exercising resource release
        # while the parent is blocked.
        result_ids.append(
            f._remote(
                [True, resource_set],
                num_cpus=rand1,
                num_gpus=rand2,
                resources={"Custom": rand3}))
    assert all(ray.get(result_ids))
    # Check that the available resources at the end are the same as the
    # beginning.
    stop_time = time.time() + 10
    correct_available_resources = False
    while time.time() < stop_time:
        available_resources = ray.available_resources()
        if ("CPU" in available_resources
                and ray.available_resources()["CPU"] == 2.0
                and "GPU" in available_resources
                and ray.available_resources()["GPU"] == 2.0
                and "Custom" in available_resources
                and ray.available_resources()["Custom"] == 2.0):
            correct_available_resources = True
            break
    if not correct_available_resources:
        assert False, "Did not get correct available resources."
def test_background_tasks_with_max_calls(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def g():
time.sleep(.1)
return 0
@ray.remote(max_calls=1, max_retries=0)
def f():
return [g.remote()]
nested = ray.get([f.remote() for _ in range(10)])
# Should still be able to retrieve these objects, since f's workers will
# wait for g to finish before exiting.
ray.get([x[0] for x in nested])
@ray.remote(max_calls=1, max_retries=0)
def f():
return os.getpid(), g.remote()
nested = ray.get([f.remote() for _ in range(10)])
while nested:
pid, g_id = nested.pop(0)
ray.get(g_id)
del g_id
ray.test_utils.wait_for_pid_to_exit(pid)
def test_fair_queueing(shutdown_only):
ray.init(num_cpus=1, _system_config={"fair_queueing_enabled": 1})
@ray.remote
def h():
return 0
@ray.remote
def g():
return ray.get(h.remote())
@ray.remote
def f():
return ray.get(g.remote())
# This will never finish without fair queueing of {f, g, h}:
# https://github.com/ray-project/ray/issues/3644
ready, _ = ray.wait(
[f.remote() for _ in range(1000)], timeout=60.0, num_returns=1000)
assert len(ready) == 1000, len(ready)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
object_ref = ray.put(value_before)
value_after = ray.get(object_ref)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
object_ref = ray.put(value_before)
value_after = ray.get(object_ref)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
object_ref = ray.put(value_before)
value_after = ray.get(object_ref)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
object_ref = ray.put(value_before)
value_after = ray.get(object_ref)
assert value_before == value_after
@pytest.mark.skipif(sys.platform != "linux", reason="Failing on Windows")
def test_wait_timing(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def f():
time.sleep(1)
future = f.remote()
start = time.time()
ready, not_ready = ray.wait([future], timeout=0.2)
assert 0.2 < time.time() - start < 0.3
assert len(ready) == 0
assert len(not_ready) == 1
def test_function_descriptor():
python_descriptor = ray._raylet.PythonFunctionDescriptor(
"module_name", "function_name", "class_name", "function_hash")
python_descriptor2 = pickle.loads(pickle.dumps(python_descriptor))
assert python_descriptor == python_descriptor2
assert hash(python_descriptor) == hash(python_descriptor2)
assert python_descriptor.function_id == python_descriptor2.function_id
java_descriptor = ray._raylet.JavaFunctionDescriptor(
"class_name", "function_name", "signature")
java_descriptor2 = pickle.loads(pickle.dumps(java_descriptor))
assert java_descriptor == java_descriptor2
assert python_descriptor != java_descriptor
assert python_descriptor != object()
d = {python_descriptor: 123}
assert d.get(python_descriptor2) == 123
def test_ray_options(shutdown_only):
@ray.remote(
num_cpus=2, num_gpus=3, memory=150 * 2**20, resources={"custom1": 1})
def foo():
import time
# Sleep for a heartbeat period to ensure resources changing reported.
time.sleep(0.1)
return ray.available_resources()
ray.init(num_cpus=10, num_gpus=10, resources={"custom1": 2})
without_options = ray.get(foo.remote())
with_options = ray.get(
foo.options(
num_cpus=3,
num_gpus=4,
memory=50 * 2**20,
resources={
"custom1": 0.5
}).remote())
to_check = ["CPU", "GPU", "memory", "custom1"]
for key in to_check:
assert without_options[key] != with_options[key], key
assert without_options != with_options
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 0,
"object_store_memory": 75 * 1024 * 1024,
}],
indirect=True)
def test_fetch_local(ray_start_cluster_head):
cluster = ray_start_cluster_head
cluster.add_node(num_cpus=2, object_store_memory=75 * 1024 * 1024)
signal_actor = ray.test_utils.SignalActor.remote()
@ray.remote
def put():
ray.wait([signal_actor.wait.remote()])
return np.random.rand(5 * 1024 * 1024) # 40 MB data
local_ref = ray.put(np.random.rand(5 * 1024 * 1024))
remote_ref = put.remote()
# Data is not ready in any node
(ready_ref, remaining_ref) = ray.wait(
[remote_ref], timeout=2, fetch_local=False)
assert (0, 1) == (len(ready_ref), len(remaining_ref))
ray.wait([signal_actor.send.remote()])
# Data is ready in some node, but not local node.
(ready_ref, remaining_ref) = ray.wait([remote_ref], fetch_local=False)
assert (1, 0) == (len(ready_ref), len(remaining_ref))
(ready_ref, remaining_ref) = ray.wait(
[remote_ref], timeout=2, fetch_local=True)
assert (0, 1) == (len(ready_ref), len(remaining_ref))
del local_ref
(ready_ref, remaining_ref) = ray.wait([remote_ref], fetch_local=True)
assert (1, 0) == (len(ready_ref), len(remaining_ref))
def test_nested_functions(ray_start_shared_local_modes):
# Make sure that remote functions can use other values that are defined
# after the remote function but before the first function invocation.
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
# Test a remote function that recursively calls itself.
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
# Test remote functions that recursively call each other.
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_shared_local_modes):
class ClassA:
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
# Serialize the recursive objects.
for obj in recursive_objects:
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(
ray_start_shared_local_modes):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo:
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_ref(
ray_start_shared_local_modes):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo:
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_keyword_args(ray_start_shared_local_modes):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_args_starkwargs(ray_start_shared_local_modes):
def starkwargs(a, b, **kwargs):
return a, b, kwargs
class TestActor:
def starkwargs(self, a, b, **kwargs):
return a, b, kwargs
def test_function(fn, remote_fn):
assert fn(1, 2, x=3) == ray.get(remote_fn.remote(1, 2, x=3))
with pytest.raises(TypeError):
remote_fn.remote(3)
remote_test_function = ray.remote(test_function)
remote_starkwargs = ray.remote(starkwargs)
test_function(starkwargs, remote_starkwargs)
ray.get(remote_test_function.remote(starkwargs, remote_starkwargs))
remote_actor_class = ray.remote(TestActor)
remote_actor = remote_actor_class.remote()
actor_method = remote_actor.starkwargs
local_actor = TestActor()
local_method = local_actor.starkwargs
test_function(local_method, actor_method)
ray.get(remote_test_function.remote(local_method, actor_method))
def test_args_named_and_star(ray_start_shared_local_modes):
def hello(a, x="hello", **kwargs):
return a, x, kwargs
class TestActor:
def hello(self, a, x="hello", **kwargs):
return a, x, kwargs
def test_function(fn, remote_fn):
assert fn(1, x=2, y=3) == ray.get(remote_fn.remote(1, x=2, y=3))
assert fn(1, 2, y=3) == ray.get(remote_fn.remote(1, 2, y=3))
assert fn(1, y=3) == ray.get(remote_fn.remote(1, y=3))
assert fn(1, ) == ray.get(remote_fn.remote(1, ))
assert fn(1) == ray.get(remote_fn.remote(1))
with pytest.raises(TypeError):
remote_fn.remote(1, 2, x=3)
remote_test_function = ray.remote(test_function)
remote_hello = ray.remote(hello)
test_function(hello, remote_hello)
ray.get(remote_test_function.remote(hello, remote_hello))
remote_actor_class = ray.remote(TestActor)
remote_actor = remote_actor_class.remote()
actor_method = remote_actor.hello
local_actor = TestActor()
local_method = local_actor.hello
test_function(local_method, actor_method)
ray.get(remote_test_function.remote(local_method, actor_method))
def test_args_stars_after(ray_start_shared_local_modes):
def star_args_after(a="hello", b="heo", *args, **kwargs):
return a, b, args, kwargs
class TestActor:
def star_args_after(self, a="hello", b="heo", *args, **kwargs):
return a, b, args, kwargs
def test_function(fn, remote_fn):
assert fn("hi", "hello", 2) == ray.get(
remote_fn.remote("hi", "hello", 2))
assert fn(
"hi", "hello", 2, hi="hi") == ray.get(
remote_fn.remote("hi", "hello", 2, hi="hi"))
assert fn(hi="hi") == ray.get(remote_fn.remote(hi="hi"))
remote_test_function = ray.remote(test_function)
remote_star_args_after = ray.remote(star_args_after)
test_function(star_args_after, remote_star_args_after)
ray.get(
remote_test_function.remote(star_args_after, remote_star_args_after))
remote_actor_class = ray.remote(TestActor)
remote_actor = remote_actor_class.remote()
actor_method = remote_actor.star_args_after
local_actor = TestActor()
local_method = local_actor.star_args_after
test_function(local_method, actor_method)
ray.get(remote_test_function.remote(local_method, actor_method))
def test_object_id_backward_compatibility(ray_start_shared_local_modes):
# We've renamed Python's `ObjectID` to `ObjectRef`, and added a type
# alias for backward compatibility.
# This test is to make sure legacy code can still use `ObjectID`.
# TODO(hchen): once we completely remove Python's `ObjectID`,
# this test can be removed as well.
# Check that these 2 types are the same.
assert ray.ObjectID == ray.ObjectRef
object_ref = ray.put(1)
# Check that users can use either type in `isinstance`
assert isinstance(object_ref, ray.ObjectID)
assert isinstance(object_ref, ray.ObjectRef)
def test_nonascii_in_function_body(ray_start_shared_local_modes):
@ray.remote
def return_a_greek_char():
return "φ"
assert ray.get(return_a_greek_char.remote()) == "φ"
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| 29.952575 | 78 | 0.614748 |
035f2202dc0c7018d0c5b864725419101e6e2e2a | 739 | py | Python | bradocs4py/validadoresie/validadorrs.py | namio/BRADocs4Py | 3d0343abd367e0e5ce2222740c9953d482daa6ac | [
"MIT"
] | 11 | 2019-06-19T15:08:09.000Z | 2021-11-12T17:45:49.000Z | bradocs4py/validadoresie/validadorrs.py | namio/BRADocs4Py | 3d0343abd367e0e5ce2222740c9953d482daa6ac | [
"MIT"
] | 1 | 2020-10-04T17:50:08.000Z | 2020-10-04T17:50:08.000Z | bradocs4py/validadoresie/validadorrs.py | namio/BRADocs4Py | 3d0343abd367e0e5ce2222740c9953d482daa6ac | [
"MIT"
] | 3 | 2021-07-24T01:55:56.000Z | 2021-12-26T22:51:15.000Z | # -*- coding: utf-8 -*-
import re
from itertools import chain
class ValidadorRS(object):
"""
Realiza a validação de cadeias de caracteres (strings) que representam
o número da Inscrição Estadual do Estado do Rio Grande do Sul, conforme regras
encontradas em http://www.sintegra.gov.br/Cad_Estados/cad_RS.html.
"""
def __hashDigit(self, insc):
pesos = chain([2], range(9,1,-1))
val = sum(int(digito) * peso for digito, peso in zip(insc, pesos)) % 11
return 0 if val < 2 else 11 - val
@staticmethod
def validar(numero):
if numero == None: return False
p = re.compile('[^0-9]')
x = p.sub('', numero)
if len(x) != 10 or len(set(x)) == 1: return False
v = ValidadorRS()
return v.__hashDigit(x) == int(x[9])
| 22.393939 | 79 | 0.664411 |
27b10b7078944f7ea336ecc13f4be890bd51cb46 | 4,074 | py | Python | Implementation/reject_option.py | andreArtelt/explaining_lvq_reject | 9c59ad1cba1f9e763d5d3f67c6037d38fcdcff5a | [
"MIT"
] | null | null | null | Implementation/reject_option.py | andreArtelt/explaining_lvq_reject | 9c59ad1cba1f9e763d5d3f67c6037d38fcdcff5a | [
"MIT"
] | null | null | null | Implementation/reject_option.py | andreArtelt/explaining_lvq_reject | 9c59ad1cba1f9e763d5d3f67c6037d38fcdcff5a | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
import numpy as np
from sklearn_lvq import GmlvqModel
from lvq import build_pairwise_lvq_classifiers
class RejectOption(ABC):
def __init__(self, threshold, **kwds):
self.threshold = threshold
super().__init__(**kwds)
@abstractmethod
def criterion(self, x):
raise NotImplementedError()
def __call__(self, x):
return self.reject(x)
def reject(self, x):
return self.criterion(x) < self.threshold
class LvqRejectOption(RejectOption):
def __init__(self, lvq_wrapped_model, **kwds):
self.lvq_model = lvq_wrapped_model
super().__init__(**kwds)
@abstractmethod
def fit(self, X, y):
raise NotImplementedError()
class LvqRejectRelSim(LvqRejectOption):
def __init__(self, **kwds):
super().__init__(**kwds)
def fit(self, X, y):
pass
def criterion(self, x):
distances_to_prototypes = [self.lvq_model.dist(self.lvq_model.prototypes[i], x) for i in range(len(self.lvq_model.prototypes))]
pi_idx = np.argmin(distances_to_prototypes)
dp = distances_to_prototypes[pi_idx]
pi_label = self.lvq_model.prototypes_labels[pi_idx]
other_prototypes_idx = np.where(self.lvq_model.prototypes_labels != pi_label)[0]
dm = np.min([distances_to_prototypes[idx] for idx in other_prototypes_idx])
return (dm - dp) / (dm + dp)
class LvqRejectDistDecisionBoundary(LvqRejectOption):
def __init__(self, **kwds):
super().__init__(**kwds)
def fit(self, X, y):
pass
def criterion(self, x):
distances_to_prototypes = [self.lvq_model.dist(self.lvq_model.prototypes[i], x) for i in range(len(self.lvq_model.prototypes))]
pi_idx = np.argmin(distances_to_prototypes)
p_i = self.lvq_model.prototypes[pi_idx]
dp = distances_to_prototypes[pi_idx]
pi_label = self.lvq_model.prototypes_labels[pi_idx]
other_prototypes_idx = np.where(self.lvq_model.prototypes_labels != pi_label)[0]
pj_idx = np.argmin([distances_to_prototypes[idx] for idx in other_prototypes_idx])
p_j = self.lvq_model.prototypes[other_prototypes_idx[pj_idx]]
dm = distances_to_prototypes[other_prototypes_idx[pj_idx]]
return np.abs(dp - dm) / (2. * np.linalg.norm(p_i - p_j)**2)
class LvqRejectProbabilistic(LvqRejectOption):
def __init__(self, pairwise_lvq_classifier_class=GmlvqModel, **kwds):
self.pairwise_lvq_classifier_class = pairwise_lvq_classifier_class
self.pairwise_wrapped_lvq_models = None
self.num_classes = None
super().__init__(**kwds)
def fit(self, X_train, y_train):
self.num_classes = len(np.unique(y_train))
self.pairwise_wrapped_lvq_models = build_pairwise_lvq_classifiers(self.pairwise_lvq_classifier_class, X_train, y_train)
def __compute_prob_ij(self, x, i, j):
lvq_model_ij_data = self.pairwise_wrapped_lvq_models[i][j]
lvq_model_ij, alpha, beta = lvq_model_ij_data["model"], lvq_model_ij_data["alpha"], lvq_model_ij_data["beta"]
distances_to_prototypes = [lvq_model_ij.dist(lvq_model_ij.prototypes[i], x) for i in range(len(lvq_model_ij.prototypes))]
pi_idx = np.argmin(distances_to_prototypes)
dp = distances_to_prototypes[pi_idx]
pi_label = lvq_model_ij.prototypes_labels[pi_idx]
other_prototypes_idx = np.where(lvq_model_ij.prototypes_labels != pi_label)[0]
pj_idx = np.argmin([distances_to_prototypes[idx] for idx in other_prototypes_idx])
dm = distances_to_prototypes[other_prototypes_idx[pj_idx]]
r_ij = (dm - dp) / (dm + dp)
return 1. / (1. + np.exp(alpha * r_ij + beta))
def __compute_prob_i(self, x, i):
other_labels = list(range(self.num_classes));other_labels.remove(i)
return 1. / (np.sum([1. / self.__compute_prob_ij(x, i, j) for j in other_labels]) - self.num_classes + 2)
def criterion(self, x):
return np.max([self.__compute_prob_i(x, i) for i in range(self.num_classes)])
| 35.12069 | 135 | 0.69244 |
94d5c030497d6e1250fa6c88619ead8d44e550c9 | 2,267 | py | Python | docs/conf.py | korotkin/car_store_test | ff0e04116d6d3735e49252d499d766ac69131b24 | [
"MIT"
] | null | null | null | docs/conf.py | korotkin/car_store_test | ff0e04116d6d3735e49252d499d766ac69131b24 | [
"MIT"
] | null | null | null | docs/conf.py | korotkin/car_store_test | ff0e04116d6d3735e49252d499d766ac69131b24 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "car_store"
copyright = """2021, S.Korotkin"""
author = "S.Korotkin"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 35.984127 | 79 | 0.666079 |
952888e45d5a20d8c1a52939e67a328697a644a0 | 1,552 | py | Python | aries_cloudagent/protocols/credentials/handlers/credential_request_handler.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 2 | 2020-02-26T14:22:44.000Z | 2021-05-06T20:13:36.000Z | aries_cloudagent/protocols/credentials/handlers/credential_request_handler.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 6 | 2021-03-10T20:05:19.000Z | 2022-02-27T05:41:09.000Z | aries_cloudagent/protocols/credentials/handlers/credential_request_handler.py | euroledger/aries-cloudagent-python | caf457276b19df374c16c2890e1c7e4914f46254 | [
"Apache-2.0"
] | 4 | 2020-02-19T23:02:11.000Z | 2021-11-18T11:33:43.000Z | """Credential request handler."""
from ....messaging.base_handler import (
BaseHandler,
BaseResponder,
HandlerException,
RequestContext,
)
from ..manager import CredentialManager
from ..messages.credential_request import CredentialRequest
class CredentialRequestHandler(BaseHandler):
"""Message handler class for credential requests."""
async def handle(self, context: RequestContext, responder: BaseResponder):
"""
Message handler logic for credential requests.
Args:
context: request context
responder: responder callback
"""
self._logger.debug(f"CredentialRequestHandler called with context {context}")
assert isinstance(context.message, CredentialRequest)
self._logger.info(
"Received credential request: %s", context.message.serialize(as_string=True)
)
if not context.connection_ready:
raise HandlerException("No connection established for credential request")
credential_manager = CredentialManager(context)
credential_exchange_record = await credential_manager.receive_request(
context.message
)
# If auto_issue is enabled, respond immediately
if credential_exchange_record.auto_issue:
(
credential_exchange_record,
credential_issue_message,
) = await credential_manager.issue_credential(credential_exchange_record)
await responder.send_reply(credential_issue_message)
| 31.673469 | 88 | 0.688789 |
010627b14e54722c366744b7db04a2d5ae35c75f | 2,535 | py | Python | test/test_rereference.py | haydard/wyrm | ff3f675ea71a45f1dd91ecbc5944229ebb3342ec | [
"MIT"
] | null | null | null | test/test_rereference.py | haydard/wyrm | ff3f675ea71a45f1dd91ecbc5944229ebb3342ec | [
"MIT"
] | null | null | null | test/test_rereference.py | haydard/wyrm | ff3f675ea71a45f1dd91ecbc5944229ebb3342ec | [
"MIT"
] | null | null | null | from __future__ import division
import unittest
import numpy as np
from wyrm.types import Data
from wyrm.processing import rereference
from wyrm.processing import swapaxes
CHANS = 5
SAMPLES = 20
EPOS = 3
class TestRereference(unittest.TestCase):
def setUp(self):
dat = np.zeros((SAMPLES, CHANS))
# [-10, -9, ... 20)
dat[:, 0] = np.arange(SAMPLES) - SAMPLES/2
channels = ['chan{i}'.format(i=i) for i in range(CHANS)]
time = np.arange(SAMPLES)
self.cnt = Data(dat, [time, channels], ['time', 'channels'], ['ms', '#'])
# construct epo
epo_dat = np.array([dat + i for i in range(EPOS)])
classes = ['class{i}'.format(i=i) for i in range(EPOS)]
self.epo = Data(epo_dat, [classes, time, channels], ['class', 'time', 'channels'], ['#', 'ms', '#'])
def test_rereference_cnt(self):
"""Rereference channels (cnt)."""
cnt_r = rereference(self.cnt, 'chan0')
dat_r = np.linspace(SAMPLES/2, -SAMPLES/2, SAMPLES, endpoint=False)
dat_r = [dat_r for i in range(CHANS)]
dat_r = np.array(dat_r).T
dat_r[:, 0] = 0
np.testing.assert_array_equal(cnt_r.data, dat_r)
def test_rereference_epo(self):
"""Rereference channels (epo)."""
epo_r = rereference(self.epo, 'chan0')
dat_r = np.linspace(SAMPLES/2, -SAMPLES/2, SAMPLES, endpoint=False)
dat_r = [dat_r for i in range(CHANS)]
dat_r = np.array(dat_r).T
dat_r[:, 0] = 0
dat_r = np.array([dat_r for i in range(EPOS)])
np.testing.assert_array_equal(epo_r.data, dat_r)
def test_raise_value_error(self):
"""Raise ValueError if channel not found."""
with self.assertRaises(ValueError):
rereference(self.cnt, 'foo')
def test_case_insensitivity(self):
"""rereference should not care about case."""
try:
rereference(self.cnt, 'ChAN0')
except ValueError:
self.fail()
def test_rereference_copy(self):
"""rereference must not modify arguments."""
cpy = self.cnt.copy()
rereference(self.cnt, 'chan0')
self.assertEqual(self.cnt, cpy)
def test_rereference_swapaxes(self):
"""rereference must work with nonstandard chanaxis."""
dat = rereference(swapaxes(self.epo, 1, 2), 'chan0', chanaxis=1)
dat = swapaxes(dat, 1, 2)
dat2 = rereference(self.epo, 'chan0')
self.assertEqual(dat, dat2)
if __name__ == '__main__':
unittest.main()
| 32.088608 | 108 | 0.605917 |
127d047d609c19b2daa5f730f26916181bf2e7e6 | 7,856 | py | Python | Bio/Restriction/__init__.py | bioinf-mcb/biopython | 1a1f4a7ee4e0efba517d3d607c56c27e72e399cc | [
"BSD-3-Clause"
] | 5 | 2015-06-14T17:01:57.000Z | 2020-10-05T03:27:56.000Z | Bio/Restriction/__init__.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | 14 | 2021-03-26T20:54:22.000Z | 2021-04-06T17:18:53.000Z | Bio/Restriction/__init__.py | uci-ics-32/biopython | ff7d3703d442192a1f6d84c52e028d566d44ff1c | [
"BSD-3-Clause"
] | 8 | 2016-02-20T22:53:21.000Z | 2022-02-04T06:10:23.000Z | #!/usr/bin/env python
#
# Restriction Analysis Libraries.
# Copyright (C) 2004. Frederic Sohm.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Restriction Digest Enzymes.
Examples
--------
>>> from Bio.Seq import Seq
>>> from Bio.Restriction import *
>>> pBs_mcs = 'GGTACCGGGCCCCCCCTCGAGGTCGACGGTATCGATAAGCTTGATATCGAATTCCTG'
>>> pBs_mcs += 'CAGCCCGGGGGATCCACTAGTTCTAGAGCGGCCGCCACCGCGGTGGAGCTC'
>>> seq = Seq(pBs_mcs) # Multiple-cloning site of pBluescript SK(-)
>>> a = Analysis(AllEnzymes, seq)
>>> a.print_that() # no argument -> print all the results
AbaSI : 10, 12, 13, 16, 17, 18, 19, 20, 22, 23, 24, 25, 25, 26, 27...
BmeDI : 2, 7, 8, 8, 9, 9, 13, 14, 15, 16, 17, 18, 19, 19, 21, 21...
YkrI : 10, 12, 13, 16, 16, 17, 19, 20, 21, 22, 23, 24, 25, 25, 26...
BmeDI : 1, 2, 7, 8, 8, 9, 9, 13, 14, 15, 16, 17, 18, 19...
AccII : 98.
AciI : 86, 90, 96, 98...
Enzymes which do not cut the sequence.
AspLEI BstHHI CfoI CviAII FaeI FaiI FatI GlaI
HhaI Hin1II Hin6I HinP1I HpyCH4IV HpySE526I Hsp92II HspAI
MaeII MseI NlaIII SaqAI TaiI Tru1I Tru9I...
<BLANKLINE>
>>> b = a.blunt() # Analysis with blunt enzmyes
>>> a.print_that(b) # Print results for blunt cutters
AccII : 98.
AfaI : 4.
AluBI : 40, 106.
AluI : 40, 106.
Bsh1236I : 98.
BshFI : 10, 89.
BsnI : 10, 89.
BspANI : 10, 89...
Enzymes which do not cut the sequence.
FaiI GlaI CdiI MlyI SchI SspD5I AanI...
<BLANKLINE>
""" # noqa: W291, W293
from Bio.Restriction.Restriction import * # noqa (legacy module arrangement)
#
# OK can't put the following code in Bio.Restriction.__init__ unless
# I put everything from Restriction in here.
# or at least the RestrictionBatch class.
#
# The reason for that is if I do that, I break the __contains__ method of
# the RestrictionBatch in Restriction, which expects to find the name of
# the enzymes in the locals() dictionary when evaluating a string to see if
# it is an enzyme.
#
# This calls for some explanations I guess:
# When testing for the presence of a Restriction enzyme in a
# RestrictionBatch, the user can use:
#
# 1) a class of type 'RestrictionType'
# 2) a string of the name of the enzyme (its repr)
# i.e:
# >>> from Bio.Restriction import RestrictionBatch, EcoRI
# >>> MyBatch = RestrictionBatch(EcoRI)
# >>> EcoRI in MyBatch # the class EcoRI.
# True
# >>> 'EcoRI' in MyBatch # a string representation
# True
#
# OK, that's how it is supposed to work. And I find it quite useful.
#
# Now if I leave the code here I got:
# >>> from Bio.Restriction import RestrictionBatch, EcoRI
# >>> MyBatch = RestrictionBatch(EcoRI)
# >>> EcoRI in MyBatch # the class EcoRI.
# True
# >>> 'EcoRI' in MyBatch # a string.
# False
# There are 5 ways to change that:
# 1) abandon the evaluation of string representation.
# 2) leave the code like that and hack something in RestrictionBatch.
# 3) Move back the code in Bio.Restriction.Restriction
# 4) Move RestrictionBatch here.
# 5) Remove Restriction.Restriction and move all the code in here
#
# 1) no fun in that.
# 2) there is a simpler way to do it.
# 3) I prefer to keep all the code together.
# 4) and 5) both are OK. Only a matter of preference.
#
# So the following code has been moved back to Bio.Restriction.Restriction
# For the user the results is transparent:
# from Bio.Restriction import * works as before.
#
# ##
# ## The restriction enzyme classes are created dynamically when the module
# ## is imported. Here is the magic which allow the creation of the
# ## restriction-enzyme classes.
# ##
# ## The reason for the two dictionaries in Restriction_Dictionary
# ## one for the types (which will be called pseudo-type as they really
# ## correspond to the values that instances of RestrictionType can take)
# ## and one for the enzymes is efficiency as the bases are evaluated
# ## once per pseudo-type.
# ##
# ## However Restriction is still a very inefficient module at import. But
# ## remember that around 660 classes (which is more or less the size of
# ## Rebase) have to be created dynamically. However, this processing take
# ## place only once.
# ## This inefficiency is however largely compensated by the use of metaclass
# ## which provide a very efficient layout for the class themselves mostly
# ## alleviating the need of if/else loops in the class methods.
# ##
# ## It is essential to run Restriction with doc string optimisation (-OO
# ## switch) as the doc string of 660 classes take a lot of processing.
# ##
# # CommOnly = RestrictionBatch() # commercial enzymes
# # NonComm = RestrictionBatch() # not available commercially
# # for TYPE, (bases, enzymes) in typedict.items():
# # #
# # # The keys are the pseudo-types TYPE (stored as type1, type2...)
# # # The names are not important and are only present to differentiate
# # # the keys in the dict. All the pseudo-types are in fact
# # # RestrictionType. These names will not be used after and the pseudo-
# # # types are not kept in the locals() dictionary. It is therefore
# # # impossible to import them.
# # # Now, if you have a look at the dictionary, you will see that not all
# # # the types are present as those without corresponding enzymes have
# # # been removed by Dictionary_Builder().
# # #
# # # The values are tuples which contain
# # # as first element a tuple of bases (as string) and
# # # as second element the names of the enzymes.
# # #
# # # First eval the bases.
# # #
# # bases = tuple(eval(x) for x in bases)
# # #
# # # now create the particular value of RestrictionType for the classes
# # # in enzymes.
# # #
# # T = type.__new__(RestrictionType, 'RestrictionType', bases, {})
# # for k in enzymes:
# # #
# # # Now, we go through all the enzymes and assign them their type.
# # # enzymedict[k] contains the values of the attributes for this
# # # particular class (self.site, self.ovhg,....).
# # #
# # newenz = T(k, bases, enzymedict[k])
# # #
# # # we add the enzymes to the corresponding batch.
# # #
# # # No need to verify the enzyme is a RestrictionType -> add_nocheck
# # #
# # if newenz.is_comm() : CommOnly.add_nocheck(newenz)
# # else : NonComm.add_nocheck(newenz)
# ##
# ## AllEnzymes is a RestrictionBatch with all the enzymes from Rebase.
# ##
# # AllEnzymes = CommOnly | NonComm
# ##
# ## Now, place the enzymes in locals so they can be imported.
# ##
# # names = [str(x) for x in AllEnzymes]
# # locals().update(dict(map(None, names, AllEnzymes)))
# ##
# ## Limit what can be imported by from Restriction import *
# ## Most of the classes here will never be used outside this module
# ## (Defined,Palindromic...). It is still possible to request them
# ## specifically
# ##
# ## also delete the variable that are no longer needed.
# ##
# ##
# # __all__= ['Analysis', 'RestrictionBatch','AllEnzymes','CommOnly',
# # 'NonComm']+names
# # del k, x, enzymes, TYPE, bases, names
| 41.347368 | 84 | 0.613671 |
7a33560ef9fa27e8f2abf779796626a1590e3860 | 5,422 | py | Python | core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py | Mincom/epiphany | f0215e1272b014f093ca6be6c5eb5ba6c73d2c79 | [
"Apache-2.0"
] | 1 | 2021-02-04T07:40:01.000Z | 2021-02-04T07:40:01.000Z | core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py | toszo/epiphany | db9e4777bf4c013e04a685cf72f62cabb484e363 | [
"Apache-2.0"
] | 1 | 2020-06-22T17:32:44.000Z | 2020-06-22T17:32:44.000Z | core/src/epicli/cli/engine/ansible/AnsibleInventoryUpgrade.py | toszo/epiphany | db9e4777bf4c013e04a685cf72f62cabb484e363 | [
"Apache-2.0"
] | null | null | null | import os
from ansible.parsing.dataloader import DataLoader
from ansible.inventory.manager import InventoryManager
from cli.helpers.Step import Step
from cli.helpers.build_saver import get_inventory_path_for_build, check_build_output_version, BUILD_LEGACY
from cli.models.AnsibleHostModel import AnsibleHostModel
from cli.models.AnsibleInventoryItem import AnsibleInventoryItem
from cli.helpers.build_saver import save_inventory
from cli.helpers.objdict_helpers import dict_to_objdict
from cli.helpers.data_loader import load_yamls_file, load_yaml_obj, types as data_types
from cli.helpers.doc_list_helpers import select_single
from cli.helpers.objdict_helpers import merge_objdict
from cli.helpers.data_loader import load_manifest_docs
class AnsibleInventoryUpgrade(Step):
    """Upgrade an Ansible inventory from a backup build to the current Epiphany layout.

    Loads the legacy inventory from `backup_build_dir`, remaps/removes roles that
    changed between Epiphany versions, reconstructs the cluster model, and saves
    the upgraded inventory into `build_dir`.
    """

    def __init__(self, build_dir, backup_build_dir):
        super().__init__(__name__)
        self.build_dir = build_dir                # target of the upgraded inventory
        self.backup_build_dir = backup_build_dir  # source of the legacy inventory/manifest
        self.cluster_model = None                 # populated by upgrade()
        self.manifest_docs = []                   # populated by upgrade() for builds >= 0.3.0

    def __enter__(self):
        super().__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        super().__exit__(exc_type, exc_value, traceback)

    def get_role(self, inventory, role_name):
        """Return the inventory item with the given role name, or None if absent."""
        for item in inventory:
            if item.role == role_name:
                return item
        return None

    def delete_role(self, inventory, role_name):
        """Remove (in place) the first inventory item with the given role name, if any."""
        for index, item in enumerate(inventory):
            if item.role == role_name:
                del inventory[index]
                return

    def rename_role(self, inventory, role_name, new_role_name):
        """Rename a role in place; a no-op when the role is not present."""
        role = self.get_role(inventory, role_name)
        if role is not None:  # identity check instead of the former `!= None`
            role.role = new_role_name

    def upgrade(self):
        """Perform the inventory upgrade. Returns 0 on success."""
        inventory_path = get_inventory_path_for_build(self.backup_build_dir)
        build_version = check_build_output_version(self.backup_build_dir)

        self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
        loaded_inventory = InventoryManager(loader=DataLoader(), sources=inventory_path)

        # Move the loaded inventory into the templating structure, skipping
        # Ansible's implicit groups ('all', 'ungrouped').
        new_inventory = []
        for key in loaded_inventory.groups:
            if key not in ('all', 'ungrouped'):
                group_hosts = loaded_inventory.groups[key].hosts
                new_hosts = [AnsibleHostModel(host.address, host.vars['ansible_host'])
                             for host in group_hosts]
                new_inventory.append(AnsibleInventoryItem(key, new_hosts))

        if build_version == BUILD_LEGACY:
            self.logger.info('Upgrading Ansible inventory Epiphany < 0.3.0')

            # Epiphany < 0.3.0 did not have a manifest file in the build folder,
            # so create a bare-minimum cluster model from the inventory itself.
            self.cluster_model = dict_to_objdict({
                'provider': 'any',
                'specification': {
                    'admin_user': {
                        'name': loaded_inventory.groups['all'].vars['ansible_user'],
                        'key_path': loaded_inventory.groups['all'].vars['ansible_ssh_private_key_file']
                    }
                }
            })

            # Remap roles that were renamed between versions.
            self.rename_role(new_inventory, 'master', 'kubernetes_master')
            self.rename_role(new_inventory, 'worker', 'kubernetes_node')
            self.rename_role(new_inventory, 'deployments', 'applications')
            self.rename_role(new_inventory, 'elasticsearch-curator', 'elasticsearch_curator')
            self.rename_role(new_inventory, 'jmx-exporter', 'jmx_exporter')
            self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter')
            self.rename_role(new_inventory, 'haproxy_tls_termination', 'haproxy')

            # Remove the dropped linux and reboot roles if present.
            self.delete_role(new_inventory, 'linux')
            self.delete_role(new_inventory, 'reboot')
        else:
            self.logger.info('Upgrading Ansible inventory Epiphany => 0.3.0')

            # Load the cluster model from the manifest of the previous build.
            self.manifest_docs = load_manifest_docs(self.backup_build_dir)
            self.cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

            # Merge the manifest cluster config with the newer defaults.
            default_cluster_model = load_yaml_obj(data_types.DEFAULT, 'common', 'epiphany-cluster')
            merge_objdict(default_cluster_model, self.cluster_model)
            self.cluster_model = default_cluster_model

        # The repository & image_registry roles were added in v0.4.0.
        repository = self.get_role(new_inventory, 'repository')
        if repository is None:
            raise Exception('repository group not found in inventory. '
                            'Your deployment may not be supported by this version of Epiphany. '
                            'You may try to use older version first.')

        # Add image_registry if not present; co-locate it with the repository host.
        image_registry = self.get_role(new_inventory, 'image_registry')
        if image_registry is None:
            hosts = [AnsibleHostModel(repository.hosts[0].name, repository.hosts[0].ip)]
            new_inventory.append(AnsibleInventoryItem('image_registry', hosts))

        # Save the new inventory.
        save_inventory(new_inventory, self.cluster_model, self.build_dir)

        return 0
| 44.809917 | 130 | 0.666913 |
2a9622fa29511bb4a30584768e70bd53064321f1 | 13,192 | py | Python | tests/conftest.py | yashbhutwala/kopf | 4ad77dae699d8516ee7c189b11c6cedbe9224975 | [
"MIT"
] | null | null | null | tests/conftest.py | yashbhutwala/kopf | 4ad77dae699d8516ee7c189b11c6cedbe9224975 | [
"MIT"
] | null | null | null | tests/conftest.py | yashbhutwala/kopf | 4ad77dae699d8516ee7c189b11c6cedbe9224975 | [
"MIT"
] | null | null | null | import asyncio
import dataclasses
import io
import json
import logging
import os
import re
import sys
import time
from unittest.mock import Mock
import asynctest
import pykube
import pytest
import pytest_mock
from kopf.config import configure
from kopf.engines.logging import ObjectPrefixingFormatter
from kopf.reactor.registries import Resource
def pytest_configure(config):
    """Register the custom markers used by this test suite."""
    marker_lines = (
        "e2e: end-to-end tests with real operators.",
        "resource_clustered: (internal parameterizatiom mark).",  # sic: typo kept (runtime string)
    )
    for line in marker_lines:
        config.addinivalue_line('markers', line)
def pytest_addoption(parser):
    """Add the command-line flags that control e2e test selection."""
    for flag, help_text in (
        ("--only-e2e", "Execute end-to-end tests only."),
        ("--with-e2e", "Include end-to-end tests."),
    ):
        parser.addoption(flag, action="store_true", help=help_text)
# This logic is not applied if pytest is started explicitly on ./examples/.
# In that case, regular pytest behaviour applies -- this is intended.
def pytest_collection_modifyitems(config, items):
    """Post-process collected tests: auto-mark coroutines, then group & gate e2e tests."""

    # Every coroutine test in this directory tree runs under the asyncio plugin.
    for item in items:
        if asyncio.iscoroutinefunction(item.function):
            item.add_marker('asyncio')

    def _is_e2e(item):
        path = item.location[0]
        return path.startswith('tests/e2e/') or path.startswith('examples/')

    # Partition: slow e2e tests are pushed to the end of the run.
    regular, e2e = [], []
    for item in items:
        (e2e if _is_e2e(item) else regular).append(item)

    # Tag every e2e test uniformly, no matter how it was detected, for filtering.
    for item in e2e:
        item.add_marker(pytest.mark.e2e)

    # E2E tests are heavy and require a cluster; skip unless explicitly enabled,
    # so contributors can run pytest without initial tweaks.
    if not (config.getoption('--with-e2e') or config.getoption('--only-e2e')):
        skip_marker = pytest.mark.skip(reason="E2E tests are not enabled. "
                                              "Use --with-e2e/--only-e2e to enable.")
        for item in e2e:
            item.add_marker(skip_marker)

    # With --only-e2e everything else is dropped; otherwise e2e tests go last.
    items[:] = e2e if config.getoption('--only-e2e') else regular + e2e
# Substitute the regular mock with the async-aware mock in the `mocker` fixture.
@pytest.fixture(scope='session', autouse=True)
def enforce_asyncio_mocker():
    """Make pytest-mock hand out async-aware (asynctest) mocks instead of regular ones."""
    def _async_mock_module(config):
        return asynctest
    pytest_mock._get_mock_module = _async_mock_module
@pytest.fixture()
def resource():
    """A fixed example resource; the API is usually mocked, so the values are arbitrary."""
    example_resource = Resource('zalando.org', 'v1', 'kopfexamples')
    return example_resource
#
# Mocks for Kubernetes API clients (any of them). Reasons:
# 1. We do not test the clients, we test the layers on top of them,
# so everything low-level should be mocked and assumed to be functional.
# 2. No external calls must be made under any circumstances.
# The unit-tests must be fully isolated from the environment.
#
@pytest.fixture()
def req_mock(mocker, resource, request):
    """Patch pykube config & resource discovery, and block any real HTTP requests."""
    # Pykube config is needed to create a pykube API instance, but we neither want
    # nor need real authentication, so fake just the fields pykube's objects read
    # ("leaky abstractions").
    fake_cfg = mocker.patch('kopf.clients.auth.get_pykube_cfg').return_value
    fake_cfg.cluster = {'server': 'localhost'}
    fake_cfg.namespace = 'default'

    # Simulated cluster-defined CRD list. A `resource_clustered` marker on the
    # test makes the resource cluster-scoped instead of namespaced.
    clustered = any(marker.name == 'resource_clustered' for marker in request.node.own_markers)
    discovery_mock = mocker.patch('pykube.http.HTTPClient.resource_list')
    discovery_mock.return_value = {'resources': [
        {'name': 'kopfexamples', 'kind': 'KopfExample', 'namespaced': not clustered},
    ]}

    # Prevent ANY outer requests, no matter what. Calls are asserted on this mock.
    return mocker.patch('requests.Session').return_value
@pytest.fixture()
def stream(req_mock):
    """A mock for the stream of events as if returned by the K8s client."""

    def _as_lines(events):
        # Encode each event the way the watch-stream would: one JSON line per event.
        return iter(json.dumps(event).encode('utf-8') for event in events)

    def feed(*args):
        side_effect = [
            _as_lines(arg) if isinstance(arg, (list, tuple)) else arg
            for arg in args
        ]
        req_mock.get.return_value.iter_lines.side_effect = side_effect

    return Mock(spec_set=['feed'], feed=feed)
#
# Mocks for login & checks. Used in specifialised login tests,
# and in all CLI tests (since login is implicit with CLI commands).
#
@dataclasses.dataclass(frozen=True, eq=False, order=False)
class LoginMocks:
    """Holder for the login/verification mocks of both client libraries.

    A field stays None when the corresponding library is not installed.
    """
    # pykube-ng: in-cluster login, kubeconfig-file login, and the check request.
    pykube_in_cluster: Mock = None
    pykube_from_file: Mock = None
    pykube_checker: Mock = None
    # kubernetes client: in-cluster login, kubeconfig-file login, and the check call.
    client_in_cluster: Mock = None
    client_from_file: Mock = None
    client_checker: Mock = None
@pytest.fixture()
def login_mocks(mocker):
    """Mock the login & verification calls of whichever client libraries are installed."""
    # Pykube config is needed to create a pykube API instance, but we neither want
    # nor need real authentication; fake only the fields pykube's objects read
    # ("leaky abstractions").
    cfg_mock = mocker.patch('kopf.clients.auth.get_pykube_cfg').return_value
    cfg_mock.cluster = {'server': 'localhost'}
    cfg_mock.namespace = 'default'

    # Both client libraries are optional; which ones get patched depends solely on
    # what is importable. Skipping is the tests' decision, not this fixture's.
    kwargs = {}
    try:
        import pykube
    except ImportError:
        pass
    else:
        kwargs['pykube_in_cluster'] = mocker.patch.object(pykube.KubeConfig, 'from_service_account')
        kwargs['pykube_from_file'] = mocker.patch.object(pykube.KubeConfig, 'from_file')
        kwargs['pykube_checker'] = mocker.patch.object(pykube.http.HTTPClient, 'get')
    try:
        import kubernetes
    except ImportError:
        pass
    else:
        kwargs['client_in_cluster'] = mocker.patch.object(kubernetes.config, 'load_incluster_config')
        kwargs['client_from_file'] = mocker.patch.object(kubernetes.config, 'load_kube_config')
        kwargs['client_checker'] = mocker.patch.object(kubernetes.client, 'CoreApi')
    return LoginMocks(**kwargs)
#
# Simulating that Kubernetes client library is not installed.
#
class ProhibitedImportFinder:
    """A meta-path finder that vetoes any import of the `kubernetes` package."""

    def find_spec(self, fullname, path, target=None):
        # Block the top-level package and everything under it; implicitly
        # returns None (i.e. "not found by this finder") for all other modules.
        top_level = fullname.split('.', 1)[0]
        if top_level == 'kubernetes':
            raise ImportError("Import is prohibited for tests.")
@pytest.fixture()
def _kubernetes():
    """Provide the real kubernetes client library, or skip the test if missing.

    Its presence cannot be simulated (unlike its absence): the library must
    actually be installed for the dependent tests to make sense.
    """
    kubernetes = pytest.importorskip('kubernetes')
    return kubernetes
@pytest.fixture()
def _no_kubernetes():
    """Simulate the absence of the kubernetes client library for one test."""
    try:
        import kubernetes as kubernetes_before
    except ImportError:
        # Not installed anyway -- nothing to hide, nothing to restore.
        yield
        return

    # Stash and drop every cached `kubernetes*` module so imports start fresh.
    hidden = {name: mod for name, mod in list(sys.modules.items())
              if name == 'kubernetes' or name.startswith('kubernetes.')}
    for name in hidden:
        del sys.modules[name]

    # Veto re-imports of the package for the duration of the test, then restore.
    finder = ProhibitedImportFinder()
    sys.meta_path.insert(0, finder)
    try:
        yield
    finally:
        sys.meta_path.remove(finder)
        sys.modules.update(hidden)

    # Verify that importing works again and we didn't break the import machinery.
    import kubernetes as kubernetes_after
    assert kubernetes_after is kubernetes_before
@pytest.fixture(params=[True], ids=['with-client'])  # for hinting suffixes
def kubernetes(request):
    # Public alias of `_kubernetes`; parametrized only to get a readable test-id suffix.
    return request.getfixturevalue('_kubernetes')
@pytest.fixture(params=[False], ids=['no-client'])  # for hinting suffixes
def no_kubernetes(request):
    # Public alias of `_no_kubernetes`; parametrized only to get a readable test-id suffix.
    return request.getfixturevalue('_no_kubernetes')
@pytest.fixture(params=[False, True], ids=['no-client', 'with-client'])
def any_kubernetes(request):
    """Run the test twice: once with the client library present, once simulated absent."""
    target = '_kubernetes' if request.param else '_no_kubernetes'
    return request.getfixturevalue(target)
#
# Helpers for the timing checks.
#
@pytest.fixture()
def timer():
    # A fresh timer per test; see the `Timer` class below for usage.
    return Timer()
class Timer:
    """
    A helper context manager to measure the time of the code-blocks.
    Also, supports direct comparison with the numbers of seconds.

    Usage:

        with Timer() as timer:
            do_something()
            print(f"Executing for {timer.seconds}s already.")
            do_something_else()

        print(f"Executed in {timer.seconds}s.")
        assert timer < 5.0
    """

    def __init__(self):
        super().__init__()
        self._ts = None  # start timestamp (perf_counter); None until started
        self._te = None  # end timestamp; None while still running

    @property
    def seconds(self):
        """Elapsed seconds: None before start, a live value while running, final after."""
        if self._ts is None:
            return None
        elif self._te is None:
            return time.perf_counter() - self._ts
        else:
            return self._te - self._ts

    def __repr__(self):
        status = 'new' if self._ts is None else 'running' if self._te is None else 'finished'
        return f'<Timer: {self.seconds}s ({status})>'

    def __enter__(self):
        self._ts = time.perf_counter()
        self._te = None
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._te = time.perf_counter()

    def __int__(self):
        return int(self.seconds)

    def __float__(self):
        return float(self.seconds)

    # The docstring promises `assert timer < 5.0`, but plain numbers do not
    # coerce via __float__ in comparisons, so ordering must be explicit.
    def __lt__(self, other):
        return self.seconds < other

    def __le__(self, other):
        return self.seconds <= other

    def __gt__(self, other):
        return self.seconds > other

    def __ge__(self, other):
        return self.seconds >= other
#
# Helpers for the logging checks.
#
@pytest.fixture()
def logstream(caplog):
    """Capture the final, prefixed log output: prefixing happens at formatting time."""
    root_logger = logging.getLogger()
    original_handlers = list(root_logger.handlers)
    configure(verbose=True)

    captured = io.StringIO()
    capture_handler = logging.StreamHandler(captured)
    capture_handler.setFormatter(ObjectPrefixingFormatter('prefix %(message)s'))
    root_logger.addHandler(capture_handler)
    try:
        with caplog.at_level(logging.DEBUG):
            yield captured
    finally:
        root_logger.removeHandler(capture_handler)
        root_logger.handlers[:] = original_handlers  # undo `configure()`
@pytest.fixture()
def assert_logs(caplog):
    """
    A function to assert the logs are present (by pattern).

    The listed message patterns MUST be present, in the order specified.
    Some other log messages can also be present, but they are ignored.
    """
    # Default is an immutable tuple, not the former mutable `[]` default.
    def assert_logs_fn(patterns, prohibited=(), strict=False):
        """Check caplog: `patterns` must appear in order; `prohibited` must never
        appear; with `strict`, no unexpected messages are tolerated at all."""
        __traceback_hide__ = True
        remaining_patterns = list(patterns)
        for message in caplog.messages:
            # The expected pattern is at position 0.
            # Looking-ahead: if one of the following patterns matches, while the
            # 0th does not, then the log message is missing, and we fail the test.
            for idx, pattern in enumerate(remaining_patterns):
                m = re.search(pattern, message)
                if m:
                    if idx == 0:
                        remaining_patterns[:1] = []
                        break  # out of `remaining_patterns` cycle
                    else:
                        skipped_patterns = remaining_patterns[:idx]
                        raise AssertionError(f"Few patterns were skipped: {skipped_patterns!r}")
                elif strict:
                    raise AssertionError(f"Unexpected log message: {message!r}")

            # Check that the prohibited patterns do not appear in any message.
            for pattern in prohibited:
                m = re.search(pattern, message)
                if m:
                    raise AssertionError(f"Prohibited log pattern found: {message!r} ~ {pattern!r}")

        # If all patterns have been matched in order, we are done.
        # if some are left, but the messages are over, then we fail.
        if remaining_patterns:
            raise AssertionError(f"Few patterns were missed: {remaining_patterns!r}")

    return assert_logs_fn
#
# Helpers for asyncio checks.
#
@pytest.fixture(autouse=True)
def _no_asyncio_pending_tasks():
    """
    Ensure there are no unattended asyncio tasks after the test.

    It looks both in the test's main event-loop, and in all other event-loops,
    such as the background thread of `KopfRunner` (used in e2e tests).

    Current solution uses some internals of asyncio, since there is no public
    interface for that. The warnings are printed only at the end of pytest.

    An alternative way: set event-loop's exception handler, force garbage
    collection after every test, and check messages from `asyncio.Task.__del__`.
    This, however, requires intercepting all event-loop creation in the code.
    """
    def _pending_tasks():
        # See `asyncio.all_tasks()` implementation for reference.
        return {task for task in list(asyncio.tasks._all_tasks) if not task.done()}

    tasks_before = _pending_tasks()
    yield
    leftover = _pending_tasks() - tasks_before
    if leftover:
        pytest.fail(f"Unattended asyncio tasks detected: {leftover!r}")
| 34.176166 | 100 | 0.670179 |
660bac962799b3ebada0b5eaa341d1085fbf3e62 | 18,547 | py | Python | stdplugins/glink.py | dqanshi/PornHub | 162a7053ca7f2c0b3617b852559cfaf0502d94a7 | [
"Apache-2.0"
] | 55 | 2019-07-13T15:57:54.000Z | 2021-09-20T16:50:42.000Z | stdplugins/glink.py | dqanshi/PornHub | 162a7053ca7f2c0b3617b852559cfaf0502d94a7 | [
"Apache-2.0"
] | 3 | 2020-04-15T02:08:53.000Z | 2020-06-06T13:45:18.000Z | stdplugins/glink.py | dqanshi/PornHub | 162a7053ca7f2c0b3617b852559cfaf0502d94a7 | [
"Apache-2.0"
] | 450 | 2019-07-12T13:18:41.000Z | 2022-03-29T18:47:42.000Z | """Upload link to gDrive
Syntax:
.glink = shows link
.ghlink = hides link"""
# The entire code given below is verbatim copied from
# https://github.com/cyberboysumanjay/Gdrivedownloader/blob/master/gdrive_upload.py
# there might be some changes made to suit the needs for this repository
# Licensed under MIT License
import aiohttp
import asyncio
import math
import os
import time
from pySmartDL import SmartDL
from telethon import events
from datetime import datetime
from apiclient.discovery import build
from apiclient.http import MediaFileUpload
from apiclient.errors import ResumableUploadError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client import file, client, tools
from mimetypes import guess_type
import httplib2
import subprocess
from uniborg.util import admin_cmd, progress, humanbytes, time_formatter
# Path to the OAuth token JSON file, kept inside the temp-download directory.
G_DRIVE_TOKEN_FILE = Config.TMP_DOWNLOAD_DIRECTORY + "/auth_token.txt"
# Copy your credentials from the APIs Console.
CLIENT_ID = Config.G_DRIVE_CLIENT_ID
CLIENT_SECRET = Config.G_DRIVE_CLIENT_SECRET
# Check https://developers.google.com/drive/scopes for all available scopes.
OAUTH_SCOPE = "https://www.googleapis.com/auth/drive.file"
# Redirect URI for installed apps, can be left as is.
# NOTE(review): the "oob" redirect flow has since been deprecated by Google -- verify it still works.
REDIRECT_URI = "urn:ietf:wg:oauth:2.0:oob"
# Global Folder ID to upload into; a falsy value means no parent is set (Drive root).
parent_id = Config.GDRIVE_FOLDER_ID
@borg.on(admin_cmd(pattern="glink ?(.*)", allow_sudo=True))
async def download(dryb):
    """For the .glink command: download a file (from a URL or a replied message)
    and upload it to Google Drive, replying with the shareable link."""
    if not dryb.text[0].isalpha() and dryb.text[0] not in ("/", "#", "@", "!"):
        if dryb.fwd_from:
            return
        await dryb.edit("Processing ...")
        input_str = dryb.pattern_match.group(1)
        if CLIENT_ID is None or CLIENT_SECRET is None:
            # Google Drive credentials are not configured -- nothing to do.
            # (Was `return false`, a NameError at runtime.)
            return False
        # Ensure the download directory exists. (Previously this was the head of
        # the if/elif chain, so the very first run created the directory and then
        # skipped the download entirely.)
        if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
            os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
        required_file_name = None
        if input_str:
            # URL mode: ".glink <url>" or ".glink <url>|<file name>".
            start = datetime.now()
            url = input_str
            file_name = os.path.basename(url)
            if "|" in input_str:
                url, file_name = input_str.split("|")
                url = url.strip()
                file_name = file_name.strip()
            # os.path.join is safe whether or not the directory has a trailing slash
            # (the former `dir + "" + name` concatenation required one).
            downloaded_file_name = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, file_name)
            downloader = SmartDL(url, downloaded_file_name, progress_bar=False)
            downloader.start(blocking=False)
            display_message = None
            while not downloader.isFinished():
                status = downloader.get_status().capitalize()
                total_length = downloader.filesize if downloader.filesize else None
                downloaded = downloader.get_dl_size()
                percentage = downloader.get_progress() * 100
                progress_str = "[{0}{1}]\nProgress: {2}%".format(
                    ''.join(["●" for i in range(math.floor(percentage / 5))]),
                    ''.join(["○" for i in range(20 - math.floor(percentage / 5))]),
                    round(percentage, 2))
                estimated_total_time = downloader.get_eta(human=True)
                try:
                    current_message = f"{status}...\nURL: {url}\nFile Name: {file_name}\n{progress_str}\n{humanbytes(downloaded)} of {humanbytes(total_length)}\nETA: {estimated_total_time}"
                    # Edit only when the text actually changed: Telegram rejects no-op edits.
                    if current_message != display_message:
                        await dryb.edit(current_message)
                        display_message = current_message
                    await asyncio.sleep(20)
                except Exception as e:
                    # NOTE(review): `logger` is not defined in this module -- presumably
                    # injected by the plugin loader; verify.
                    logger.info(str(e))
            end = datetime.now()
            ms = (end - start).seconds
            if downloader.isSuccessful():
                await dryb.edit(
                    "Downloaded to `{}` in {} seconds.\nNow Uploading to Google Drive...".format(
                        downloaded_file_name, ms)
                )
                required_file_name = downloaded_file_name
            else:
                await dryb.edit(
                    "Incorrect URL\n{}".format(url)
                )
        elif dryb.reply_to_msg_id:
            # Reply mode: download the replied-to media from Telegram.
            start = datetime.now()
            try:
                c_time = time.time()
                downloaded_file_name = await dryb.client.download_media(
                    await dryb.get_reply_message(),
                    Config.TMP_DOWNLOAD_DIRECTORY,
                    progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
                        progress(d, t, dryb, c_time, "Downloading...")
                    )
                )
            except Exception as e:  # pylint:disable=C0103,W0703
                await dryb.edit(str(e))
            else:
                end = datetime.now()
                required_file_name = downloaded_file_name
                ms = (end - start).seconds
                await dryb.edit(
                    "Downloaded to `{}` in {} seconds.\nNow Uploading to GDrive...".format(
                        downloaded_file_name, ms)
                )
        if required_file_name:
            # A pre-provisioned token (if configured) lets us skip the OAuth flow.
            if Config.AUTH_TOKEN_DATA is not None:
                with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
                    t_file.write(Config.AUTH_TOKEN_DATA)
            # Check if token file exists; if not, create it by requesting an authorization code.
            if not os.path.isfile(G_DRIVE_TOKEN_FILE):
                storage = await create_token_file(G_DRIVE_TOKEN_FILE, dryb)
                http = authorize(G_DRIVE_TOKEN_FILE, storage)
            # Authorize, get file parameters, upload, and print out the result URL.
            http = authorize(G_DRIVE_TOKEN_FILE, None)
            file_name, mime_type = file_ops(required_file_name)
            # Sometimes the API fails to retrieve the starting URI, so we wrap it.
            try:
                g_drive_link = await upload_file(http, required_file_name, file_name, mime_type, dryb)
                await dryb.edit(f"**🔥Encrypted G-Link Mode🔥** \nFile:`{required_file_name}`\nHas Successfully Uploaded to : [Encrypted Google Drive Section]({g_drive_link})")
            except Exception as e:
                await dryb.edit(f"Error while uploading to Google Drive\nError Code:\n`{e}`")
# Get mime type and name of given file
def file_ops(file_path):
    """Return (basename, mime type) of a path; unknown extensions fall back to text/plain."""
    guessed_type = guess_type(file_path)[0]
    base_name = file_path.split("/")[-1]
    return base_name, (guessed_type or "text/plain")
async def create_token_file(token_file, event):
    """Interactively run the OAuth2 flow and persist the credentials.

    Sends the authorization URL to the configured private group, waits for the
    user to reply with the verification code, exchanges it for credentials, and
    stores them in `token_file`. Returns the `Storage` wrapping that file.
    """
    # Run through the OAuth flow and retrieve credentials
    flow = OAuth2WebServerFlow(
        CLIENT_ID,
        CLIENT_SECRET,
        OAUTH_SCOPE,
        redirect_uri=REDIRECT_URI
    )
    authorize_url = flow.step1_get_authorize_url()
    async with event.client.conversation(Config.PRIVATE_GROUP_BOT_API_ID) as conv:
        await conv.send_message(f"Go to the following link in your browser: {authorize_url} and reply the code")
        # Wait for the user's own (outgoing) reply in the same chat with the code.
        response = conv.wait_event(events.NewMessage(
            outgoing=True,
            chats=Config.PRIVATE_GROUP_BOT_API_ID
        ))
        response = await response
        code = response.message.message.strip()
        # Exchange the verification code for credentials and persist them.
        credentials = flow.step2_exchange(code)
        storage = Storage(token_file)
        storage.put(credentials)
        return storage
def authorize(token_file, storage):
    """Build an authorized httplib2.Http from stored credentials (refreshing them first)."""
    if storage is None:
        storage = Storage(token_file)
    credentials = storage.get()
    # Refresh first so the access token is valid, then wrap the transport with it.
    transport = httplib2.Http()
    credentials.refresh(transport)
    return credentials.authorize(transport)
async def upload_file(http, file_path, file_name, mime_type, event):
    """Resumable upload of `file_path` to Google Drive; returns the download URL.

    Progress is reported by editing the Telegram `event` message. The file is
    made readable by anyone with the link.
    """
    # Create Google Drive service instance
    drive_service = build("drive", "v2", http=http, cache_discovery=False)
    # File body description
    media_body = MediaFileUpload(file_path, mimetype=mime_type, resumable=True)
    body = {
        "title": file_name,
        "description": "Uploaded using github.com/ravana69/pornhub.",
        "mimeType": mime_type,
    }
    # Only attach a parent folder when one is configured; otherwise Drive's default applies.
    if parent_id:
        body["parents"] = [{"id": parent_id}]
    # Permissions body description: anyone who has link can read.
    # Other permissions can be found at https://developers.google.com/drive/v2/reference/permissions
    permissions = {
        "role": "reader",
        "type": "anyone",
        "value": None,
        "withLink": True
    }
    # Insert a file: next_chunk() drives the resumable upload, returning a
    # non-None response only once the final chunk is accepted.
    file = drive_service.files().insert(body=body, media_body=media_body)
    response = None
    while response is None:
        status, response = file.next_chunk()
        await asyncio.sleep(5)
        if status:
            percentage = int(status.progress() * 100)
            progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
                ''.join(["●" for i in range(math.floor(percentage / 5))]),
                ''.join(["○" for i in range(20 - math.floor(percentage / 5))]),
                round(percentage, 2))
            await event.edit(f"Uploading to Google Drive...\n\nFile Name: {file_name}\n{progress_str}")
    if file:
        await event.edit(file_name + " Uploaded Successfully")
    # Insert new permissions
    drive_service.permissions().insert(fileId=response.get('id'), body=permissions).execute()
    # Define file instance and get url for download
    # NOTE(review): this GET result is never used -- the link below comes from the
    # upload response itself; confirm whether the extra request is needed.
    file = drive_service.files().get(fileId=response.get('id')).execute()
    download_url = response.get("webContentLink")
    return download_url
@borg.on(admin_cmd(pattern="gfolder ?(.*)", allow_sudo=True))
async def _(event):
    """Reply with a link to the currently configured Google Drive upload folder."""
    if event.fwd_from:
        return
    folder_link = f"https://drive.google.com/drive/u/2/folders/{parent_id}"
    await event.edit(f"Your current Google Drive Upload Directory : [Here]({folder_link})")
@borg.on(admin_cmd(pattern="hideglink ?(.*)", allow_sudo=True))
async def download(dryb):
    """For the .hideglink command: fetch a file and upload it to Google Drive.

    The file is taken either from the URL given as argument
    (``url`` or ``url | file name``) or from the replied-to message's media.
    """
    if not dryb.text[0].isalpha() and dryb.text[0] not in ("/", "#", "@", "!"):
        if dryb.fwd_from:
            return
        await dryb.edit("Processing ...")
        input_str = dryb.pattern_match.group(1)
        if CLIENT_ID is None or CLIENT_SECRET is None:
            # BUGFIX: was `return false` -- Python's literal is False, so the
            # original raised NameError instead of returning.
            return False
        # BUGFIX: directory creation used to be the first branch of the
        # if/elif chain, so on a fresh run (download dir missing) the
        # download branches were skipped entirely. Create the directory
        # unconditionally, then pick the download source.
        if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
            os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
        required_file_name = None
        if input_str:
            # Download from a direct URL, optionally "url | custom name".
            start = datetime.now()
            url = input_str
            file_name = os.path.basename(url)
            if "|" in input_str:
                url, file_name = input_str.split("|")
                url = url.strip()
                file_name = file_name.strip()
            # NOTE(review): assumes TMP_DOWNLOAD_DIRECTORY ends with a path
            # separator -- confirm.
            downloaded_file_name = Config.TMP_DOWNLOAD_DIRECTORY + "" + file_name
            downloader = SmartDL(url, downloaded_file_name, progress_bar=False)
            downloader.start(blocking=False)
            c_time = time.time()
            display_message = None
            while not downloader.isFinished():
                status = downloader.get_status().capitalize()
                total_length = downloader.filesize if downloader.filesize else None
                downloaded = downloader.get_dl_size()
                percentage = downloader.get_progress() * 100
                # 20-slot text progress bar, one slot per 5%.
                progress_str = "[{0}{1}]\nProgress: {2}%".format(
                    ''.join(["●" for i in range(math.floor(percentage / 5))]),
                    ''.join(["○" for i in range(20 - math.floor(percentage / 5))]),
                    round(percentage, 2))
                estimated_total_time = downloader.get_eta(human=True)
                try:
                    current_message = f"{status}...\nURL: {url}\nFile Name: {file_name}\n{progress_str}\n{humanbytes(downloaded)} of {humanbytes(total_length)}\nETA: {estimated_total_time}"
                    # Only edit the message when its content actually changed.
                    if current_message != display_message:
                        await dryb.edit(current_message)
                        display_message = current_message
                    await asyncio.sleep(20)
                except Exception as e:
                    logger.info(str(e))
            end = datetime.now()
            ms = (end - start).seconds
            if downloader.isSuccessful():
                await dryb.edit(
                    "Downloaded to `{}` in {} seconds.\nNow Uploading to Google Drive...".format(
                        downloaded_file_name, ms)
                )
                required_file_name = downloaded_file_name
            else:
                await dryb.edit(
                    "Incorrect URL\n{}".format(url)
                )
        elif dryb.reply_to_msg_id:
            # Download the media attached to the replied-to message.
            start = datetime.now()
            try:
                c_time = time.time()
                downloaded_file_name = await dryb.client.download_media(
                    await dryb.get_reply_message(),
                    Config.TMP_DOWNLOAD_DIRECTORY,
                    progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
                        progress(d, t, dryb, c_time, "Downloading...")
                    )
                )
            except Exception as e:  # pylint:disable=C0103,W0703
                await dryb.edit(str(e))
            else:
                end = datetime.now()
                required_file_name = downloaded_file_name
                ms = (end - start).seconds
                await dryb.edit(
                    "Downloaded to `{}` in {} seconds.\nNow Uploading to GDrive...".format(
                        downloaded_file_name, ms)
                )
        if required_file_name:
            # Persist a pre-provisioned auth token if one is configured.
            if Config.AUTH_TOKEN_DATA is not None:
                with open(G_DRIVE_TOKEN_FILE, "w") as t_file:
                    t_file.write(Config.AUTH_TOKEN_DATA)
            # Check if token file exists, if not create it by requesting authorization code
            if not os.path.isfile(G_DRIVE_TOKEN_FILE):
                storage = await create_token_file(G_DRIVE_TOKEN_FILE, dryb)
                http = authorize(G_DRIVE_TOKEN_FILE, storage)
            # Authorize, get file parameters, upload file and print out result URL for download
            # NOTE(review): this unconditionally re-authorizes, making the
            # `authorize(..., storage)` call above redundant; kept for parity.
            http = authorize(G_DRIVE_TOKEN_FILE, None)
            file_name, mime_type = file_ops(required_file_name)
            # required_file_name will have the full path
            # Sometimes API fails to retrieve starting URI, we wrap it.
            try:
                g_drive_link = await upload_file(http, required_file_name, file_name, mime_type, dryb)
                await dryb.edit(f"File:`{required_file_name}`\nHas Successfully Uploaded to : [Google Drive]({g_drive_link})")
            except Exception as e:
                await dryb.edit(f"Error while uploading to Google Drive\nError Code:\n`{e}`")
# Derive upload metadata (display name + MIME type) from a file path.
def file_ops(file_path):
    """Return (base name, MIME type) for *file_path*, defaulting to text/plain."""
    detected = guess_type(file_path)[0]
    base_name = file_path.rsplit("/", 1)[-1]
    return base_name, detected or "text/plain"
async def create_token_file(token_file, event):
    """Interactively obtain OAuth credentials and persist them to *token_file*.

    Sends the authorization URL to the configured private group, waits for
    the user to reply with the verification code, exchanges it for
    credentials and returns the populated Storage object.
    """
    # Run through the OAuth flow and retrieve credentials
    flow = OAuth2WebServerFlow(
        CLIENT_ID,
        CLIENT_SECRET,
        OAUTH_SCOPE,
        redirect_uri=REDIRECT_URI
    )
    authorize_url = flow.step1_get_authorize_url()
    async with event.client.conversation(Config.PRIVATE_GROUP_BOT_API_ID) as conv:
        await conv.send_message(f"Go to the following link in your browser: {authorize_url} and reply the code")
        # Wait for the next outgoing message in the same chat: the auth code.
        response = conv.wait_event(events.NewMessage(
            outgoing=True,
            chats=Config.PRIVATE_GROUP_BOT_API_ID
        ))
        response = await response
        code = response.message.message.strip()
        # Exchange the code for credentials and store them on disk.
        credentials = flow.step2_exchange(code)
        storage = Storage(token_file)
        storage.put(credentials)
        return storage
def authorize(token_file, storage):
    """Build an authorized httplib2.Http transport from stored OAuth credentials."""
    # Fall back to the on-disk token store when no Storage was supplied.
    if storage is None:
        storage = Storage(token_file)
    credentials = storage.get()
    # Refresh the access token, then wrap the transport with the credentials.
    transport = httplib2.Http()
    credentials.refresh(transport)
    return credentials.authorize(transport)
async def upload_file(http, file_path, file_name, mime_type, event):
    """Resumable upload of a local file to Google Drive (v2 API) -- hideglink variant.

    Edits *event*'s message with a textual progress bar while uploading.
    Unlike the plain gdrive upload, the returned "link" is deliberately
    looked up under a non-existent key (see note at the bottom).
    """
    # Create Google Drive service instance
    drive_service = build("drive", "v2", http=http, cache_discovery=False)
    # File body description
    media_body = MediaFileUpload(file_path, mimetype=mime_type, resumable=True)
    body = {
        "title": file_name,
        "description": "Uploaded using github.com/ravana69/pornhub.",
        "mimeType": mime_type,
    }
    # When a parent folder is configured, upload into it.
    if parent_id:
        body["parents"] = [{"id": parent_id}]
    # Permissions body description: anyone who has the link can read.
    # Other permissions can be found at https://developers.google.com/drive/v2/reference/permissions
    permissions = {
        "role": "reader",
        "type": "anyone",
        "value": None,
        "withLink": True
    }
    # Start the resumable insert request and drive it chunk by chunk.
    file = drive_service.files().insert(body=body, media_body=media_body)
    response = None
    while response is None:
        # next_chunk() yields (status, None) until the upload completes.
        status, response = file.next_chunk()
        await asyncio.sleep(5)
        if status:
            percentage = int(status.progress() * 100)
            # 20-slot text progress bar, one slot per 5%.
            progress_str = "[{0}{1}]\nProgress: {2}%\n".format(
                ''.join(["●" for i in range(math.floor(percentage / 5))]),
                ''.join(["○" for i in range(20 - math.floor(percentage / 5))]),
                round(percentage, 2))
            await event.edit(f"Uploading to Google Drive...\n\nFile Name: {file_name}\n{progress_str}")
    # NOTE(review): `file` is the insert request object and is presumably
    # always truthy, so this message is sent unconditionally -- confirm.
    if file:
        await event.edit(file_name + " Uploaded Successfully")
    # Insert new permissions
    drive_service.permissions().insert(fileId=response.get('id'), body=permissions).execute()
    # Define file instance and get url for download
    file = drive_service.files().get(fileId=response.get('id')).execute()
    # NOTE(review): the Drive response has no "G-Drive Link Hidden" key, so
    # this always returns None -- apparently intentional for the `hideglink`
    # command (the link is meant to stay hidden).  Confirm.
    download_url = response.get("G-Drive Link Hidden")
    return download_url
| 43.435597 | 189 | 0.616218 |
76df59cb560f26f2e7488ab774426e74ef766093 | 638 | py | Python | setup.py | irskep/gw0rp | f985c919141e19ee099766c18b7fe416f921836c | [
"MIT",
"BSD-3-Clause"
] | 1 | 2017-05-25T17:41:23.000Z | 2017-05-25T17:41:23.000Z | setup.py | irskep/gw0rp | f985c919141e19ee099766c18b7fe416f921836c | [
"MIT",
"BSD-3-Clause"
] | null | null | null | setup.py | irskep/gw0rp | f985c919141e19ee099766c18b7fe416f921836c | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
OPTIONS = dict(
argv_emulation=True,
frameworks=['libavbin.dylib','pymunk/libchipmunk.dylib'],
plist = dict(CFBundleIconFile='gw0rp.icns')#, PyRuntimeLocations=['/Library/Frameworks/Python.framework/Versions/Current/Python', '/System/Library/Frameworks/Python.framework/Versions/Current/Python'])
)
setup(
app=['gw0rp.py'],
data_files=['Data','gamelib','lepton',
'pymunk','psyco','gw0rp.icns', 'yaml', 'pyglet'],
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| 29 | 205 | 0.688088 |
7531bea649843a70ae47b1cd8de547237f6b2826 | 578 | py | Python | 22. Generate Parentheses.py | rohitpatwa/leetcode | f4826763e8f154cac9134d53b154b8299acd39a8 | [
"Xnet",
"X11",
"CECILL-B"
] | 1 | 2020-07-15T20:48:27.000Z | 2020-07-15T20:48:27.000Z | 22. Generate Parentheses.py | rohitpatwa/leetcode | f4826763e8f154cac9134d53b154b8299acd39a8 | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | 22. Generate Parentheses.py | rohitpatwa/leetcode | f4826763e8f154cac9134d53b154b8299acd39a8 | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | # Open parenthesen < n and closed parantheses < open. Keep recursing and backtrackting to generate all combinations.
def generateParenthesis(self, n: int) -> List[str]:
    """Return all well-formed combinations of *n* pairs of parentheses.

    NOTE(review): written as a method of a LeetCode ``Solution`` class;
    ``List`` must come from ``typing`` in the full file.
    """
    self.res = []          # accumulator filled by the backtracking helper
    self.n = n
    self.helper("", 0, 0)  # start with empty prefix: 0 open, 0 closed
    return self.res
def helper(self, curr, start, end):
    """Backtracking step: *curr* holds *start* '(' and *end* ')' so far.

    Appends every completed well-formed string of length 2*n to self.res.
    (Also strips dataset-extraction residue that was fused onto the last
    line of the original.)
    """
    # A prefix is complete once it holds all n '(' and n ')'.
    if len(curr) == 2 * self.n:
        self.res.append(curr)
        return
    # May still open another parenthesis.
    if start < self.n:
        self.helper(curr + "(", start + 1, end)
    # May close only while an unmatched '(' remains.
    if end < start:
        self.helper(curr + ")", start, end + 1)
fae30c9c8c8548d017a3cfd9a1274ae471278fb2 | 158 | py | Python | programming/Workshop Work/Exercises/ex53.py | probablyacat/degree_level1 | c08262514032ed86eb68c680e369bd9095954745 | [
"MIT"
] | null | null | null | programming/Workshop Work/Exercises/ex53.py | probablyacat/degree_level1 | c08262514032ed86eb68c680e369bd9095954745 | [
"MIT"
] | null | null | null | programming/Workshop Work/Exercises/ex53.py | probablyacat/degree_level1 | c08262514032ed86eb68c680e369bd9095954745 | [
"MIT"
] | null | null | null | #finding a square root of a number
#lets define some things
#x can be any positive integer
x = 4.0
#command for square rooting value x
y = x ** 0.5
print y
| 15.8 | 35 | 0.71519 |
e790386c3f583533ffe8dc416ebf5f860593fb4f | 1,925 | py | Python | nbs/dl2/exp/nb_09c.py | gdario/course-v3 | 9e4abe6abcb640282ccee553e6f244611917916c | [
"Apache-2.0"
] | null | null | null | nbs/dl2/exp/nb_09c.py | gdario/course-v3 | 9e4abe6abcb640282ccee553e6f244611917916c | [
"Apache-2.0"
] | null | null | null | nbs/dl2/exp/nb_09c.py | gdario/course-v3 | 9e4abe6abcb640282ccee553e6f244611917916c | [
"Apache-2.0"
] | null | null | null |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/09c_add_progress_bar.ipynb
from exp.nb_09b import *
import time
from fastprogress.fastprogress import master_bar, progress_bar
from fastprogress.fastprogress import format_time
class AvgStatsCallback(Callback):
    """Track and log averaged train/valid statistics for each epoch."""
    def __init__(self, metrics):
        # Separate accumulators for the training and validation phases.
        self.train_stats = AvgStats(metrics, True)
        self.valid_stats = AvgStats(metrics, False)
    def begin_fit(self):
        # Header row: epoch, train_<metric>..., valid_<metric>..., time.
        met_names = ['loss'] + [m.__name__ for m in self.train_stats.metrics]
        names = ['epoch'] + [f'train_{n}' for n in met_names] + [
            f'valid_{n}' for n in met_names] + ['time']
        self.logger(names)
    def begin_epoch(self):
        # Reset both accumulators and start the epoch timer.
        self.train_stats.reset()
        self.valid_stats.reset()
        self.start_time = time.time()
    def after_loss(self):
        # Accumulate into whichever phase the runner is currently in.
        stats = self.train_stats if self.in_train else self.valid_stats
        with torch.no_grad():
            stats.accumulate(self.run)
    def after_epoch(self):
        # One table row per epoch; metric values formatted to 6 decimals.
        stats = [str(self.epoch)]
        for o in [self.train_stats, self.valid_stats]:
            stats += [f'{v:.6f}' for v in o.avg_stats]
        stats += [format_time(time.time() - self.start_time)]
        self.logger(stats)
class ProgressCallback(Callback):
    """Render fastprogress master/child bars during training.

    (Also strips dataset-extraction residue that was fused onto the last
    line of the original.)
    """
    _order = -1  # callback-ordering hint

    def begin_fit(self):
        # One master bar across all epochs; route the run's logger through
        # it so metric rows are rendered as a table.
        self.mbar = master_bar(range(self.epochs))
        self.mbar.on_iter_begin()
        self.run.logger = partial(self.mbar.write, table=True)

    def after_fit(self):
        self.mbar.on_iter_end()

    def after_batch(self):
        self.pb.update(self.iter)

    def begin_epoch(self):
        self.set_pb()

    def begin_validate(self):
        self.set_pb()

    def set_pb(self):
        # Child bar over the current dataloader; advance the master bar to
        # the current epoch.
        self.pb = progress_bar(self.dl, parent=self.mbar)  # , auto_update=False
        self.mbar.update(self.epoch)
da52fa57f203232ac4e03cabaf697a967a491846 | 1,242 | py | Python | project_euler/023_abundant_numbers.py | sabih-h/number_theory_problems | 13c88801c81dac975e4712041f5661ff74c62ab4 | [
"MIT"
] | null | null | null | project_euler/023_abundant_numbers.py | sabih-h/number_theory_problems | 13c88801c81dac975e4712041f5661ff74c62ab4 | [
"MIT"
] | null | null | null | project_euler/023_abundant_numbers.py | sabih-h/number_theory_problems | 13c88801c81dac975e4712041f5661ff74c62ab4 | [
"MIT"
] | null | null | null | from datetime import datetime
start_time = datetime.now()
# Takes a number as argument and returns the SET of divisors found by trial
# division (despite the name, the running sum in `total` is never returned).
def sum_proper_divisors(num):
    # NOTE(review): the odd branch starts its range at 1, so x = 1 adds both
    # 1 and num/1 == num itself -- num is not a *proper* divisor.
    # NOTE(review): `total` is accumulated but unused; the caller
    # abundant_numbers() compares the returned *set* against an int.
    total = 0
    if num % 2 == 0:
        a_set = {1}
        for x in range(2, int(num/2)):
            if num % x == 0:
                a_set.add(num/x)  # complementary divisor (float under Python 3)
                a_set.add(x)
                total += x
    else:
        a_set = set()
        for x in range(1, int(num/2), 2):  # odd numbers have only odd divisors
            if num % x == 0:
                a_set.add(num/x)
                a_set.add(x)
                total += x
    return a_set
print sorted(sum_proper_divisors(123456788))
# Collects all abundant numbers below `i`; the original intent of writing
# abundant / non-abundant numbers to separate files is only partly implemented.
def abundant_numbers(i = 28124):
    abundant_set = set()
    non_abundant_set = set()
    for x in range(1, i):
        # NOTE(review): sum_proper_divisors returns a *set*, so `set > int`
        # raises TypeError on Python 3 (and is an arbitrary ordering on
        # Python 2); the intended test is sum(divisors) > x.
        if sum_proper_divisors(x) > x:
            abundant_set.add(x)
        # else:
        #     non_abundant_set.add(x)
    return abundant_set
# abundant_numbers()
def sum_of_abundant_nums():
    # Project Euler 23: sum of all positive integers <= 28123 that cannot be
    # written as the sum of two abundant numbers.
    a_set = set()
    a_list = abundant_numbers()
    y = 0
    for x in a_list:
        for y in a_list:
            # NOTE(review): the early break assumes the set iterates in
            # ascending order, which Python does not guarantee for sets.
            if x + y > 28123:
                break
            else:
                a_set.add(x+y)
    non_sums = set()
    for x in range(1, 28124):
        if x not in a_set:
            non_sums.add(x)
    print sum(non_sums)  # Python 2 print statement
    return sum(non_sums)
# sum_of_abundant_nums()
print datetime.now() - start_time
| 18.537313 | 58 | 0.663446 |
a18dde41609ec9c5eb76efc174de6a9d7f426ae6 | 361 | py | Python | ex090.py | renatocortez/ExerciciosPython | 9095e1666953faf18c8e5d20b8d0fc3b2493a092 | [
"MIT"
] | null | null | null | ex090.py | renatocortez/ExerciciosPython | 9095e1666953faf18c8e5d20b8d0fc3b2493a092 | [
"MIT"
] | null | null | null | ex090.py | renatocortez/ExerciciosPython | 9095e1666953faf18c8e5d20b8d0fc3b2493a092 | [
"MIT"
] | null | null | null | aluno = dict()
aluno['nome'] = str(input('Nome: '))
aluno['media'] = float(input(f'Média de {aluno["nome"]}: '))
if aluno['media'] >= 7:
aluno['situacao'] = 'Aprovado'
elif 5 <= aluno['media'] < 7:
aluno['situacao'] = 'Recuperação'
else:
aluno['situacao'] = 'Reprovado'
print('-=' * 30)
for k, v in aluno.items():
print(f'{k} é igual a {v}')
| 24.066667 | 60 | 0.570637 |
0964c4c292d792376bc5911826e6edcbc3248f4b | 234 | py | Python | Primeiros Passos/1-DAY ONE/Calculo de Acrescimov2.py | pedroluceena/TreinosPI | c11a76a1361f61a71e16edb2127eb08c12c090e1 | [
"MIT"
] | null | null | null | Primeiros Passos/1-DAY ONE/Calculo de Acrescimov2.py | pedroluceena/TreinosPI | c11a76a1361f61a71e16edb2127eb08c12c090e1 | [
"MIT"
] | null | null | null | Primeiros Passos/1-DAY ONE/Calculo de Acrescimov2.py | pedroluceena/TreinosPI | c11a76a1361f61a71e16edb2127eb08c12c090e1 | [
"MIT"
] | null | null | null | print('AUMENTO DE SALARIO')
salario=float(input('Qual é o valor do salario:R$'))
acrescimo=salario*0.15
s=salario+acrescimo
print('Parabéns o seu salario é R${} ,teve um aumento de 15% com isso ficou R${:.2f} reais'.format(salario,s)) | 46.8 | 110 | 0.735043 |
2f7359d27bc3e89c449b486f851556d3df150ec1 | 1,165 | py | Python | tests/algorithms/test_algorithm_hparams.py | mosaicml/composer | a253d7dee8278e66d036bc191111bbe264ace0da | [
"Apache-2.0"
] | 945 | 2021-10-13T16:24:20.000Z | 2022-03-31T21:21:54.000Z | tests/algorithms/test_algorithm_hparams.py | mosaicml/composer | a253d7dee8278e66d036bc191111bbe264ace0da | [
"Apache-2.0"
] | 544 | 2021-10-13T20:23:27.000Z | 2022-03-31T02:47:54.000Z | tests/algorithms/test_algorithm_hparams.py | mosaicml/composer | a253d7dee8278e66d036bc191111bbe264ace0da | [
"Apache-2.0"
] | 39 | 2021-10-13T14:33:33.000Z | 2022-03-31T11:13:19.000Z | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
from typing import Type
import pytest
import composer.algorithms
from composer.algorithms.algorithm_hparams_registry import algorithm_registry
from composer.core import Algorithm
from tests.algorithms.algorithm_settings import get_alg_kwargs, get_algs_with_marks
from tests.common import get_module_subclasses
from tests.common.hparams import assert_in_registry, assert_yaml_loads
@pytest.mark.parametrize("alg_cls", get_algs_with_marks())
def test_algs_are_constructable(alg_cls: Type[Algorithm]):
assert isinstance(alg_cls(**get_alg_kwargs(alg_cls)), Algorithm)
@pytest.mark.parametrize("alg_cls", get_module_subclasses(composer.algorithms, Algorithm))
def test_all_algs_in_registry(alg_cls: Type[Algorithm]):
assert_in_registry(alg_cls, algorithm_registry)
@pytest.mark.parametrize("alg_cls", get_algs_with_marks())
def test_algs_load_from_yaml(alg_cls: Type[Algorithm]):
kwargs = get_alg_kwargs(alg_cls)
if kwargs is None:
pytest.xfail(f"Missing settings for algorithm {alg_cls.__name__}")
assert_yaml_loads(alg_cls, kwargs, expected=alg_cls)
| 36.40625 | 90 | 0.820601 |
c8060d1e820e0441ece9d1e2942e35359593e29e | 714 | py | Python | create_derivatives/test_helpers.py | RockefellerArchiveCenter/pictor | 244b1a2016664974f38885d0ab2e6ac472306a85 | [
"MIT"
] | null | null | null | create_derivatives/test_helpers.py | RockefellerArchiveCenter/pictor | 244b1a2016664974f38885d0ab2e6ac472306a85 | [
"MIT"
] | 88 | 2021-07-21T15:14:40.000Z | 2022-03-10T20:11:40.000Z | create_derivatives/test_helpers.py | RockefellerArchiveCenter/pictor | 244b1a2016664974f38885d0ab2e6ac472306a85 | [
"MIT"
] | null | null | null | import shutil
from pathlib import Path
def make_dir(directory_path, remove_first=False, parents=True):
"""Makes a directory. If remove_first is set to true, removes directory if it exists; if set to false, does not make directory if it exists"""
path = Path(directory_path)
if path.exists() and remove_first:
shutil.rmtree(directory_path)
if not path.exists():
path.mkdir(parents=parents)
def set_up_bag(tmp_dir, fixture_directory, bag):
"""Adds an uncompressed bag fixture to the temp directory and database"""
bag_path = Path(tmp_dir, bag)
if not bag_path.exists():
shutil.copytree(Path("create_derivatives", "fixtures", fixture_directory, bag), bag_path)
| 37.578947 | 146 | 0.72409 |
10123e28c3e589361067b10abd3dd3b087cdeee9 | 5,858 | py | Python | model.py | Macc520/Vessel-segement-Pytorch | 6c76d8cad7f0d42cc359515dfc43956858382cb0 | [
"Unlicense"
] | null | null | null | model.py | Macc520/Vessel-segement-Pytorch | 6c76d8cad7f0d42cc359515dfc43956858382cb0 | [
"Unlicense"
] | null | null | null | model.py | Macc520/Vessel-segement-Pytorch | 6c76d8cad7f0d42cc359515dfc43956858382cb0 | [
"Unlicense"
] | null | null | null | # ==================================================================
class attention_block(nn.Module):
def __init__(self,ch_in, ch_out):
super(attention_block,self).__init__()
self.sigmoid = nn.Sigmoid()
self.conv = nn.Conv2d(ch_in, ch_out, 1)
self.BN = nn.BatchNorm2d(ch_out)
def forward(self,x,f):
x1 = x
x = self.sigmoid(x)
f = f.permute(0,1,3,2)
y = torch.mul(f,x)
# y = y/self.BN(x)*self.BN(f)
y = self.conv(y)
y = x1+y
return y
class conv_block(nn.Module):
def __init__(self, ch_in, ch_out):
super(conv_block, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU(),
nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU()
)
def forward(self, x):
x = self.conv(x)
return x
class up_conv(nn.Module):
def __init__(self, ch_in, ch_out):
super(up_conv, self).__init__()
self.up = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
nn.BatchNorm2d(ch_out),
nn.ReLU()
)
def forward(self, x):
x = self.up(x)
return x
class U_Net(nn.Module):
def __init__(self, img_ch=3, output_ch=1):
super(U_Net, self).__init__()
self.attention1 = attention_block(128,128)
self.attention2 = attention_block(128,128)
self.attention3 = attention_block(64,64)
# self.attention1 = Attention_block(128,128,64)
# self.attention2 = Attention_block(64,64,32)
# self.attention3 = attention_block(64,64)
self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.upsampling1 = nn.Upsample(scale_factor=2)
self.upsampling2 = nn.Upsample(scale_factor=2)
self.upsampling3 = nn.Upsample(scale_factor=2)
self.Conv1 = conv_block(ch_in=img_ch, ch_out=64)
self.Conv2 = conv_block(ch_in=64, ch_out=128)
self.Conv3 = conv_block(ch_in=128, ch_out=256)
self.Conv4 = conv_block(ch_in=256, ch_out=512)
self.Conv5 = conv_block(ch_in=512, ch_out=1024)
self.Up5 = up_conv(ch_in=1024, ch_out=512)
self.Up_conv5 = conv_block(ch_in=1024, ch_out=512)
self.Up4 = up_conv(ch_in=512, ch_out=256)
self.Up_conv4 = conv_block(ch_in=512, ch_out=256)
self.Up3 = up_conv(ch_in=256, ch_out=128)
self.Up_conv3 = conv_block(ch_in=256, ch_out=128)
self.Up2 = up_conv(ch_in=128, ch_out=64)
self.Up_conv2 = conv_block(ch_in=128, ch_out=64)
self.UP6 = up_conv(ch_in=128, ch_out=128)
self.UP7 = up_conv(ch_in=256, ch_out=256)
# self.Conv6 = conv_block(ch_in=128, ch_out=64)
# self.Conv7 = conv_block(ch_in=256, ch_out=512)
# self.Up_conv8 = conv_block(ch_in=512, ch_out=1024)
self.Conv_1x1 = nn.Conv2d(64, output_ch, kernel_size=1, stride=1, padding=0)
self.nl1 = nn.Conv2d(192,128,(7,1),stride=(1, 1),padding=(3,0)) # 7x1卷积
self.nl2 = nn.Conv2d(128,64,(1,7),stride=(1, 1),padding=(0,3)) # 1x7卷积
# self.nl3 = nn.Conv2d(192,64,(3,3),stride=(1, 1),padding=(1,1))
# self.nl3 = nn.Conv2d(192,64,(7,7),stride=(1, 1),padding=(3,3))
# self.nl3 = nn.Conv2d(192,64,(9,9),stride=(1, 1),padding=(4,4))
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
def forward(self, x):
x1 = self.Conv1(x)
x2 = self.Maxpool(x1)
x2 = self.Conv2(x2)
Upx2 = self.Up2(x2) #64*48*48
sum_1 = torch.cat((Upx2, x1), dim=1) #128*48*48
# sum_1 = self.Up_conv8(sum_1)
#print(sum_1.shape)
# print(Upx2.shape)
x3 = self.Maxpool(x2)
x3 = self.Conv3(x3)
x4 = self.Maxpool(x3)
x4 = self.Conv4(x4)
x5 = self.Maxpool(x4)
x5 = self.Conv5(x5)
# decoding + concat path
d5 = self.Up5(x5)
d5 = torch.cat((x4, d5), dim=1)
d5 = self.Up_conv5(d5)
d4 = self.Up4(d5)
d4 = torch.cat((x3, d4), dim=1)
d4 = self.Up_conv4(d4)
d4_1 = d4
# upd4 = self.Up3(d4_1)
#print(upd4.shape) #1*128*24*24
# upd4 = self.Up_conv3(upd4)
upd4 = self.upsampling1(d4_1)
upd4 = self.upsampling2(upd4)
#print(upd4.shape)#1*256*48*48
upd4 = self.Up_conv3(upd4)
#print(upd4.shape)
atten1 = self.attention1(sum_1,upd4)
#print(atten1.shape)
d3 = self.Up3(d4)
d3 = torch.cat((x2, d3), dim=1)
d3 = self.Up_conv3(d3)
d3_1 = d3
upd3 = self.upsampling3(d3_1)
#print(upd4.shape) #1*128*24*24
#print(upd4.shape)#1*64*48*48
#upd4 = self.Conv6(upd4)
#print(upd3.shape)
atten2 = self.attention1(atten1,upd3)
#print(atten2.shape)
d2 = self.Up2(d3)
d2 = torch.cat((x1, d2), dim=1)
d2 = self.Up_conv2(d2)
d2_1 = d2
atten2 = self.Up_conv2(atten2)
atten3 = self.attention3(atten2,d2_1)
#print(atten3.shape)
d1 = self.Conv_1x1(atten1)
d1 = F.softmax(d1,dim=1) # mine
d2 = self.Conv_1x1(atten2)
d2 = F.softmax(d2,dim=1)
d3 = self.Conv_1x1(atten3)
d3 = F.softmax(d3,dim=1)
d4 = torch.cat((atten1,atten2,atten3),dim=1)
d4 = self.nl1(d4)
d4 = self.relu1(d4)
d4 = self.nl2(d4)
d4 = self.relu1(d4)
d4 = self.Conv_1x1(d4)
d4 = F.softmax(d4,dim=1)
return d1,d2,d3,d4
| 30.831579 | 85 | 0.551895 |
544e421e41e3e87bc17e31b8398c0cd18dadabf9 | 31,487 | py | Python | espnet/nets/pytorch_backend/e2e_asr_mulenc.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/e2e_asr_mulenc.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | espnet/nets/pytorch_backend/e2e_asr_mulenc.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Copyright 2017 Johns Hopkins University (Ruizhi Li)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Define e2e module for multi-encoder network. https://arxiv.org/pdf/1811.04903.pdf."""
import argparse
import logging
import math
import os
from itertools import groupby
import chainer
import numpy as np
import torch
from chainer import reporter
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.e2e_asr_common import label_smoothing_dist
from espnet.nets.pytorch_backend.ctc import ctc_for
from espnet.nets.pytorch_backend.nets_utils import (
get_subsample,
pad_list,
to_device,
to_torch_tensor,
)
from espnet.nets.pytorch_backend.rnn.attentions import att_for
from espnet.nets.pytorch_backend.rnn.decoders import decoder_for
from espnet.nets.pytorch_backend.rnn.encoders import Encoder, encoder_for
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.utils.cli_utils import strtobool
CTC_LOSS_THRESHOLD = 10000
class Reporter(chainer.Chain):
    """Chainer reporter wrapper for multi-encoder training statistics."""

    def report(self, loss_ctc_list, loss_att, acc, cer_ctc_list, cer, wer, mtl_loss):
        """Report one set of statistics to the chainer reporter.

        ``loss_ctc_list`` / ``cer_ctc_list`` are ordered as
        [weighted value, encoder-1 value, ..., encoder-N value].
        """
        num_encs = len(loss_ctc_list) - 1
        reporter.report({"loss_ctc": loss_ctc_list[0]}, self)
        for enc_idx in range(1, num_encs + 1):
            reporter.report({"loss_ctc{}".format(enc_idx): loss_ctc_list[enc_idx]}, self)
        reporter.report({"loss_att": loss_att}, self)
        reporter.report({"acc": acc}, self)
        reporter.report({"cer_ctc": cer_ctc_list[0]}, self)
        for enc_idx in range(1, num_encs + 1):
            reporter.report({"cer_ctc{}".format(enc_idx): cer_ctc_list[enc_idx]}, self)
        reporter.report({"cer": cer}, self)
        reporter.report({"wer": wer}, self)
        logging.info("mtl loss:" + str(mtl_loss))
        reporter.report({"loss": mtl_loss}, self)
class E2E(ASRInterface, torch.nn.Module):
"""E2E module.
:param List idims: List of dimensions of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments for multi-encoder setting."""
E2E.encoder_add_arguments(parser)
E2E.attention_add_arguments(parser)
E2E.decoder_add_arguments(parser)
E2E.ctc_add_arguments(parser)
return parser
@staticmethod
def encoder_add_arguments(parser):
"""Add arguments for encoders in multi-encoder setting."""
group = parser.add_argument_group("E2E encoder setting")
group.add_argument(
"--etype",
action="append",
type=str,
choices=[
"lstm",
"blstm",
"lstmp",
"blstmp",
"vgglstmp",
"vggblstmp",
"vgglstm",
"vggblstm",
"gru",
"bgru",
"grup",
"bgrup",
"vgggrup",
"vggbgrup",
"vgggru",
"vggbgru",
],
help="Type of encoder network architecture",
)
group.add_argument(
"--elayers",
type=int,
action="append",
help="Number of encoder layers "
"(for shared recognition part in multi-speaker asr mode)",
)
group.add_argument(
"--eunits",
"-u",
type=int,
action="append",
help="Number of encoder hidden units",
)
group.add_argument(
"--eprojs", default=320, type=int, help="Number of encoder projection units"
)
group.add_argument(
"--subsample",
type=str,
action="append",
help="Subsample input frames x_y_z means "
"subsample every x frame at 1st layer, "
"every y frame at 2nd layer etc.",
)
return parser
    @staticmethod
    def attention_add_arguments(parser):
        """Add arguments for attentions in multi-encoder setting.

        Per-encoder options use ``action="append"`` (one value per encoder);
        the ``--han-*`` options configure the single hierarchical attention
        network that combines the encoder streams.
        """
        group = parser.add_argument_group("E2E attention setting")
        # attention (per-encoder)
        group.add_argument(
            "--atype",
            type=str,
            action="append",
            choices=[
                "noatt",
                "dot",
                "add",
                "location",
                "coverage",
                "coverage_location",
                "location2d",
                "location_recurrent",
                "multi_head_dot",
                "multi_head_add",
                "multi_head_loc",
                "multi_head_multi_res_loc",
            ],
            help="Type of attention architecture",
        )
        group.add_argument(
            "--adim",
            type=int,
            action="append",
            help="Number of attention transformation dimensions",
        )
        group.add_argument(
            "--awin",
            type=int,
            action="append",
            help="Window size for location2d attention",
        )
        group.add_argument(
            "--aheads",
            type=int,
            action="append",
            help="Number of heads for multi head attention",
        )
        group.add_argument(
            "--aconv-chans",
            type=int,
            action="append",
            help="Number of attention convolution channels \
        (negative value indicates no location-aware attention)",
        )
        group.add_argument(
            "--aconv-filts",
            type=int,
            action="append",
            help="Number of attention convolution filters \
        (negative value indicates no location-aware attention)",
        )
        group.add_argument(
            "--dropout-rate",
            type=float,
            action="append",
            help="Dropout rate for the encoder",
        )
        # hierarchical attention network (HAN): single set of options
        group.add_argument(
            "--han-type",
            default="dot",
            type=str,
            choices=[
                "noatt",
                "dot",
                "add",
                "location",
                "coverage",
                "coverage_location",
                "location2d",
                "location_recurrent",
                "multi_head_dot",
                "multi_head_add",
                "multi_head_loc",
                "multi_head_multi_res_loc",
            ],
            help="Type of attention architecture (multi-encoder asr mode only)",
        )
        group.add_argument(
            "--han-dim",
            default=320,
            type=int,
            help="Number of attention transformation dimensions in HAN",
        )
        group.add_argument(
            "--han-win",
            default=5,
            type=int,
            help="Window size for location2d attention in HAN",
        )
        group.add_argument(
            "--han-heads",
            default=4,
            type=int,
            help="Number of heads for multi head attention in HAN",
        )
        group.add_argument(
            "--han-conv-chans",
            default=-1,
            type=int,
            help="Number of attention convolution channels in HAN \
        (negative value indicates no location-aware attention)",
        )
        group.add_argument(
            "--han-conv-filts",
            default=100,
            type=int,
            help="Number of attention convolution filters in HAN \
        (negative value indicates no location-aware attention)",
        )
        return parser
@staticmethod
def decoder_add_arguments(parser):
"""Add arguments for decoder in multi-encoder setting."""
group = parser.add_argument_group("E2E decoder setting")
group.add_argument(
"--dtype",
default="lstm",
type=str,
choices=["lstm", "gru"],
help="Type of decoder network architecture",
)
group.add_argument(
"--dlayers", default=1, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=320, type=int, help="Number of decoder hidden units"
)
group.add_argument(
"--dropout-rate-decoder",
default=0.0,
type=float,
help="Dropout rate for the decoder",
)
group.add_argument(
"--sampling-probability",
default=0.0,
type=float,
help="Ratio of predicted labels fed back to decoder",
)
group.add_argument(
"--lsm-type",
const="",
default="",
type=str,
nargs="?",
choices=["", "unigram"],
help="Apply label smoothing with a specified distribution type",
)
return parser
@staticmethod
def ctc_add_arguments(parser):
"""Add arguments for ctc in multi-encoder setting."""
group = parser.add_argument_group("E2E multi-ctc setting")
group.add_argument(
"--share-ctc",
type=strtobool,
default=False,
help="The flag to switch to share ctc across multiple encoders "
"(multi-encoder asr mode only).",
)
group.add_argument(
"--weights-ctc-train",
type=float,
action="append",
help="ctc weight assigned to each encoder during training.",
)
group.add_argument(
"--weights-ctc-dec",
type=float,
action="append",
help="ctc weight assigned to each encoder during decoding.",
)
return parser
def get_total_subsampling_factor(self):
"""Get total subsampling factor."""
if isinstance(self.enc, Encoder):
return self.enc.conv_subsampling_factor * int(
np.prod(self.subsample_list[0])
)
else:
return self.enc[0].conv_subsampling_factor * int(
np.prod(self.subsample_list[0])
)
    def __init__(self, idims, odim, args):
        """Initialize this class with python-level args.
        Args:
            idims (list): list of the number of an input feature dim.
            odim (int): The number of output vocab.
            args (Namespace): arguments
        """
        super(E2E, self).__init__()
        torch.nn.Module.__init__(self)
        self.mtlalpha = args.mtlalpha
        assert 0.0 <= self.mtlalpha <= 1.0, "mtlalpha should be [0.0, 1.0]"
        self.verbose = args.verbose
        # NOTE: for self.build method
        args.char_list = getattr(args, "char_list", None)
        self.char_list = args.char_list
        self.outdir = args.outdir
        self.space = args.sym_space
        self.blank = args.sym_blank
        self.reporter = Reporter()
        # number of parallel encoders and whether they share one CTC module
        self.num_encs = args.num_encs
        self.share_ctc = args.share_ctc
        # below means the last number becomes eos/sos ID
        # note that sos/eos IDs are identical
        self.sos = odim - 1
        self.eos = odim - 1
        # subsample info
        self.subsample_list = get_subsample(args, mode="asr", arch="rnn_mulenc")
        # label smoothing info (only when a training transcript file exists)
        if args.lsm_type and os.path.isfile(args.train_json):
            logging.info("Use label smoothing with " + args.lsm_type)
            labeldist = label_smoothing_dist(
                odim, args.lsm_type, transcript=args.train_json
            )
        else:
            labeldist = None
        # speech translation related
        self.replace_sos = getattr(
            args, "replace_sos", False
        )  # use getattr to keep compatibility
        self.frontend = None
        # encoder
        self.enc = encoder_for(args, idims, self.subsample_list)
        # ctc
        self.ctc = ctc_for(args, odim)
        # attention
        self.att = att_for(args)
        # hierarchical attention network (appended as the last attention module)
        han = att_for(args, han_mode=True)
        self.att.append(han)
        # decoder
        self.dec = decoder_for(args, odim, self.sos, self.eos, self.att, labeldist)
        if args.mtlalpha > 0 and self.num_encs > 1:
            # weights-ctc,
            # e.g. ctc_loss = w_1*ctc_1_loss + w_2 * ctc_2_loss + w_N * ctc_N_loss
            self.weights_ctc_train = args.weights_ctc_train / np.sum(
                args.weights_ctc_train
            )  # normalize
            self.weights_ctc_dec = args.weights_ctc_dec / np.sum(
                args.weights_ctc_dec
            )  # normalize
            logging.info(
                "ctc weights (training during training): "
                + " ".join([str(x) for x in self.weights_ctc_train])
            )
            logging.info(
                "ctc weights (decoding during training): "
                + " ".join([str(x) for x in self.weights_ctc_dec])
            )
        else:
            # single encoder (or pure-attention training): trivial weights
            self.weights_ctc_dec = [1.0]
            self.weights_ctc_train = [1.0]
        # weight initialization
        self.init_like_chainer()
        # options for beam search (only built when CER/WER reporting is on)
        if args.report_cer or args.report_wer:
            recog_args = {
                "beam_size": args.beam_size,
                "penalty": args.penalty,
                "ctc_weight": args.ctc_weight,
                "maxlenratio": args.maxlenratio,
                "minlenratio": args.minlenratio,
                "lm_weight": args.lm_weight,
                "rnnlm": args.rnnlm,
                "nbest": args.nbest,
                "space": args.sym_space,
                "blank": args.sym_blank,
                "tgt_lang": False,
                "ctc_weights_dec": self.weights_ctc_dec,
            }
            self.recog_args = argparse.Namespace(**recog_args)
            self.report_cer = args.report_cer
            self.report_wer = args.report_wer
        else:
            self.report_cer = False
            self.report_wer = False
        self.rnnlm = None
        # effectively -inf for log-domain scores
        self.logzero = -10000000000.0
        self.loss = None
        self.acc = None
    def init_like_chainer(self):
        """Initialize weight like chainer.
        chainer basically uses LeCun way: W ~ Normal(0, fan_in ** -0.5), b = 0
        pytorch basically uses W, b ~ Uniform(-fan_in**-0.5, fan_in**-0.5)
        however, there are two exceptions as far as I know.
        - EmbedID.W ~ Normal(0, 1)
        - LSTM.upward.b[forget_gate_range] = 1 (but not used in NStepLSTM)
        """
        def lecun_normal_init_parameters(module):
            # Initialize every parameter in *module* by dimensionality:
            # biases to zero, weights ~ Normal(0, fan_in ** -0.5).
            for p in module.parameters():
                data = p.data
                if data.dim() == 1:
                    # bias
                    data.zero_()
                elif data.dim() == 2:
                    # linear weight
                    n = data.size(1)
                    stdv = 1.0 / math.sqrt(n)
                    data.normal_(0, stdv)
                elif data.dim() in (3, 4):
                    # conv weight: fan_in = in_channels * prod(kernel_size)
                    n = data.size(1)
                    for k in data.size()[2:]:
                        n *= k
                    stdv = 1.0 / math.sqrt(n)
                    data.normal_(0, stdv)
                else:
                    raise NotImplementedError
        def set_forget_bias_to_one(bias):
            # The LSTM bias is laid out as four gate blocks; the second
            # quarter is the forget gate.
            n = bias.size(0)
            start, end = n // 4, n // 2
            bias.data[start:end].fill_(1.0)
        lecun_normal_init_parameters(self)
        # exceptions
        # embed weight ~ Normal(0, 1)
        self.dec.embed.weight.data.normal_(0, 1)
        # forget-bias = 1.0
        # https://discuss.pytorch.org/t/set-forget-gate-bias-of-lstm/1745
        for i in range(len(self.dec.decoder)):
            set_forget_bias_to_one(self.dec.decoder[i].bias_ih)
    def forward(self, xs_pad_list, ilens_list, ys_pad):
        """E2E forward.
        :param List xs_pad_list: list of batch (torch.Tensor) of padded input sequences
            [(B, Tmax_1, idim), (B, Tmax_2, idim),..]
        :param List ilens_list:
            list of batch (torch.Tensor) of lengths of input sequences [(B), (B), ..]
        :param torch.Tensor ys_pad:
            batch of padded character id sequence tensor (B, Lmax)
        :return: loss value
        :rtype: torch.Tensor
        """
        import editdistance
        if self.replace_sos:
            tgt_lang_ids = ys_pad[:, 0:1]
            ys_pad = ys_pad[:, 1:]  # remove target language ID in the beginning
        else:
            tgt_lang_ids = None
        hs_pad_list, hlens_list, self.loss_ctc_list = [], [], []
        for idx in range(self.num_encs):
            # 1. Encoder
            hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
            # 2. CTC loss (skipped entirely for pure-attention training)
            if self.mtlalpha == 0:
                self.loss_ctc_list.append(None)
            else:
                # a shared CTC module always uses index 0
                ctc_idx = 0 if self.share_ctc else idx
                loss_ctc = self.ctc[ctc_idx](hs_pad, hlens, ys_pad)
                self.loss_ctc_list.append(loss_ctc)
            hs_pad_list.append(hs_pad)
            hlens_list.append(hlens)
        # 3. attention loss (skipped for pure-CTC training)
        if self.mtlalpha == 1:
            self.loss_att, acc = None, None
        else:
            self.loss_att, acc, _ = self.dec(
                hs_pad_list, hlens_list, ys_pad, lang_ids=tgt_lang_ids
            )
        self.acc = acc
        # 4. compute cer without beam search (greedy CTC argmax per encoder)
        if self.mtlalpha == 0 or self.char_list is None:
            cer_ctc_list = [None] * (self.num_encs + 1)
        else:
            cer_ctc_list = []
            for ind in range(self.num_encs):
                cers = []
                ctc_idx = 0 if self.share_ctc else ind
                y_hats = self.ctc[ctc_idx].argmax(hs_pad_list[ind]).data
                for i, y in enumerate(y_hats):
                    # collapse repeated CTC labels, drop padding (-1)
                    y_hat = [x[0] for x in groupby(y)]
                    y_true = ys_pad[i]
                    seq_hat = [
                        self.char_list[int(idx)] for idx in y_hat if int(idx) != -1
                    ]
                    seq_true = [
                        self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                    ]
                    seq_hat_text = "".join(seq_hat).replace(self.space, " ")
                    seq_hat_text = seq_hat_text.replace(self.blank, "")
                    seq_true_text = "".join(seq_true).replace(self.space, " ")
                    hyp_chars = seq_hat_text.replace(" ", "")
                    ref_chars = seq_true_text.replace(" ", "")
                    if len(ref_chars) > 0:
                        cers.append(
                            editdistance.eval(hyp_chars, ref_chars) / len(ref_chars)
                        )
                cer_ctc = sum(cers) / len(cers) if cers else None
                cer_ctc_list.append(cer_ctc)
            # weighted aggregate first, then the per-encoder values
            cer_ctc_weighted = np.sum(
                [
                    item * self.weights_ctc_train[i]
                    for i, item in enumerate(cer_ctc_list)
                ]
            )
            cer_ctc_list = [float(cer_ctc_weighted)] + [
                float(item) for item in cer_ctc_list
            ]
        # 5. compute cer/wer via beam search (eval mode with reporting only)
        if self.training or not (self.report_cer or self.report_wer):
            cer, wer = 0.0, 0.0
            # oracle_cer, oracle_wer = 0.0, 0.0
        else:
            if self.recog_args.ctc_weight > 0.0:
                lpz_list = []
                for idx in range(self.num_encs):
                    ctc_idx = 0 if self.share_ctc else idx
                    lpz = self.ctc[ctc_idx].log_softmax(hs_pad_list[idx]).data
                    lpz_list.append(lpz)
            else:
                lpz_list = None
            word_eds, word_ref_lens, char_eds, char_ref_lens = [], [], [], []
            nbest_hyps = self.dec.recognize_beam_batch(
                hs_pad_list,
                hlens_list,
                lpz_list,
                self.recog_args,
                self.char_list,
                self.rnnlm,
                lang_ids=tgt_lang_ids.squeeze(1).tolist() if self.replace_sos else None,
            )
            # remove <sos> and <eos>
            y_hats = [nbest_hyp[0]["yseq"][1:-1] for nbest_hyp in nbest_hyps]
            for i, y_hat in enumerate(y_hats):
                y_true = ys_pad[i]
                seq_hat = [self.char_list[int(idx)] for idx in y_hat if int(idx) != -1]
                seq_true = [
                    self.char_list[int(idx)] for idx in y_true if int(idx) != -1
                ]
                seq_hat_text = "".join(seq_hat).replace(self.recog_args.space, " ")
                seq_hat_text = seq_hat_text.replace(self.recog_args.blank, "")
                seq_true_text = "".join(seq_true).replace(self.recog_args.space, " ")
                hyp_words = seq_hat_text.split()
                ref_words = seq_true_text.split()
                word_eds.append(editdistance.eval(hyp_words, ref_words))
                word_ref_lens.append(len(ref_words))
                hyp_chars = seq_hat_text.replace(" ", "")
                ref_chars = seq_true_text.replace(" ", "")
                char_eds.append(editdistance.eval(hyp_chars, ref_chars))
                char_ref_lens.append(len(ref_chars))
            wer = (
                0.0
                if not self.report_wer
                else float(sum(word_eds)) / sum(word_ref_lens)
            )
            cer = (
                0.0
                if not self.report_cer
                else float(sum(char_eds)) / sum(char_ref_lens)
            )
        # 6. combine CTC and attention losses according to mtlalpha
        alpha = self.mtlalpha
        if alpha == 0:
            self.loss = self.loss_att
            loss_att_data = float(self.loss_att)
            loss_ctc_data_list = [None] * (self.num_encs + 1)
        elif alpha == 1:
            self.loss = torch.sum(
                torch.cat(
                    [
                        (item * self.weights_ctc_train[i]).unsqueeze(0)
                        for i, item in enumerate(self.loss_ctc_list)
                    ]
                )
            )
            loss_att_data = None
            loss_ctc_data_list = [float(self.loss)] + [
                float(item) for item in self.loss_ctc_list
            ]
        else:
            self.loss_ctc = torch.sum(
                torch.cat(
                    [
                        (item * self.weights_ctc_train[i]).unsqueeze(0)
                        for i, item in enumerate(self.loss_ctc_list)
                    ]
                )
            )
            self.loss = alpha * self.loss_ctc + (1 - alpha) * self.loss_att
            loss_att_data = float(self.loss_att)
            loss_ctc_data_list = [float(self.loss_ctc)] + [
                float(item) for item in self.loss_ctc_list
            ]
        loss_data = float(self.loss)
        # only report sane losses; huge/NaN values are logged and skipped
        if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
            self.reporter.report(
                loss_ctc_data_list,
                loss_att_data,
                acc,
                cer_ctc_list,
                cer,
                wer,
                loss_data,
            )
        else:
            logging.warning("loss (=%f) is not correct", loss_data)
        return self.loss
def scorers(self):
"""Get scorers for `beam_search` (optional).
Returns:
dict[str, ScorerInterface]: dict of `ScorerInterface` objects
"""
return dict(decoder=self.dec, ctc=CTCPrefixScorer(self.ctc, self.eos))
def encode(self, x_list):
"""Encode feature.
Args:
x_list (list): input feature [(T1, D), (T2, D), ... ]
Returns:
list
encoded feature [(T1, D), (T2, D), ... ]
"""
self.eval()
ilens_list = [[x_list[idx].shape[0]] for idx in range(self.num_encs)]
# subsample frame
x_list = [
x_list[idx][:: self.subsample_list[idx][0], :]
for idx in range(self.num_encs)
]
p = next(self.parameters())
x_list = [
torch.as_tensor(x_list[idx], device=p.device, dtype=p.dtype)
for idx in range(self.num_encs)
]
# make a utt list (1) to use the same interface for encoder
xs_list = [
x_list[idx].contiguous().unsqueeze(0) for idx in range(self.num_encs)
]
# 1. encoder
hs_list = []
for idx in range(self.num_encs):
hs, _, _ = self.enc[idx](xs_list[idx], ilens_list[idx])
hs_list.append(hs[0])
return hs_list
def recognize(self, x_list, recog_args, char_list, rnnlm=None):
"""E2E beam search.
:param list of ndarray x: list of input acoustic feature [(T1, D), (T2,D),...]
:param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
hs_list = self.encode(x_list)
# calculate log P(z_t|X) for CTC scores
if recog_args.ctc_weight > 0.0:
if self.share_ctc:
lpz_list = [
self.ctc[0].log_softmax(hs_list[idx].unsqueeze(0))[0]
for idx in range(self.num_encs)
]
else:
lpz_list = [
self.ctc[idx].log_softmax(hs_list[idx].unsqueeze(0))[0]
for idx in range(self.num_encs)
]
else:
lpz_list = None
# 2. Decoder
# decode the first utterance
y = self.dec.recognize_beam(hs_list, lpz_list, recog_args, char_list, rnnlm)
return y
def recognize_batch(self, xs_list, recog_args, char_list, rnnlm=None):
"""E2E beam search.
:param list xs_list: list of list of input acoustic feature arrays
[[(T1_1, D), (T1_2, D), ...],[(T2_1, D), (T2_2, D), ...], ...]
:param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
prev = self.training
self.eval()
ilens_list = [
np.fromiter((xx.shape[0] for xx in xs_list[idx]), dtype=np.int64)
for idx in range(self.num_encs)
]
# subsample frame
xs_list = [
[xx[:: self.subsample_list[idx][0], :] for xx in xs_list[idx]]
for idx in range(self.num_encs)
]
xs_list = [
[to_device(self, to_torch_tensor(xx).float()) for xx in xs_list[idx]]
for idx in range(self.num_encs)
]
xs_pad_list = [pad_list(xs_list[idx], 0.0) for idx in range(self.num_encs)]
# 1. Encoder
hs_pad_list, hlens_list = [], []
for idx in range(self.num_encs):
hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
hs_pad_list.append(hs_pad)
hlens_list.append(hlens)
# calculate log P(z_t|X) for CTC scores
if recog_args.ctc_weight > 0.0:
if self.share_ctc:
lpz_list = [
self.ctc[0].log_softmax(hs_pad_list[idx])
for idx in range(self.num_encs)
]
else:
lpz_list = [
self.ctc[idx].log_softmax(hs_pad_list[idx])
for idx in range(self.num_encs)
]
normalize_score = False
else:
lpz_list = None
normalize_score = True
# 2. Decoder
hlens_list = [
torch.tensor(list(map(int, hlens_list[idx])))
for idx in range(self.num_encs)
] # make sure hlens is tensor
y = self.dec.recognize_beam_batch(
hs_pad_list,
hlens_list,
lpz_list,
recog_args,
char_list,
rnnlm,
normalize_score=normalize_score,
)
if prev:
self.train()
return y
def calculate_all_attentions(self, xs_pad_list, ilens_list, ys_pad):
"""E2E attention calculation.
:param List xs_pad_list: list of batch (torch.Tensor) of padded input sequences
[(B, Tmax_1, idim), (B, Tmax_2, idim),..]
:param List ilens_list:
list of batch (torch.Tensor) of lengths of input sequences [(B), (B), ..]
:param torch.Tensor ys_pad:
batch of padded character id sequence tensor (B, Lmax)
:return: attention weights with the following shape,
1) multi-head case => attention weights (B, H, Lmax, Tmax),
2) multi-encoder case
=> [(B, Lmax, Tmax1), (B, Lmax, Tmax2), ..., (B, Lmax, NumEncs)]
3) other case => attention weights (B, Lmax, Tmax).
:rtype: float ndarray or list
"""
self.eval()
with torch.no_grad():
# 1. Encoder
if self.replace_sos:
tgt_lang_ids = ys_pad[:, 0:1]
ys_pad = ys_pad[:, 1:] # remove target language ID in the beginning
else:
tgt_lang_ids = None
hs_pad_list, hlens_list = [], []
for idx in range(self.num_encs):
hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
hs_pad_list.append(hs_pad)
hlens_list.append(hlens)
# 2. Decoder
att_ws = self.dec.calculate_all_attentions(
hs_pad_list, hlens_list, ys_pad, lang_ids=tgt_lang_ids
)
self.train()
return att_ws
def calculate_all_ctc_probs(self, xs_pad_list, ilens_list, ys_pad):
"""E2E CTC probability calculation.
:param List xs_pad_list: list of batch (torch.Tensor) of padded input sequences
[(B, Tmax_1, idim), (B, Tmax_2, idim),..]
:param List ilens_list:
list of batch (torch.Tensor) of lengths of input sequences [(B), (B), ..]
:param torch.Tensor ys_pad:
batch of padded character id sequence tensor (B, Lmax)
:return: CTC probability (B, Tmax, vocab)
:rtype: float ndarray or list
"""
probs_list = [None]
if self.mtlalpha == 0:
return probs_list
self.eval()
probs_list = []
with torch.no_grad():
# 1. Encoder
for idx in range(self.num_encs):
hs_pad, hlens, _ = self.enc[idx](xs_pad_list[idx], ilens_list[idx])
# 2. CTC loss
ctc_idx = 0 if self.share_ctc else idx
probs = self.ctc[ctc_idx].softmax(hs_pad).cpu().numpy()
probs_list.append(probs)
self.train()
return probs_list
| 35.378652 | 88 | 0.523422 |
8a19fc34804be4ef0f39509118cf6fa5e2cd93b9 | 6,514 | py | Python | scrapers/GenericScraper.py | jonmsawyer/jscraper | ab10db6ba180586fca049141715dd687013edc42 | [
"MIT"
] | null | null | null | scrapers/GenericScraper.py | jonmsawyer/jscraper | ab10db6ba180586fca049141715dd687013edc42 | [
"MIT"
] | null | null | null | scrapers/GenericScraper.py | jonmsawyer/jscraper | ab10db6ba180586fca049141715dd687013edc42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''GenericScraper is a derived class of TemplateScraper for driving the generic image scraper. This
image scraper will attempt to scrape any image off of any URI resource using common known methods
to download those images.
'''
# pylint: disable=ungrouped-imports
import os
import sys
import argparse
try:
from scrapers.TemplateScraper import TemplateScraper
from scrapers.ScraperDriver import ScraperDriver
except (ModuleNotFoundError, ImportError):
from TemplateScraper import TemplateScraper
from ScraperDriver import ScraperDriver
try:
from scrapers import SALC
except (ModuleNotFoundError, ImportError):
from __init__ import SALC
class GenericScraper(TemplateScraper):
    """Catch-all image scraper registered under the name ``generic``.

    Defines its own argparse front-end (`parse_arguments` / `sub_parser`)
    and a `handle` entry point that currently only reports the parsed
    options via the driver's `write` method.
    """
    name = 'generic'
    filename = os.path.basename(__file__)
    def __init__(self, driver, *args, **kwargs):
        # Register with the driver under this scraper's fixed name.
        super().__init__(driver, self.name, *args, **kwargs)
        self.log('name from GenericScraper():', self.name)
    def parse_arguments(self):
        '''Get the arguments parser and add arguments to it. Then parse `args` with the parser
        definition defined in the base class to obtain an `options` dict.
        '''
        self.parser = argparse.ArgumentParser(prog=self.prog,
                                              formatter_class=argparse.RawTextHelpFormatter,
                                              description='''\
Scrape one or more URI resources for images.''',
                                              epilog='''\
examples:
  $ python %(prog)s \\
        https://foo.example.com/
            Scrape all images located at,
            https://foo.example.com/, but do not recurse into
            any other sub-resources.
  $ python %(prog)s \\
        -cf scraper.conf \\
        http://foo.example.com/
            Read in all configuration options from the
            configuration file `scraper.conf', then scrape all
            images from http://foo.example.com/.
  $ python %(prog)s \\
        -nr 4 \\
        /usr/share/icons
            Scrape all images located at the local directory,
            /usr/share/icons, and recursively grab all
            sub-resources under that directory downloading 4
            simultaneous images until completed.
  $ python %(prog)s \\
        -a ScraperBot \\
        --cookie=cookie.txt \\
        -od images \\
        -e jpg,png \\
        -lt site \\
        -lp http://foo.example.com/login \\
        -l 512k \\
        --no-follow \\
        --names="^img[0-9]+" \\
        -un myuser \\
        -pw mypass \\
        -r \\
        -of log.txt \\
        -s 10 \\
        -w 7 \\
        -nr 10 \\
        http://foo.example.com/
            Recursively scrape all images and sub-resources at
            http://foo.example.com with the "ScraperBot" user
            agent, using the cookie file "cookie.txt", storing
            downloaded resources to local directory "images",
            limiting all downloads to the jpg and png
            extensions, with login type of "site", using
            http://foo.example.com/login as the login page,
            rate limiting downloads to 512Kbps, scraping
            images no referrer URI (no follow), limiting file
            names to grab to img#[#].{{jpg,png}}, with username
            as "myuser" with password "mypass", outputting the
            generated log file to "log.txt", sleeping between
            0 and 10 seconds between each sub-resource get,
            waiting between 0 and 7 seconds for each resource,
            all while downloading 10 simultaneous resources
            at once until completed.
  $ python %(prog)s
        -un anonymous \\
        ftps://bar.example.com/images
            Recursively scrape all images at the ftps resource
            located at bar.example.com/images, anonymously.
  $ python %(prog)s \\
        -r \\
        http://foo.bar.com/images \\
        http://bar.example.com/images/mirror1 \\
        http://qux.example.com/images/backup2
            Recursively scrape http://foo.bar.com/images,
            http://bar.example.com/images/mirror1, and
            http://qux.example.com/images/backup2 into the
            local current working directory.
  $ python %(prog)s --version
            Display the version of scraper.py and exit.
  $ python %(prog)s --help
            Show this help message for the generic scraper
            and exit
{SALC}'''.format(SALC=SALC))
        super().parse_arguments()
    @staticmethod
    def sub_parser(subparsers):
        '''A subparser is passed in as `subparsers`. Add a new subparser to the `subparsers` object
        then return that subparser. See `argparse.ArgumentsParser` for details.
        '''
        parser = subparsers.add_parser('generic',
                                       help=('Invoke the generic scraper to scrape images off '
                                             'of any URI resource. This is a general scraper '
                                             'and may not grab every image.'))
        return parser
    def handle(self):
        '''Main class method that drives the work on scraping the images for this GenericScraper.
        '''
        # NOTE(review): currently a stub — it only echoes the parsed state.
        self.write('Args:', self.args)
        self.write('Parser:', self.parser)
        self.write('Parsed options:', self.options)
        self.write('')
        self.write('This is the GenericScraper.')
if __name__ == '__main__':
    # Command-line entry point: build a ScraperDriver from argv, obtain the
    # GenericScraper for it, then run the scraper's main handle() method.
    scraper_driver = ScraperDriver(*sys.argv)
    scraper_driver.log('Args:', sys.argv)
    generic_scraper = GenericScraper(scraper_driver)
    scraper_driver.log('scraper =', generic_scraper)
    generic_scraper.handle()
| 40.459627 | 100 | 0.534695 |
5b5e137c2e0d90fadc30a66dcead441675992fcf | 859 | py | Python | test/test_ldap_login.py | jazlee/centric-ibridge | 27158ff64e742f30d789bd46123cb7af7c81d975 | [
"Apache-2.0"
] | null | null | null | test/test_ldap_login.py | jazlee/centric-ibridge | 27158ff64e742f30d789bd46123cb7af7c81d975 | [
"Apache-2.0"
] | 1 | 2022-02-04T06:59:12.000Z | 2022-02-04T06:59:12.000Z | test/test_ldap_login.py | jazlee/centric-ibridge | 27158ff64e742f30d789bd46123cb7af7c81d975 | [
"Apache-2.0"
] | 4 | 2022-02-02T16:51:01.000Z | 2022-02-03T04:28:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Busana Apparel Group. All rights reserved.
#
# This product and it's source code is protected by patents, copyright laws and
# international copyright treaties, as well as other intellectual property
# laws and treaties. The product is licensed, not sold.
#
# The source code and sample programs in this package or parts hereof
# as well as the documentation shall not be copied, modified or redistributed
# without permission, explicit or implied, of the author.
#
# This module is part of Centric PLM Integration Bridge and is released under
# the Apache-2.0 License: https://www.apache.org/licenses/LICENSE-2.0
import ldap
from unittest import TestCase
class LDAPLoginTest(TestCase):
    """Smoke test scaffold for establishing an LDAP connection for login tests."""

    # Connection object created in setUp; None until then.
    ldap_connection = None

    def setUp(self) -> None:
        # ldap.open() has been deprecated since python-ldap 2.0 in favour of
        # ldap.initialize() with an explicit LDAP URI; initialize() also
        # avoids the ambiguous no-argument call the old code used.
        # NOTE(review): assumes the directory server runs on the local host
        # at the default port — confirm against the test environment.
        self.ldap_connection = ldap.initialize('ldap://localhost:389')
| 33.038462 | 79 | 0.750873 |
1e4dd076dab0f442656b89469d9ad47b6eb0790b | 1,345 | py | Python | src/cui/uploader/command/repository/Deleter.py | ytyaru0/GitHub.Uploader.Pi3.Https.201802280700 | 0d31c37f42f1b2dde8185563b085bd7997d171cd | [
"CC0-1.0"
] | null | null | null | src/cui/uploader/command/repository/Deleter.py | ytyaru0/GitHub.Uploader.Pi3.Https.201802280700 | 0d31c37f42f1b2dde8185563b085bd7997d171cd | [
"CC0-1.0"
] | null | null | null | src/cui/uploader/command/repository/Deleter.py | ytyaru0/GitHub.Uploader.Pi3.Https.201802280700 | 0d31c37f42f1b2dde8185563b085bd7997d171cd | [
"CC0-1.0"
] | null | null | null | #!python3
#encoding:utf-8
import os.path
from web.log.Log import Log
from database.Database import Database as Db
class Deleter:
    """Delete a repository from disk, from the remote service, and from the DB."""

    def __init__(self, client, args):
        self.__client = client
        self.__args = args
        # per-user repository database and the project's repository name
        self.__userRepo = Db().Repositories[self.__args.username]
        self.__repo_name = os.path.basename(self.__args.path_dir_pj)

    def ShowDeleteRecords(self):
        """Log the repository, count and language rows that Delete() would remove."""
        repo = self.__userRepo['Repositories'].find_one(Name=self.__repo_name)
        Log().Logger.info(repo)
        Log().Logger.info(self.__userRepo['Counts'].find_one(RepositoryId=repo['Id']))
        for language_row in self.__userRepo['Languages'].find(RepositoryId=repo['Id']):
            Log().Logger.info(language_row)

    def Delete(self):
        """Remove the local repository, the remote repository, then the DB rows."""
        self.__DeleteLocalRepository()
        self.__client.Repositories.delete()
        self.__DeleteDb()

    def __DeleteLocalRepository(self):
        import shutil
        # dropping the .git directory de-initialises the local repository
        shutil.rmtree(os.path.join(self.__args.path_dir_pj, '.git'))

    def __DeleteDb(self):
        repo = self.__userRepo['Repositories'].find_one(Name=self.__repo_name)
        self.__userRepo.begin()
        # children first, then the repository row itself, in a single transaction
        for child_table in ('Languages', 'Counts'):
            self.__userRepo[child_table].delete(RepositoryId=repo['Id'])
        self.__userRepo['Repositories'].delete(Id=repo['Id'])
        self.__userRepo.commit()
397fe893623b3f0550846779a90778d3e87cf8d7 | 459 | py | Python | corehq/ex-submodules/casexml/apps/case/tests/test_const.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 471 | 2015-01-10T02:55:01.000Z | 2022-03-29T18:07:18.000Z | corehq/ex-submodules/casexml/apps/case/tests/test_const.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 14,354 | 2015-01-01T07:38:23.000Z | 2022-03-31T20:55:14.000Z | corehq/ex-submodules/casexml/apps/case/tests/test_const.py | dimagilg/commcare-hq | ea1786238eae556bb7f1cbd8d2460171af1b619c | [
"BSD-3-Clause"
] | 175 | 2015-01-06T07:16:47.000Z | 2022-03-29T13:27:01.000Z | from datetime import datetime
# these match properties in the xml
ORIGINAL_DATE = datetime(2010, 6, 29, 13, 42, 50)
MODIFY_DATE = datetime(2010, 6, 30, 13, 42, 50)
MODIFY_2_DATE = datetime(2010, 7, 4, 13, 42, 50)
UPDATE_DATE = datetime(2010, 5, 12, 13, 42, 50)
CLOSE_DATE = datetime(2010, 7, 1, 13, 42, 50)
REFER_DATE = datetime(2010, 7, 2, 13, 42, 50)
REFER_DATE_UPDATE = datetime(2010, 7, 5, 13, 42, 50)
REFER_DATE_CLOSE = datetime(2010, 7, 6, 13, 42, 50)
| 38.25 | 52 | 0.69281 |
9887ff673529314804a2c875d1956e1bda68c3ed | 62,550 | py | Python | compose/cli/main.py | YvesGaelCheny/compose | 63be435515f60e2af4d27d7462b4c6fbe92fe7af | [
"Apache-2.0"
] | null | null | null | compose/cli/main.py | YvesGaelCheny/compose | 63be435515f60e2af4d27d7462b4c6fbe92fe7af | [
"Apache-2.0"
] | null | null | null | compose/cli/main.py | YvesGaelCheny/compose | 63be435515f60e2af4d27d7462b4c6fbe92fe7af | [
"Apache-2.0"
] | null | null | null | import contextlib
import functools
import json
import logging
import pipes
import re
import subprocess
import sys
from distutils.spawn import find_executable
from inspect import getdoc
from operator import attrgetter
import docker.errors
import docker.utils
from . import errors
from . import signals
from .. import __version__
from ..config import ConfigurationError
from ..config import parse_environment
from ..config import parse_labels
from ..config import resolve_build_args
from ..config.environment import Environment
from ..config.serialize import serialize_config
from ..config.types import VolumeSpec
from ..const import IS_WINDOWS_PLATFORM
from ..errors import StreamParseError
from ..metrics.decorator import metrics
from ..parallel import ParallelStreamWriter
from ..progress_stream import StreamOutputError
from ..project import get_image_digests
from ..project import MissingDigests
from ..project import NoSuchService
from ..project import OneOffFilter
from ..project import ProjectError
from ..service import BuildAction
from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import ImageType
from ..service import NeedsBuildError
from ..service import OperationFailedError
from ..utils import filter_attached_for_up
from .colors import AnsiMode
from .command import get_config_from_options
from .command import get_project_dir
from .command import project_from_options
from .docopt_command import DocoptDispatcher
from .docopt_command import get_handler
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import ConsoleWarningFormatter
from .formatter import Formatter
from .log_printer import build_log_presenters
from .log_printer import LogPrinter
from .utils import get_version_info
from .utils import human_readable_file_size
from .utils import yesno
from compose.metrics.client import MetricsCommand
from compose.metrics.client import Status
if not IS_WINDOWS_PLATFORM:
from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation
log = logging.getLogger(__name__)
def main():  # noqa: C901
    """CLI entry point: run the dispatched command, mapping every known
    failure mode to a log message, a metrics event and a process exit."""
    signals.ignore_sigpipe()
    command = None
    # Speculative early parse used only to learn the command name for
    # metrics reporting; any parse failure here is deliberately ignored.
    try:
        _, opts, command = DocoptDispatcher.get_command_and_options(
            TopLevelCommand,
            get_filtered_args(sys.argv[1:]),
            {'options_first': True, 'version': get_version_info('compose')})
    except Exception:
        pass
    try:
        command_func = dispatch()
        command_func()
    except (KeyboardInterrupt, signals.ShutdownException):
        exit_with_metrics(command, "Aborting.", status=Status.FAILURE)
    except (UserError, NoSuchService, ConfigurationError,
            ProjectError, OperationFailedError) as e:
        exit_with_metrics(command, e.msg, status=Status.FAILURE)
    except BuildError as e:
        reason = ""
        if e.reason:
            reason = " : " + e.reason
        exit_with_metrics(command,
                          "Service '{}' failed to build{}".format(e.service.name, reason),
                          status=Status.FAILURE)
    except StreamOutputError as e:
        exit_with_metrics(command, e, status=Status.FAILURE)
    except NeedsBuildError as e:
        exit_with_metrics(command,
                          "Service '{}' needs to be built, but --no-build was passed.".format(
                              e.service.name), status=Status.FAILURE)
    except NoSuchCommand as e:
        # Show the "commands:" section of the top-level help next to the error.
        commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
        exit_with_metrics(e.command, "No such command: {}\n\n{}".format(e.command, commands))
    except (errors.ConnectionError, StreamParseError):
        exit_with_metrics(command, status=Status.FAILURE)
    except SystemExit as e:
        # docopt and subcommands exit via SystemExit; classify help requests
        # as success and recover a usable command name for metrics.
        status = Status.SUCCESS
        if len(sys.argv) > 1 and '--help' not in sys.argv:
            status = Status.FAILURE
        if command and len(sys.argv) >= 3 and sys.argv[2] == '--help':
            command = '--help ' + command
        if not command and len(sys.argv) >= 2 and sys.argv[1] == '--help':
            command = '--help'
        msg = e.args[0] if len(e.args) else ""
        code = 0
        if isinstance(e.code, int):
            code = e.code
        exit_with_metrics(command, log_msg=msg, status=status,
                          exit_code=code)
def get_filtered_args(args):
    """Pre-filter argv for the early, metrics-only parse in main().

    Returns a simplified argument list for help/version invocations so the
    reported command name is normalized, an empty list when no arguments
    were given, and ``None`` otherwise (which makes the speculative parse in
    main() fail harmlessly inside its try/except).
    """
    if not args:
        # previously this raised IndexError on args[0] (silently swallowed
        # by the caller's bare except); return an empty parse input instead
        return []
    if args[0] in ('-h', '--help'):
        return []
    if args[0] == '--version':
        return ['version']
    return None
def exit_with_metrics(command, log_msg=None, status=Status.SUCCESS, exit_code=1):
    """Log *log_msg*, send a metrics event for *command* and exit the process."""
    if log_msg:
        # a zero exit code is informational; anything non-zero is an error
        emit = log.info if not exit_code else log.error
        emit(log_msg)
    MetricsCommand(command, status=status).send_metrics()
    sys.exit(exit_code)
def dispatch():
    """Parse argv, configure logging/ANSI handling, and return a zero-argument
    callable that executes the selected subcommand."""
    console_stream = sys.stderr
    console_handler = logging.StreamHandler(console_stream)
    setup_logging(console_handler)
    dispatcher = DocoptDispatcher(
        TopLevelCommand,
        {'options_first': True, 'version': get_version_info('compose')})
    options, handler, command_options = dispatcher.parse(sys.argv[1:])
    # Resolve ANSI mode: default AUTO, overridden by --ansi, then by the
    # deprecated --no-ansi flag (the two may not be combined).
    ansi_mode = AnsiMode.AUTO
    try:
        if options.get("--ansi"):
            ansi_mode = AnsiMode(options.get("--ansi"))
    except ValueError:
        raise UserError(
            'Invalid value for --ansi: {}. Expected one of {}.'.format(
                options.get("--ansi"),
                ', '.join(m.value for m in AnsiMode)
            )
        )
    if options.get("--no-ansi"):
        if options.get("--ansi"):
            raise UserError("--no-ansi and --ansi cannot be combined.")
        log.warning('--no-ansi option is deprecated and will be removed in future versions.')
        ansi_mode = AnsiMode.NEVER
    setup_console_handler(console_handler,
                          options.get('--verbose'),
                          ansi_mode.use_ansi_codes(console_handler.stream),
                          options.get("--log-level"))
    setup_parallel_logger(ansi_mode)
    if ansi_mode is AnsiMode.NEVER:
        # propagate the choice to subcommands that take --no-color
        command_options['--no-color'] = True
    return functools.partial(perform_command, options, handler, command_options)
def perform_command(options, handler, command_options):
    """Run *handler* for the chosen subcommand, building a project when needed."""
    command = options['COMMAND']
    if command in ('help', 'version'):
        # Skip looking up the compose file.
        handler(command_options)
        return
    if command == 'config':
        # config needs the TopLevelCommand wrapper but no project
        handler(TopLevelCommand(None, options=options), command_options)
        return
    project = project_from_options('.', options)
    top_level = TopLevelCommand(project, options=options)
    with errors.handle_connection_errors(project.client):
        handler(top_level, command_options)
def setup_logging(console_handler):
    """Attach *console_handler* to the root logger and mute noisy dependencies."""
    root = logging.getLogger()
    root.addHandler(console_handler)
    root.setLevel(logging.DEBUG)
    # Disable requests and docker-py logging
    for noisy_logger in ("urllib3", "requests", "docker"):
        logging.getLogger(noisy_logger).propagate = False
def setup_parallel_logger(ansi_mode):
    """Propagate the chosen ANSI mode to the parallel-operation log writer."""
    ParallelStreamWriter.set_default_ansi_mode(ansi_mode)
def setup_console_handler(handler, verbose, use_console_formatter=True, level=None):
    """Configure formatter and threshold on the CLI console *handler*.

    *verbose* selects a detailed format and DEBUG; an explicit *level*
    string (case-insensitive) overrides the threshold and must name a
    standard logging level, otherwise UserError is raised.
    """
    format_class = ConsoleWarningFormatter if use_console_formatter else logging.Formatter
    if verbose:
        handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
        loglevel = logging.DEBUG
    else:
        handler.setFormatter(format_class())
        loglevel = logging.INFO
    if level is not None:
        loglevel = {
            'DEBUG': logging.DEBUG,
            'INFO': logging.INFO,
            'WARNING': logging.WARNING,
            'ERROR': logging.ERROR,
            'CRITICAL': logging.CRITICAL,
        }.get(level.upper())
        if loglevel is None:
            raise UserError(
                'Invalid value for --log-level. Expected one of DEBUG, INFO, WARNING, ERROR, CRITICAL.'
            )
    handler.setLevel(loglevel)
# adapted from docopt master: extract help-text sections by header name
def parse_doc_section(name, source):
    """Return each stripped docstring section whose header line contains *name*."""
    section_re = re.compile(
        '^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
        re.IGNORECASE | re.MULTILINE,
    )
    # a section is its header line plus the indented lines that follow it
    return [match.group(1).strip() for match in section_re.finditer(source)]
class TopLevelCommand:
    """Define and run multi-container applications with Docker.
    Usage:
      docker-compose [-f <arg>...] [--profile <name>...] [options] [--] [COMMAND] [ARGS...]
      docker-compose -h|--help
    Options:
      -f, --file FILE             Specify an alternate compose file
                                  (default: docker-compose.yml)
      -p, --project-name NAME     Specify an alternate project name
                                  (default: directory name)
      --profile NAME              Specify a profile to enable
      -c, --context NAME          Specify a context name
      --verbose                   Show more output
      --log-level LEVEL           Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
      --ansi (never|always|auto)  Control when to print ANSI control characters
      --no-ansi                   Do not print ANSI control characters (DEPRECATED)
      -v, --version               Print version and exit
      -H, --host HOST             Daemon socket to connect to
      --tls                       Use TLS; implied by --tlsverify
      --tlscacert CA_PATH         Trust certs signed only by this CA
      --tlscert CLIENT_CERT_PATH  Path to TLS certificate file
      --tlskey TLS_KEY_PATH       Path to TLS key file
      --tlsverify                 Use TLS and verify the remote
      --skip-hostname-check       Don't check the daemon's hostname against the
                                  name specified in the client certificate
      --project-directory PATH    Specify an alternate working directory
                                  (default: the path of the Compose file)
      --compatibility             If set, Compose will attempt to convert keys
                                  in v3 files to their non-Swarm equivalent (DEPRECATED)
      --env-file PATH             Specify an alternate environment file
    Commands:
      build              Build or rebuild services
      config             Validate and view the Compose file
      create             Create services
      down               Stop and remove resources
      events             Receive real time events from containers
      exec               Execute a command in a running container
      help               Get help on a command
      images             List images
      kill               Kill containers
      logs               View output from containers
      pause              Pause services
      port               Print the public port for a port binding
      ps                 List containers
      pull               Pull service images
      push               Push service images
      restart            Restart services
      rm                 Remove stopped containers
      run                Run a one-off command
      scale              Set number of containers for a service
      start              Start services
      stop               Stop services
      top                Display the running processes
      unpause            Unpause services
      up                 Create and start containers
      version            Show version information and quit
    """
    # NOTE: the class docstring above is parsed by docopt to define the
    # top-level CLI; changing its wording or layout changes option parsing.
    def __init__(self, project, options=None):
        # `project` may be None for commands that need no Docker
        # connection (help/version, and config's file-only path).
        self.project = project
        self.toplevel_options = options or {}
    @property
    def project_dir(self):
        # Working directory used to resolve relative paths
        # (honours --project-directory).
        return get_project_dir(self.toplevel_options)
    @property
    def toplevel_environment(self):
        # Environment loaded from --env-file (or the default .env)
        # relative to project_dir; rebuilt on every access.
        environment_file = self.toplevel_options.get('--env-file')
        return Environment.from_env_file(self.project_dir, environment_file)
    @metrics()
    def build(self, options):
        """
        Build or rebuild services.
        Services are built once and then tagged as `project_service`,
        e.g. `composetest_db`. If you change a service's `Dockerfile` or the
        contents of its build directory, you can run `docker-compose build` to rebuild it.
        Usage: build [options] [--build-arg key=val...] [--] [SERVICE...]
        Options:
            --build-arg key=val     Set build-time variables for services.
            --compress              Compress the build context using gzip.
            --force-rm              Always remove intermediate containers.
            -m, --memory MEM        Set memory limit for the build container.
            --no-cache              Do not use cache when building the image.
            --no-rm                 Do not remove intermediate containers after a successful build.
            --parallel              Build images in parallel.
            --progress string       Set type of progress output (auto, plain, tty).
            --pull                  Always attempt to pull a newer version of the image.
            -q, --quiet             Don't print anything to STDOUT
        """
        service_names = options['SERVICE']
        build_args = options.get('--build-arg', None)
        if build_args:
            # Older APIs (< 1.25) can only apply build args to explicitly
            # named services, so reject the ambiguous form up front.
            if not service_names and docker.utils.version_lt(self.project.client.api_version, '1.25'):
                raise UserError(
                    '--build-arg is only supported when services are specified for API version < 1.25.'
                    ' Please use a Compose file version > 2.2 or specify which services to build.'
                )
            # Resolve `key` (no value) args from the toplevel environment.
            build_args = resolve_build_args(build_args, self.toplevel_environment)
        # Default to delegating builds to the docker CLI (BuildKit path).
        native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD', True)
        self.project.build(
            service_names=options['SERVICE'],
            no_cache=bool(options.get('--no-cache', False)),
            pull=bool(options.get('--pull', False)),
            force_rm=bool(options.get('--force-rm', False)),
            memory=options.get('--memory'),
            rm=not bool(options.get('--no-rm', False)),
            build_args=build_args,
            gzip=options.get('--compress', False),
            parallel_build=options.get('--parallel', False),
            silent=options.get('--quiet', False),
            cli=native_builder,
            progress=options.get('--progress'),
        )
    @metrics()
    def config(self, options):
        """
        Validate and view the Compose file.
        Usage: config [options]
        Options:
            --resolve-image-digests  Pin image tags to digests.
            --no-interpolate         Don't interpolate environment variables.
            -q, --quiet              Only validate the configuration, don't print
                                     anything.
            --services               Print the service names, one per line.
            --volumes                Print the volume names, one per line.
            --hash="*"               Print the service config hash, one per line.
                                     Set "service1,service2" for a list of specified services
                                     or use the wildcard symbol to display all services.
        """
        additional_options = {'--no-interpolate': options.get('--no-interpolate')}
        compose_config = get_config_from_options('.', self.toplevel_options, additional_options)
        image_digests = None
        if options['--resolve-image-digests']:
            # Digest resolution needs a live client, so build the project here
            # (config normally runs without a Docker connection).
            self.project = project_from_options('.', self.toplevel_options, additional_options)
            with errors.handle_connection_errors(self.project.client):
                image_digests = image_digests_for_project(self.project)
        if options['--quiet']:
            # Parsing above already validated the file; nothing to print.
            return
        if options['--services']:
            print('\n'.join(service['name'] for service in compose_config.services))
            return
        if options['--volumes']:
            print('\n'.join(volume for volume in compose_config.volumes))
            return
        if options['--hash'] is not None:
            h = options['--hash']
            self.project = project_from_options('.', self.toplevel_options, additional_options)
            # '*' means all services; otherwise a comma-separated subset.
            services = [svc for svc in options['--hash'].split(',')] if h != '*' else None
            with errors.handle_connection_errors(self.project.client):
                for service in self.project.get_services(services):
                    print('{} {}'.format(service.name, service.config_hash))
            return
        # Default: dump the full (optionally interpolated) config as YAML.
        print(serialize_config(compose_config, image_digests, not options['--no-interpolate']))
    @metrics()
    def create(self, options):
        """
        Creates containers for a service.
        This command is deprecated. Use the `up` command with `--no-start` instead.
        Usage: create [options] [SERVICE...]
        Options:
            --force-recreate       Recreate containers even if their configuration and
                                   image haven't changed. Incompatible with --no-recreate.
            --no-recreate          If containers already exist, don't recreate them.
                                   Incompatible with --force-recreate.
            --no-build             Don't build an image, even if it's missing.
            --build                Build images before creating containers.
        """
        service_names = options['SERVICE']
        # Deprecated alias for `up --no-start`; warn but keep working.
        log.warning(
            'The create command is deprecated. '
            'Use the up command with the --no-start flag instead.'
        )
        self.project.create(
            service_names=service_names,
            strategy=convergence_strategy_from_opts(options),
            do_build=build_action_from_opts(options),
        )
    @metrics()
    def down(self, options):
        """
        Stops containers and removes containers, networks, volumes, and images
        created by `up`.
        By default, the only things removed are:
        - Containers for services defined in the Compose file
        - Networks defined in the `networks` section of the Compose file
        - The default network, if one is used
        Networks and volumes defined as `external` are never removed.
        Usage: down [options]
        Options:
            --rmi type              Remove images. Type must be one of:
                                      'all': Remove all images used by any service.
                                      'local': Remove only images that don't have a
                                      custom tag set by the `image` field.
            -v, --volumes           Remove named volumes declared in the `volumes`
                                    section of the Compose file and anonymous volumes
                                    attached to containers.
            --remove-orphans        Remove containers for services not defined in the
                                    Compose file
            -t, --timeout TIMEOUT   Specify a shutdown timeout in seconds.
                                    (default: 10)
        """
        ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
        # The env var and the flag request opposite orphan handling.
        if ignore_orphans and options['--remove-orphans']:
            raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
        image_type = image_type_from_opt('--rmi', options['--rmi'])
        timeout = timeout_from_opts(options)
        self.project.down(
            image_type,
            options['--volumes'],
            options['--remove-orphans'],
            timeout=timeout,
            ignore_orphans=ignore_orphans)
    def events(self, options):
        """
        Receive real time events from containers.
        Usage: events [options] [--] [SERVICE...]
        Options:
            --json      Output events as a stream of json objects
        """
        def format_event(event):
            # Human-readable line: "time type action id (k=v, ...)".
            attributes = ["%s=%s" % item for item in event['attributes'].items()]
            return ("{time} {type} {action} {id} ({attrs})").format(
                attrs=", ".join(sorted(attributes)),
                **event)
        def json_format_event(event):
            # datetime is not JSON-serializable, and the live container
            # object is dropped before serializing.
            event['time'] = event['time'].isoformat()
            event.pop('container')
            return json.dumps(event)
        for event in self.project.events():
            formatter = json_format_event if options['--json'] else format_event
            print(formatter(event))
            # Flush per event so piped consumers see the stream live.
            sys.stdout.flush()
    @metrics("exec")
    def exec_command(self, options):
        """
        Execute a command in a running container
        Usage: exec [options] [-e KEY=VAL...] [--] SERVICE COMMAND [ARGS...]
        Options:
            -d, --detach      Detached mode: Run command in the background.
            --privileged      Give extended privileges to the process.
            -u, --user USER   Run the command as this user.
            -T                Disable pseudo-tty allocation. By default `docker-compose exec`
                              allocates a TTY.
            --index=index     index of the container if there are multiple
                              instances of a service [default: 1]
            -e, --env KEY=VAL Set environment variables (can be used multiple times,
                              not supported in API < 1.25)
            -w, --workdir DIR Path to workdir directory for this command.
        """
        # Interactive exec is delegated to the docker CLI binary unless
        # COMPOSE_INTERACTIVE_NO_CLI opts out.
        use_cli = not self.toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
        index = int(options.get('--index'))
        service = self.project.get_service(options['SERVICE'])
        detach = options.get('--detach')
        # Feature gates for older daemon APIs.
        if options['--env'] and docker.utils.version_lt(self.project.client.api_version, '1.25'):
            raise UserError("Setting environment for exec is not supported in API < 1.25 (%s)"
                            % self.project.client.api_version)
        if options['--workdir'] and docker.utils.version_lt(self.project.client.api_version, '1.35'):
            raise UserError("Setting workdir for exec is not supported in API < 1.35 (%s)"
                            % self.project.client.api_version)
        try:
            container = service.get_container(number=index)
        except ValueError as e:
            raise UserError(str(e))
        command = [options['COMMAND']] + options['ARGS']
        tty = not options["-T"]
        # Windows has no PTY support in-process, so always shell out there.
        if IS_WINDOWS_PLATFORM or use_cli and not detach:
            sys.exit(call_docker(
                build_exec_command(options, container.id, command),
                self.toplevel_options, self.toplevel_environment)
            )
        create_exec_options = {
            "privileged": options["--privileged"],
            "user": options["--user"],
            "tty": tty,
            "stdin": True,
            "workdir": options["--workdir"],
        }
        if docker.utils.version_gte(self.project.client.api_version, '1.25'):
            create_exec_options["environment"] = options["--env"]
        exec_id = container.create_exec(command, **create_exec_options)
        if detach:
            # Fire and forget: start the exec and return immediately.
            container.start_exec(exec_id, tty=tty, stream=True)
            return
        signals.set_signal_handler_to_shutdown()
        try:
            operation = ExecOperation(
                self.project.client,
                exec_id,
                interactive=tty,
            )
            pty = PseudoTerminal(self.project.client, operation)
            pty.start()
        except signals.ShutdownException:
            log.info("received shutdown exception: closing")
        # Exit with the exec'd process's own exit code.
        exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode")
        sys.exit(exit_code)
@classmethod
@metrics()
def help(cls, options):
"""
Get help on a command.
Usage: help [COMMAND]
"""
if options['COMMAND']:
subject = get_handler(cls, options['COMMAND'])
else:
subject = cls
print(getdoc(subject))
    @metrics()
    def images(self, options):
        """
        List images used by the created containers.
        Usage: images [options] [--] [SERVICE...]
        Options:
            -q, --quiet  Only display IDs
        """
        # Include stopped containers and one-off (`run`) containers.
        containers = sorted(
            self.project.containers(service_names=options['SERVICE'], stopped=True) +
            self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
            key=attrgetter('name'))
        if options['--quiet']:
            # Print each unique image ID, stripped of its 'sha256:' prefix.
            for image in {c.image for c in containers}:
                print(image.split(':')[1])
            return
        def add_default_tag(img_name):
            # Append ':latest' when the name carries no tag (the check is on
            # the last path segment so registry ports are not mistaken for tags).
            if ':' not in img_name.split('/')[-1]:
                return '{}:latest'.format(img_name)
            return img_name
        headers = [
            'Container',
            'Repository',
            'Tag',
            'Image Id',
            'Size'
        ]
        rows = []
        for container in containers:
            image_config = container.image_config
            service = self.project.get_service(container.service)
            index = 0
            # Prefer the tag matching the service's configured image name.
            img_name = add_default_tag(service.image_name)
            if img_name in image_config['RepoTags']:
                index = image_config['RepoTags'].index(img_name)
            repo_tags = (
                image_config['RepoTags'][index].rsplit(':', 1) if image_config['RepoTags']
                else ('<none>', '<none>')
            )
            # Short 12-character image ID, as `docker images` shows it.
            image_id = image_config['Id'].split(':')[1][:12]
            size = human_readable_file_size(image_config['Size'])
            rows.append([
                container.name,
                repo_tags[0],
                repo_tags[1],
                image_id,
                size
            ])
        print(Formatter.table(headers, rows))
@metrics()
def kill(self, options):
"""
Force stop service containers.
Usage: kill [options] [--] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
self.project.kill(service_names=options['SERVICE'], signal=signal)
@metrics()
def logs(self, options):
"""
View output from containers.
Usage: logs [options] [--] [SERVICE...]
Options:
--no-color Produce monochrome output.
-f, --follow Follow log output.
-t, --timestamps Show timestamps.
--tail="all" Number of lines to show from the end of the logs
for each container.
--no-log-prefix Don't print prefix in logs.
--grep="something" Simple grep on an ascii string
"""
containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
tail = options['--tail']
if tail is not None:
if tail.isdigit():
tail = int(tail)
elif tail != 'all':
raise UserError("tail flag must be all or a number")
grep = options['--grep']
if grep is not None:
if type(grep) == str:
grep = str(grep)
else:
raise UserError("grep value must be a valide string")
log_args = {
'follow': options['--follow'],
'tail': tail,
'timestamps': options['--timestamps'],
'grep': grep
}
print("Attaching to", list_containers(containers))
log_printer_from_project(
self.project,
containers,
options['--no-color'],
log_args,
event_stream=self.project.events(service_names=options['SERVICE']),
keep_prefix=not options['--no-log-prefix']).run()
@metrics()
def pause(self, options):
"""
Pause services.
Usage: pause [SERVICE...]
"""
containers = self.project.pause(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to pause', 1)
@metrics()
def port(self, options):
"""
Print the public port for a port binding.
Usage: port [options] [--] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
    @metrics()
    def ps(self, options):
        """
        List containers.
        Usage: ps [options] [--] [SERVICE...]
        Options:
            -q, --quiet          Only display IDs
            --services           Display services
            --filter KEY=VAL     Filter services by a property
            -a, --all            Show all stopped containers (including those created by the run command)
        """
        if options['--quiet'] and options['--services']:
            raise UserError('--quiet and --services cannot be combined')
        if options['--services']:
            # Service-name listing works on the config alone; --filter
            # narrows by service property.
            filt = build_filter(options.get('--filter'))
            services = self.project.services
            if filt:
                services = filter_services(filt, services, self.project)
            print('\n'.join(service.name for service in services))
            return
        if options['--all']:
            containers = sorted(self.project.containers(service_names=options['SERVICE'],
                                                        one_off=OneOffFilter.include, stopped=True),
                                key=attrgetter('name'))
        else:
            # Default view: service containers (running or stopped) plus
            # one-off `run` containers.
            containers = sorted(
                self.project.containers(service_names=options['SERVICE'], stopped=True) +
                self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
                key=attrgetter('name'))
        if options['--quiet']:
            for container in containers:
                print(container.id)
        else:
            headers = [
                'Name',
                'Command',
                'State',
                'Ports',
            ]
            rows = []
            for container in containers:
                command = container.human_readable_command
                # Truncate long commands to keep the table readable.
                if len(command) > 30:
                    command = '%s ...' % command[:26]
                rows.append([
                    container.name,
                    command,
                    container.human_readable_state,
                    container.human_readable_ports,
                ])
            print(Formatter.table(headers, rows))
    @metrics()
    def pull(self, options):
        """
        Pulls images for services defined in a Compose file, but does not start the containers.
        Usage: pull [options] [--] [SERVICE...]
        Options:
            --ignore-pull-failures  Pull what it can and ignores images with pull failures.
            --parallel              Deprecated, pull multiple images in parallel (enabled by default).
            --no-parallel           Disable parallel pulling.
            -q, --quiet             Pull without printing progress information
            --include-deps          Also pull services declared as dependencies
        """
        # Parallel pulling is the default; --parallel is a deprecated no-op.
        if options.get('--parallel'):
            log.warning('--parallel option is deprecated and will be removed in future versions.')
        self.project.pull(
            service_names=options['SERVICE'],
            ignore_pull_failures=options.get('--ignore-pull-failures'),
            parallel_pull=not options.get('--no-parallel'),
            silent=options.get('--quiet'),
            include_deps=options.get('--include-deps'),
        )
@metrics()
def push(self, options):
"""
Pushes images for services.
Usage: push [options] [--] [SERVICE...]
Options:
--ignore-push-failures Push what it can and ignores images with push failures.
"""
self.project.push(
service_names=options['SERVICE'],
ignore_push_failures=options.get('--ignore-push-failures')
)
    @metrics()
    def rm(self, options):
        """
        Removes stopped service containers.
        By default, anonymous volumes attached to containers will not be removed. You
        can override this with `-v`. To list all volumes, use `docker volume ls`.
        Any data which is not in a volume will be lost.
        Usage: rm [options] [--] [SERVICE...]
        Options:
            -f, --force   Don't ask to confirm removal
            -s, --stop    Stop the containers, if required, before removing
            -v            Remove any anonymous volumes attached to containers
            -a, --all     Deprecated - no effect.
        """
        if options.get('--all'):
            # --all became the default behavior; flag is kept for compatibility.
            log.warning(
                '--all flag is obsolete. This is now the default behavior '
                'of `docker-compose rm`'
            )
        one_off = OneOffFilter.include
        if options.get('--stop'):
            self.project.stop(service_names=options['SERVICE'], one_off=one_off)
        all_containers = self.project.containers(
            service_names=options['SERVICE'], stopped=True, one_off=one_off
        )
        # Only containers that are no longer running may be removed.
        stopped_containers = [c for c in all_containers if not c.is_running]
        if len(stopped_containers) > 0:
            print("Going to remove", list_containers(stopped_containers))
            # --force skips the interactive confirmation (defaults to "no").
            if options.get('--force') \
                    or yesno("Are you sure? [yN] ", default=False):
                self.project.remove_stopped(
                    service_names=options['SERVICE'],
                    v=options.get('-v', False),
                    one_off=one_off
                )
        else:
            print("No stopped containers")
    @metrics()
    def run(self, options):
        """
        Run a one-off command on a service.
        For example:
            $ docker-compose run web python manage.py shell
        By default, linked services will be started, unless they are already
        running. If you do not want to start linked services, use
        `docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
        Usage:
            run [options] [-v VOLUME...] [-p PORT...] [-e KEY=VAL...] [-l KEY=VALUE...] [--]
                SERVICE [COMMAND] [ARGS...]
        Options:
            -d, --detach          Detached mode: Run container in the background, print
                                  new container name.
            --name NAME           Assign a name to the container
            --entrypoint CMD      Override the entrypoint of the image.
            -e KEY=VAL            Set an environment variable (can be used multiple times)
            -l, --label KEY=VAL   Add or override a label (can be used multiple times)
            -u, --user=""         Run as specified username or uid
            --no-deps             Don't start linked services.
            --rm                  Remove container after run. Ignored in detached mode.
            -p, --publish=[]      Publish a container's port(s) to the host
            --service-ports       Run command with the service's ports enabled and mapped
                                  to the host.
            --use-aliases         Use the service's network aliases in the network(s) the
                                  container connects to.
            -v, --volume=[]       Bind mount a volume (default [])
            -T                    Disable pseudo-tty allocation. By default `docker-compose run`
                                  allocates a TTY.
            -w, --workdir=""      Working directory inside the container
        """
        service = self.project.get_service(options['SERVICE'])
        detach = options.get('--detach')
        # Explicit -p mappings and --service-ports are mutually exclusive.
        if options['--publish'] and options['--service-ports']:
            raise UserError(
                'Service port mapping and manual port mapping '
                'can not be used together'
            )
        if options['COMMAND'] is not None:
            command = [options['COMMAND']] + options['ARGS']
        elif options['--entrypoint'] is not None:
            # Overridden entrypoint with no COMMAND: run the entrypoint alone.
            command = []
        else:
            # Fall back to the service's configured command.
            command = service.options.get('command')
        # One-off runs keep stdin open unless the service says otherwise.
        options['stdin_open'] = service.options.get('stdin_open', True)
        container_options = build_one_off_container_options(options, detach, command)
        run_one_off_container(
            container_options, self.project, service, options,
            self.toplevel_options, self.toplevel_environment
        )
@metrics()
def scale(self, options):
"""
Set number of containers to run for a service.
Numbers are specified in the form `service=num` as arguments.
For example:
$ docker-compose scale web=2 worker=3
This command is deprecated. Use the up command with the `--scale` flag
instead.
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = timeout_from_opts(options)
log.warning(
'The scale command is deprecated. '
'Use the up command with the --scale flag instead.'
)
for service_name, num in parse_scale_args(options['SERVICE=NUM']).items():
self.project.get_service(service_name).scale(num, timeout=timeout)
@metrics()
def start(self, options):
"""
Start existing containers.
Usage: start [SERVICE...]
"""
containers = self.project.start(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to start', 1)
@metrics()
def stop(self, options):
"""
Stop running containers without removing them.
They can be started again with `docker-compose start`.
Usage: stop [options] [--] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = timeout_from_opts(options)
self.project.stop(service_names=options['SERVICE'], timeout=timeout)
@metrics()
def restart(self, options):
"""
Restart running containers.
Usage: restart [options] [--] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = timeout_from_opts(options)
containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
exit_if(not containers, 'No containers to restart', 1)
    @metrics()
    def top(self, options):
        """
        Display the running processes
        Usage: top [SERVICE...]
        """
        # Running service containers plus one-off `run` containers.
        containers = sorted(
            self.project.containers(service_names=options['SERVICE'], stopped=False) +
            self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
            key=attrgetter('name')
        )
        for idx, container in enumerate(containers):
            # Blank line between per-container tables.
            if idx > 0:
                print()
            top_data = self.project.client.top(container.name)
            headers = top_data.get("Titles")
            rows = []
            for process in top_data.get("Processes", []):
                rows.append(process)
            print(container.name)
            print(Formatter.table(headers, rows))
@metrics()
def unpause(self, options):
"""
Unpause services.
Usage: unpause [SERVICE...]
"""
containers = self.project.unpause(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to unpause', 1)
    @metrics()
    def up(self, options):
        """
        Builds, (re)creates, starts, and attaches to containers for a service.
        Unless they are already running, this command also starts any linked services.
        The `docker-compose up` command aggregates the output of each container. When
        the command exits, all containers are stopped. Running `docker-compose up -d`
        starts the containers in the background and leaves them running.
        If there are existing containers for a service, and the service's configuration
        or image was changed after the container's creation, `docker-compose up` picks
        up the changes by stopping and recreating the containers (preserving mounted
        volumes). To prevent Compose from picking up changes, use the `--no-recreate`
        flag.
        If you want to force Compose to stop and recreate all containers, use the
        `--force-recreate` flag.
        Usage: up [options] [--scale SERVICE=NUM...] [--] [SERVICE...]
        Options:
            -d, --detach               Detached mode: Run containers in the background,
                                       print new container names. Incompatible with
                                       --abort-on-container-exit.
            --no-color                 Produce monochrome output.
            --quiet-pull               Pull without printing progress information
            --no-deps                  Don't start linked services.
            --force-recreate           Recreate containers even if their configuration
                                       and image haven't changed.
            --always-recreate-deps     Recreate dependent containers.
                                       Incompatible with --no-recreate.
            --no-recreate              If containers already exist, don't recreate
                                       them. Incompatible with --force-recreate and -V.
            --no-build                 Don't build an image, even if it's missing.
            --no-start                 Don't start the services after creating them.
            --build                    Build images before starting containers.
            --abort-on-container-exit  Stops all containers if any container was
                                       stopped. Incompatible with -d.
            --attach-dependencies      Attach to dependent containers.
            -t, --timeout TIMEOUT      Use this timeout in seconds for container
                                       shutdown when attached or when containers are
                                       already running. (default: 10)
            -V, --renew-anon-volumes   Recreate anonymous volumes instead of retrieving
                                       data from the previous containers.
            --remove-orphans           Remove containers for services not defined
                                       in the Compose file.
            --exit-code-from SERVICE   Return the exit code of the selected service
                                       container. Implies --abort-on-container-exit.
            --scale SERVICE=NUM        Scale SERVICE to NUM instances. Overrides the
                                       `scale` setting in the Compose file if present.
            --no-log-prefix            Don't print prefix in logs.
        """
        start_deps = not options['--no-deps']
        always_recreate_deps = options['--always-recreate-deps']
        # Note: may set --abort-on-container-exit as a side effect.
        exit_value_from = exitval_from_opts(options, self.project)
        cascade_stop = options['--abort-on-container-exit']
        service_names = options['SERVICE']
        timeout = timeout_from_opts(options)
        remove_orphans = options['--remove-orphans']
        detached = options.get('--detach')
        no_start = options.get('--no-start')
        attach_dependencies = options.get('--attach-dependencies')
        keep_prefix = not options['--no-log-prefix']
        # Flag-compatibility validation before any work is done.
        if detached and (cascade_stop or exit_value_from or attach_dependencies):
            raise UserError(
                "-d cannot be combined with --abort-on-container-exit or --attach-dependencies.")
        ignore_orphans = self.toplevel_environment.get_boolean('COMPOSE_IGNORE_ORPHANS')
        if ignore_orphans and remove_orphans:
            raise UserError("COMPOSE_IGNORE_ORPHANS and --remove-orphans cannot be combined.")
        opts = ['--detach', '--abort-on-container-exit', '--exit-code-from', '--attach-dependencies']
        for excluded in [x for x in opts if options.get(x) and no_start]:
            raise UserError('--no-start and {} cannot be combined.'.format(excluded))
        native_builder = self.toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD', True)
        # The context stops services on exit when we are attached.
        with up_shutdown_context(self.project, service_names, timeout, detached):
            warn_for_swarm_mode(self.project.client)
            def up(rebuild):
                # Local closure so the recovery path below can retry with
                # reset_container_image=True after an ImageNotFound error.
                return self.project.up(
                    service_names=service_names,
                    start_deps=start_deps,
                    strategy=convergence_strategy_from_opts(options),
                    do_build=build_action_from_opts(options),
                    timeout=timeout,
                    detached=detached,
                    remove_orphans=remove_orphans,
                    ignore_orphans=ignore_orphans,
                    scale_override=parse_scale_args(options['--scale']),
                    start=not no_start,
                    always_recreate_deps=always_recreate_deps,
                    reset_container_image=rebuild,
                    renew_anonymous_volumes=options.get('--renew-anon-volumes'),
                    silent=options.get('--quiet-pull'),
                    cli=native_builder,
                    attach_dependencies=attach_dependencies,
                )
            try:
                to_attach = up(False)
            except docker.errors.ImageNotFound as e:
                # The old container's image is gone; offer a recreate that
                # may lose anonymous-volume data.
                log.error(
                    "The image for the service you're trying to recreate has been removed. "
                    "If you continue, volume data could be lost. Consider backing up your data "
                    "before continuing.\n"
                )
                res = yesno("Continue with the new image? [yN]", False)
                if res is None or not res:
                    raise e
                to_attach = up(True)
            if detached or no_start:
                return
            # Attached mode: stream logs until interrupted or cascade-stopped.
            attached_containers = filter_attached_containers(
                to_attach,
                service_names,
                attach_dependencies)
            log_printer = log_printer_from_project(
                self.project,
                attached_containers,
                options['--no-color'],
                {'follow': True},
                cascade_stop,
                event_stream=self.project.events(service_names=service_names),
                keep_prefix=keep_prefix)
            print("Attaching to", list_containers(log_printer.containers))
            cascade_starter = log_printer.run()
            if cascade_stop:
                print("Aborting on container exit...")
                all_containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
                exit_code = compute_exit_code(
                    exit_value_from, attached_containers, cascade_starter, all_containers
                )
                self.project.stop(service_names=service_names, timeout=timeout)
                if exit_value_from:
                    # --exit-code-from overrides the cascade-derived code.
                    exit_code = compute_service_exit_code(exit_value_from, attached_containers)
                sys.exit(exit_code)
@classmethod
@metrics()
def version(cls, options):
"""
Show version information and quit.
Usage: version [--short]
Options:
--short Shows only Compose's version number.
"""
if options['--short']:
print(__version__)
else:
print(get_version_info('full'))
def compute_service_exit_code(exit_value_from, attached_containers):
    """Return the exit code for --exit-code-from.

    Returns 2 when no container for *exit_value_from* ran. With several
    replicas, the first non-zero exit code wins; 0 when all succeeded.
    """
    candidates = [c for c in attached_containers if c.service == exit_value_from]
    if not candidates:
        log.error(
            'No containers matching the spec "{}" '
            'were run.'.format(exit_value_from)
        )
        return 2
    if len(candidates) > 1:
        # BUG FIX: filter() returns an iterator in Python 3 and is not
        # subscriptable; the old `exit_values[0]` raised TypeError.
        # Materialize the codes, and return 0 when none are non-zero
        # (the old code would also have raised IndexError in that case).
        exit_values = [c.inspect()['State']['ExitCode'] for c in candidates]
        non_zero = [code for code in exit_values if code != 0]
        return non_zero[0] if non_zero else 0
    return candidates[0].inspect()['State']['ExitCode']
def compute_exit_code(exit_value_from, attached_containers, cascade_starter, all_containers):
    """Exit code for --abort-on-container-exit.

    Returns the exit code of the stopped container that triggered the
    cascade (matched by name), or 0 if it exited cleanly / is still running.
    """
    for container in all_containers:
        if container.is_running or cascade_starter != container.name:
            continue
        if container.exit_code != 0:
            return container.exit_code
    return 0
def convergence_strategy_from_opts(options):
    """Map the recreate-related CLI flags onto a ConvergenceStrategy."""
    force = options['--force-recreate']
    never = options['--no-recreate']
    renew_volumes = options.get('--renew-anon-volumes')
    # Reject contradictory flag combinations first.
    if force and never:
        raise UserError("--force-recreate and --no-recreate cannot be combined.")
    if never and renew_volumes:
        raise UserError('--no-recreate and --renew-anon-volumes cannot be combined.')
    if force or renew_volumes:
        return ConvergenceStrategy.always
    return ConvergenceStrategy.never if never else ConvergenceStrategy.changed
def timeout_from_opts(options):
    """Return --timeout as an int, or None when the flag was not given."""
    raw_timeout = options.get('--timeout')
    if raw_timeout is None:
        return None
    return int(raw_timeout)
def image_digests_for_project(project):
    # Resolve image digests for every service; on MissingDigests, turn the
    # exception into a UserError explaining which images must be pushed
    # and/or pulled first.
    try:
        return get_image_digests(project)
    except MissingDigests as e:
        def list_images(images):
            # Indented, sorted bullet list for the error message.
            return "\n".join("    {}".format(name) for name in sorted(images))
        paras = ["Some images are missing digests."]
        if e.needs_push:
            command_hint = (
                "Use `docker push {}` to push them. "
                .format(" ".join(sorted(e.needs_push)))
            )
            paras += [
                "The following images can be pushed:",
                list_images(e.needs_push),
                command_hint,
            ]
        if e.needs_pull:
            command_hint = (
                "Use `docker pull {}` to pull them. "
                .format(" ".join(sorted(e.needs_pull)))
            )
            paras += [
                "The following images need to be pulled:",
                list_images(e.needs_pull),
                command_hint,
            ]
        raise UserError("\n\n".join(paras))
def exitval_from_opts(options, project):
    # Validate --exit-code-from and return the chosen service name (or a
    # falsy value when unset). Side effect: forces --abort-on-container-exit
    # on, since the exit code is only observable when containers cascade-stop.
    exit_value_from = options.get('--exit-code-from')
    if exit_value_from:
        if not options.get('--abort-on-container-exit'):
            log.warning('using --exit-code-from implies --abort-on-container-exit')
            options['--abort-on-container-exit'] = True
        if exit_value_from not in [s.name for s in project.get_services()]:
            # Unknown service: report and exit with status 2 immediately.
            log.error('No service named "%s" was found in your compose file.',
                      exit_value_from)
            sys.exit(2)
    return exit_value_from
def image_type_from_opt(flag, value):
    """Translate a --rmi value into an ImageType; UserError on bad input."""
    if not value:
        # Flag omitted: remove no images.
        return ImageType.none
    if value not in ImageType.__members__:
        raise UserError("%s flag must be one of: all, local" % flag)
    return ImageType[value]
def build_action_from_opts(options):
    """Pick the BuildAction implied by the --build/--no-build flags."""
    wants_build = options['--build']
    skips_build = options['--no-build']
    if wants_build and skips_build:
        raise UserError("--build and --no-build can not be combined.")
    if wants_build:
        return BuildAction.force
    return BuildAction.skip if skips_build else BuildAction.none
def build_one_off_container_options(options, detach, command):
    """Assemble the container override options for `docker-compose run`.

    Only flags that were actually supplied override the service definition;
    the returned dict is fed into the project's container creation.
    """
    # A TTY only makes sense for an attached, interactive invocation.
    wants_tty = not (detach or options['-T'] or not sys.stdin.isatty())
    container_options = {
        'command': command,
        'tty': wants_tty,
        'stdin_open': options.get('stdin_open'),
        'detach': detach,
    }

    if options['-e']:
        container_options['environment'] = Environment.from_command_line(
            parse_environment(options['-e'])
        )
    if options['--label']:
        container_options['labels'] = parse_labels(options['--label'])

    entrypoint = options.get('--entrypoint')
    if entrypoint is not None:
        # An explicitly empty entrypoint must clear the image's entrypoint.
        container_options['entrypoint'] = [""] if entrypoint == '' else entrypoint

    # Ensure that run command remains one-off (issue #6302)
    container_options['restart'] = None

    if options['--user']:
        container_options['user'] = options.get('--user')
    if not options['--service-ports']:
        container_options['ports'] = []
    if options['--publish']:
        container_options['ports'] = options.get('--publish')
    if options['--name']:
        container_options['name'] = options['--name']
    if options['--workdir']:
        container_options['working_dir'] = options['--workdir']
    if options['--volume']:
        container_options['volumes'] = [
            VolumeSpec.parse(spec) for spec in options['--volume']
        ]

    return container_options
def run_one_off_container(container_options, project, service, options, toplevel_options,
                          toplevel_environment):
    """Bring up one one-off container for *service* and attach to it.

    In detached mode only prints the container name and returns; otherwise
    attaches (via the docker CLI or an in-process pseudo-terminal), waits,
    optionally removes the container (--rm) and exits with its exit code.
    This function does not return in the attached case: it calls sys.exit().
    """
    native_builder = toplevel_environment.get_boolean('COMPOSE_DOCKER_CLI_BUILD')
    detach = options.get('--detach')
    use_network_aliases = options.get('--use-aliases')
    # one-off containers are never scaled
    service.scale_num = 1
    # Create (but do not start) the container; it is started below so that
    # attachment can be set up first.
    containers = project.up(
        service_names=[service.name],
        start_deps=not options['--no-deps'],
        strategy=ConvergenceStrategy.never,
        detached=True,
        rescale=False,
        cli=native_builder,
        one_off=True,
        override_options=container_options,
    )
    try:
        container = next(c for c in containers if c.service == service.name)
    except StopIteration:
        raise OperationFailedError('Could not bring up the requested service')
    if detach:
        service.start_container(container, use_network_aliases)
        print(container.name)
        return
    def remove_container():
        # honour `docker-compose run --rm`
        if options['--rm']:
            project.client.remove_container(container.id, force=True, v=True)
    use_cli = not toplevel_environment.get_boolean('COMPOSE_INTERACTIVE_NO_CLI')
    signals.set_signal_handler_to_shutdown()
    signals.set_signal_handler_to_hang_up()
    try:
        try:
            if IS_WINDOWS_PLATFORM or use_cli:
                # Delegate attachment to the `docker start` CLI.
                service.connect_container_to_networks(container, use_network_aliases)
                exit_code = call_docker(
                    get_docker_start_call(container_options, container.id),
                    toplevel_options, toplevel_environment
                )
            else:
                # In-process attachment through a pseudo-terminal; the
                # container must be started only after sockets are ready.
                operation = RunOperation(
                    project.client,
                    container.id,
                    interactive=not options['-T'],
                    logs=False,
                )
                pty = PseudoTerminal(project.client, operation)
                sockets = pty.sockets()
                service.start_container(container, use_network_aliases)
                pty.start(sockets)
                exit_code = container.wait()
        except (signals.ShutdownException):
            # first Ctrl+C: graceful stop, report failure exit code
            project.client.stop(container.id)
            exit_code = 1
    except (signals.ShutdownException, signals.HangUpException):
        # second Ctrl+C (or hang-up): kill, clean up and bail out
        project.client.kill(container.id)
        remove_container()
        sys.exit(2)
    remove_container()
    sys.exit(exit_code)
def get_docker_start_call(container_options, container_id):
    """Build the `docker start` argv tail for an existing container."""
    call = ["start"]
    if not container_options.get('detach'):
        call.append("--attach")
    if container_options.get('stdin_open'):
        call.append("--interactive")
    call.append(container_id)
    return call
def log_printer_from_project(
        project,
        containers,
        monochrome,
        log_args,
        cascade_stop=False,
        event_stream=None,
        keep_prefix=True,
):
    """Create a LogPrinter wired to *project*'s containers and event stream."""
    presenters = build_log_presenters(project.service_names, monochrome, keep_prefix)
    events = event_stream or project.events()
    return LogPrinter(
        containers,
        presenters,
        events,
        cascade_stop=cascade_stop,
        log_args=log_args,
    )
def filter_attached_containers(containers, service_names, attach_dependencies=False):
    """Narrow *containers* down to the ones whose output should be attached."""
    def service_of(container):
        return container.service

    return filter_attached_for_up(
        containers, service_names, attach_dependencies, service_of)
@contextlib.contextmanager
def up_shutdown_context(project, service_names, timeout, detached):
    """Context manager turning interrupts into a graceful stop for `up`.

    In detached mode no cleanup is needed, so signal handling is untouched.
    A first interrupt stops the services gracefully; a second interrupt
    while stopping kills them and exits with status 2.
    """
    if detached:
        yield
        return
    signals.set_signal_handler_to_shutdown()
    try:
        try:
            yield
        except signals.ShutdownException:
            # first Ctrl+C: graceful stop
            print("Gracefully stopping... (press Ctrl+C again to force)")
            project.stop(service_names=service_names, timeout=timeout)
    except signals.ShutdownException:
        # second Ctrl+C raised while stopping: force-kill and bail out
        project.kill(service_names=service_names)
        sys.exit(2)
def list_containers(containers):
    """Render container names as a single comma-separated string."""
    names = (container.name for container in containers)
    return ", ".join(names)
def exit_if(condition, message, exit_code):
    """Log *message* and exit with *exit_code* when *condition* holds."""
    if not condition:
        return
    log.error(message)
    raise SystemExit(exit_code)
def call_docker(args, dockeropts, environment):
    """Shell out to the `docker` CLI, forwarding TLS/host/context options.

    Returns the subprocess exit code.  Raises UserError when no `docker`
    binary can be found on PATH.
    """
    executable_path = find_executable('docker')
    if not executable_path:
        raise UserError(errors.docker_not_found_msg("Couldn't find `docker` binary."))

    cli_options = []
    if dockeropts.get('--tls', False):
        cli_options.append('--tls')
    for flag in ('--tlscacert', '--tlscert', '--tlskey'):
        flag_value = dockeropts.get(flag)
        if flag_value:
            cli_options.extend([flag, flag_value])
    if dockeropts.get('--tlsverify'):
        cli_options.append('--tlsverify')

    host = dockeropts.get('--host')
    if host:
        # docker only accepts tcp:// here; normalize an http(s):// URL form
        cli_options.extend(
            ['--host', re.sub(r'^https?://', 'tcp://', host.lstrip('='))]
        )
    context = dockeropts.get('--context')
    if context:
        cli_options.extend(['--context', context])

    full_command = [executable_path] + cli_options + args
    log.debug(" ".join(map(pipes.quote, full_command)))
    # None values would crash subprocess; drop them from the environment.
    env = {name: value for name, value in environment.items() if value is not None}
    return subprocess.call(full_command, env=env)
def parse_scale_args(options):
    """Parse `service=num` CLI arguments into a {service: int} mapping."""
    scales = {}
    for arg in options:
        if '=' not in arg:
            raise UserError('Arguments to scale should be in the form service=num')
        service_name, _, count = arg.partition('=')
        try:
            scales[service_name] = int(count)
        except ValueError:
            raise UserError(
                'Number of containers for service "%s" is not a number' % service_name
            )
    return scales
def build_exec_command(options, container_id, command):
    """Translate `docker-compose exec` options into a `docker exec` argv tail."""
    args = ["exec"]
    args.append("--detach" if options["--detach"] else "--interactive")
    if not options["-T"]:
        args.append("--tty")
    if options["--privileged"]:
        args.append("--privileged")
    if options["--user"]:
        args.extend(["--user", options["--user"]])
    for env_variable in options["--env"] or []:
        args.extend(["--env", env_variable])
    if options["--workdir"]:
        args.extend(["--workdir", options["--workdir"]])
    args.append(container_id)
    args.extend(command)
    return args
def has_container_with_state(containers, state):
    """Return whether any container in *containers* is in *state*.

    *state* must be one of 'running', 'stopped', 'paused' or 'restarting';
    anything else raises UserError.  Returns False when nothing matches
    (previously an implicit None — same truthiness for callers).
    """
    predicates = {
        'running': lambda c: c.is_running,
        'stopped': lambda c: not c.is_running,
        'paused': lambda c: c.is_paused,
        'restarting': lambda c: c.is_restarting,
    }
    # Validate once, up front: previously the check lived inside the loop,
    # so an invalid state passed silently whenever the list was empty.
    if state not in predicates:
        raise UserError("Invalid state: %s" % state)
    predicate = predicates[state]
    return any(predicate(container) for container in containers)
def filter_services(filt, services, project):
    """Apply the --filter criteria (status=..., source=...) to *services*."""
    def should_include(service):
        for criterion, wanted in filt.items():
            if criterion == 'status':
                containers = project.containers([service.name], stopped=True)
                if not has_container_with_state(containers, wanted):
                    return False
            elif criterion == 'source':
                if wanted not in ('image', 'build'):
                    raise UserError("Invalid value for source filter: %s" % wanted)
                if wanted not in service.options:
                    return False
            else:
                raise UserError("Invalid filter: %s" % criterion)
        return True

    return filter(should_include, services)
def build_filter(arg):
    """Turn a single `KEY=VAL` --filter argument into a one-entry dict."""
    if arg is None:
        return {}
    if '=' not in arg:
        raise UserError("Arguments to --filter should be in form KEY=VAL")
    key, _, val = arg.partition('=')
    return {key: val}
def warn_for_swarm_mode(client):
    """Warn when the engine runs in swarm mode, where Compose stays single-node."""
    info = client.info()
    if info.get('Swarm', {}).get('LocalNodeState') != 'active':
        return
    if info.get('ServerVersion', '').startswith('ucp'):
        # UCP does multi-node scheduling with traditional Compose files.
        return
    log.warning(
        "The Docker Engine you're using is running in swarm mode.\n\n"
        "Compose does not use swarm mode to deploy services to multiple nodes in a swarm. "
        "All containers will be scheduled on the current node.\n\n"
        "To deploy your application across the swarm, "
        "use `docker stack deploy`.\n"
    )
| 37.165775 | 105 | 0.578961 |
825ae99956e03ab2cd6892857b7f07af4bfb90a5 | 15,663 | py | Python | query/query.py | domielias/PyQuery | de61e14da9e0a92f2772e1a5d6ef4db476caa362 | [
"MIT"
] | 4 | 2020-06-19T20:21:32.000Z | 2020-09-25T03:34:44.000Z | query/query.py | domielias/PyQuery | de61e14da9e0a92f2772e1a5d6ef4db476caa362 | [
"MIT"
] | 2 | 2020-09-25T03:07:23.000Z | 2020-10-09T18:20:48.000Z | query/query.py | domielias/PyQuery | de61e14da9e0a92f2772e1a5d6ef4db476caa362 | [
"MIT"
] | 1 | 2020-09-26T03:51:49.000Z | 2020-09-26T03:51:49.000Z | from .settings import (
WHERE_SPECIAL_ARGUMENTS, AUTOMATIC_JOINS_PLACEHOLDER,
FIELD_FORMAT, SELECT_FORMAT, JOIN_CLAUSE_FORMAT, WHERE_CLAUSE_FORMAT, WHERE_AND_CONNECTOR_FORMAT,
WHERE_EQUAL_OPERATION_FORMAT, ORDER_BY_CLAUSE_FORMAT, ORDER_BY_ASC_FORMAT, ORDER_BY_DESC_FORMAT,
LIMIT_FORMAT,VALUE_STRING_FORMAT, VALUE_LIST_FORMAT, VALUE_TUPLE_FORMAT, VALUE_NULL_FORMAT, VALUE_DATETIME_FORMAT,
VALUE_SINGLE_QUOTE_FORMAT, DISTINCT_CLAUSE_FORMAT, FIELD_OR_TABLES_FORMAT
)
from datetime import datetime
import math
import random
class BaseQuery:
    """Shared machinery for query builders bound to a single table.

    Tracks which join path resolves to which table (or generated alias) and
    knows how to render identifiers and Python values as SQL fragments.
    """

    def __init__(self, on_table, engine):
        self.engine = engine
        self.on_table = on_table
        # Maps a join path (e.g. 'group__company') to the table it resolves
        # to; `is_alias` is True when `table_name` is a generated alias.
        # The empty path is the root table of this query.
        self.fields_table_relations = {
            '': dict(table_name=on_table, is_alias=False)
        }

    def _format_db_tables_names(self, value):
        """Render one fields_table_relations entry as a SQL table reference.

        Args:
            value (dict): a single dict of the fields_table_relations dict
        """
        if value['is_alias']:
            # aliases are emitted verbatim, never quoted
            return value['table_name']
        return self._format_field_or_tables(value['table_name'])

    def _format_field_or_tables(self, value):
        """Quote a table or column identifier."""
        return FIELD_OR_TABLES_FORMAT.format(value)

    def __create_table_name_alias(self, table_name):
        """Generate a table alias that is not already in use for this query."""
        taken = [value['table_name'] for value in self.fields_table_relations.values()]
        alias = 'TA{}'.format(str(random.randint(1, 100)))
        while alias in taken:
            alias = 'TA{}'.format(str(random.randint(1, 100)))
        return alias

    def _get_table_name_or_alias(self, query_path, table_name):
        """Resolve *query_path* to its table entry, creating one if needed.

        An alias is generated when *table_name* is already referenced by
        another path (i.e. the same table is joined more than once).
        """
        if query_path in self.fields_table_relations:
            return self.fields_table_relations[query_path]
        already_used = [value['table_name'] for value in self.fields_table_relations.values()]
        if table_name in already_used:
            self.fields_table_relations[query_path] = {
                'table_name': self.__create_table_name_alias(table_name),
                'is_alias': True
            }
        else:
            self.fields_table_relations[query_path] = {
                'table_name': table_name,
                'is_alias': False
            }
        return self.fields_table_relations[query_path]

    def __format_joins(self, joins):
        """Append JOIN clauses for a chain of field names; return the final
        table reference (quoted name or alias) the last field lives on.

        ``self.join_relations`` may remap a (table, field) pair to the real
        target table, e.g. ``{"form_value": {"form": "dynamic_forms"}}``
        means: when joining through the `form` field of `form_value`, join
        the `dynamic_forms` table instead of a table literally named `form`.
        Chains like `form_value -> dynamic_forms -> foo` need one entry per
        hop whose field name does not match its target table.
        """
        to_table_join = self.fields_table_relations['']
        reference_string_list = list()
        for index, join in enumerate(joins):
            # build the path reference (e.g. depends_on__group__company) so
            # aliases created for a path can be reused on later calls
            reference_string_list.append(join)
            reference_string = '__'.join(reference_string_list)
            from_table_join = to_table_join
            # NOTE(review): when from_table_join holds a generated alias, this
            # lookup keys join_relations by the alias, not the real table name
            # — likely misses remappings on re-joined tables; confirm.
            to_table_join_name = self.join_relations.get(from_table_join['table_name'], {}).get(join, join)
            to_table_join = self._get_table_name_or_alias(reference_string, to_table_join_name)
            join_clause = JOIN_CLAUSE_FORMAT.format(
                join=join,
                from_table_join=self._format_db_tables_names(from_table_join),
                to_table_join=FIELD_OR_TABLES_FORMAT.format(to_table_join_name),
                to_table_join_name_or_alias=self._format_db_tables_names(to_table_join),
                alias=to_table_join['table_name'] if to_table_join['is_alias'] else ''
            )
            if join_clause not in self.query_joins:
                self.query_joins.append(join_clause)
        return self._format_db_tables_names(to_table_join)

    def _format_db_fields(self, value):
        """Render a (possibly join-traversing) field path as a qualified column.

        A value like 'group__company__id' emits joins for group -> company and
        returns the `id` column qualified with the final table (or alias).
        """
        table_name = self._format_db_tables_names(self.fields_table_relations[''])
        splitted_value = value.split(AUTOMATIC_JOINS_PLACEHOLDER)
        if len(splitted_value) > 1:
            # Handle automatic join operations
            joins = splitted_value[:-1]
            table_name = self.__format_joins(joins)
        values_to_use = splitted_value[-2:]
        return FIELD_FORMAT.format(
            table=table_name,
            field=self._format_field_or_tables(values_to_use[-1])
        )

    def format_db_values(self, value):
        """Render a Python value as a SQL literal.

        Strings are quoted (with single quotes escaped), datetimes
        serialized, lists/tuples rendered element-wise, nested queries
        materialized first, and None becomes the SQL NULL literal.  Other
        values (e.g. numbers) pass through unchanged.
        """
        # isinstance/`is None` replace the previous `type(x) == T` and
        # `value == None` comparisons; each branch returns directly since a
        # value only ever matches one of them.
        if isinstance(value, str):
            return VALUE_STRING_FORMAT.format(value.replace("'", VALUE_SINGLE_QUOTE_FORMAT))
        if isinstance(value, datetime):
            return VALUE_STRING_FORMAT.format(value.strftime(VALUE_DATETIME_FORMAT))
        if isinstance(value, list):
            return VALUE_LIST_FORMAT.format(', '.join(str(self.format_db_values(val)) for val in value))
        if isinstance(value, tuple):
            return ', '.join(str(self.format_db_values(val)) for val in value)
        if isinstance(value, self.__class__):
            # a nested query: execute it and format its rows as a list
            return self.format_db_values(list(value))
        if value is None:
            return VALUE_NULL_FORMAT
        return value
class Insert(BaseQuery):
    """INSERT statement builder/executor for the bound table."""

    def bulk_insert(self, values, column_names=None):
        """Insert many rows efficiently, batching the generated statements.

        >>> connection.query('form_value').bulk_insert(
        ...     values=[[1, 2], [3, 4], [4, 5]],
        ...     column_names=['column_a', 'column_b'])

        Args:
            values (iterable): iterable of rows, each row an iterable of
                column values in the same order as *column_names*
            column_names (list, optional): the column names; defaults to all
                columns of the table

        Returns:
            bool: True if everything went fine
        """
        values = tuple(list(value) for value in values)
        columns = column_names if column_names else self.columns
        # 999 matches SQLite's default per-statement limit — presumably why
        # batches are capped here; confirm for other engines.
        maximum_number_of_values_per_iteration = 999
        iterations = math.ceil(len(values) / maximum_number_of_values_per_iteration)
        self.engine.connect()
        for iteration in range(0, iterations):
            start = iteration * maximum_number_of_values_per_iteration
            end = (iteration + 1) * maximum_number_of_values_per_iteration
            query = self._format_insert(tuple(values[start:end]), columns)
            self.engine.execute(query)
            self.engine.commit()
        return True

    def insert(self, **kwargs):
        """Insert a single row; keyword arguments are column=value pairs.

        Returns:
            bool: True once the statement has been executed and committed
        """
        columns = kwargs.keys()
        values = list(kwargs.values())
        query = self._format_insert(values, columns)
        # BUGFIX: this used to print() the query with the execute call
        # commented out, so nothing was ever written.  Execute and commit
        # through the same engine API bulk_insert() uses.
        self.engine.connect()
        self.engine.execute(query)
        self.engine.commit()
        return True

    def _format_insert(self, values, columns):
        """Render a full INSERT statement for *values* into *columns*."""
        INSERT_CLAUSE = 'INSERT INTO "{}" ({}) VALUES {}'
        return INSERT_CLAUSE.format(
            self.on_table,
            ', '.join('"{}"'.format(column) for column in columns),
            self.format_db_values(values)
        )
class Select(BaseQuery):
    """
    SELECT statement builder: accumulates select, where, order and limit
    clauses on each chained call and renders the SQL lazily.
    """
    def __init__(self, join_relations, *args, **kwargs):
        self.join_relations = join_relations
        self.query_select = ['*']
        self.query_distinct = ''
        self.query_orders = []
        self.query_where = []
        self.query_limit = ''
        self.query_joins = []
        super(Select, self).__init__(*args, **kwargs)

    @property
    def __get_query(self):
        """Render all accumulated clauses into the final SQL string."""
        query = SELECT_FORMAT.format(
            select=', '.join(self.query_select),
            # BUGFIX: DISTINCT_CLAUSE_FORMAT used to be interpolated directly
            # here, making every query a SELECT DISTINCT regardless of
            # whether .distinct() was called.
            distinct=self.query_distinct,
            froms=self.on_table
        )
        joins = '{} '.format(' '.join(self.query_joins)) if self.query_joins else ''
        where = WHERE_CLAUSE_FORMAT.format(where_conditions=WHERE_AND_CONNECTOR_FORMAT.join(self.query_where)) if self.query_where else ''
        orders = ORDER_BY_CLAUSE_FORMAT.format(order_by_conditions=', '.join(self.query_orders)) if self.query_orders else ''
        limit = self.query_limit
        return query + joins + where + orders + limit

    @property
    def query(self):
        """The SQL string this query currently renders to."""
        return self.__get_query

    def first(self):
        """Return the first element of the query; sets limit to 1."""
        return self.limit(1)

    def limit(self, number):
        """Set the LIMIT of the query.

        Args:
            number (int): the limit number

        Returns:
            self: this object so you can chain other calls
        """
        self.query_limit = LIMIT_FORMAT.format(num=number)
        return self

    def distinct(self):
        """Make this a SELECT DISTINCT query."""
        self.query_distinct = DISTINCT_CLAUSE_FORMAT
        return self

    def select(self, *args, **kwargs):
        """Choose the columns to retrieve; joins use double underscores.

        >>> connection.query('example_db_name').select('id', 'name').run()
        >>> connection.query('example_db_name').select('connectedfield__id').run()

        Args:
            flat (bool, optional): when True and exactly one column is
                selected, results come back as a flat list ([1, 2]) instead
                of 1-tuples ([(1,), (2,)]). Defaults to False.
        """
        # flat results only make sense for a single selected column
        self._flat = kwargs.get('flat', False) and len(args) == 1
        # every call resets the select clause, so use just one
        self.query_select = []
        for value in args:
            select_clause = self._format_db_fields(value)
            if select_clause not in self.query_select:
                self.query_select.append(select_clause)
        return self

    def filter(self, **kwargs):
        """Add WHERE conditions; joins use double underscores.

        >>> connection.query('example_db_name').filter(id=2).run()
        >>> connection.query('example_db_name').filter(connectedfield__id=2).run()
        """
        for key, value in kwargs.items():
            where_operation = WHERE_SPECIAL_ARGUMENTS.get(
                key.split(AUTOMATIC_JOINS_PLACEHOLDER)[-1], WHERE_EQUAL_OPERATION_FORMAT)
            if where_operation != WHERE_EQUAL_OPERATION_FORMAT:
                # strip the special suffix (e.g. `__gte`) off the field path
                key = AUTOMATIC_JOINS_PLACEHOLDER.join(key.split(AUTOMATIC_JOINS_PLACEHOLDER)[:-1])
            where_field = self._format_db_fields(key)
            value = self.format_db_values(value)
            condition = where_field + where_operation + str(value)
            # BUGFIX: deduplicate on the complete condition; the old check
            # compared the bare field name against full condition strings
            # and therefore never matched anything.
            if condition not in self.query_where:
                self.query_where.append(condition)
        return self

    def order_by(self, *args):
        """Add ORDER BY columns; prefix a name with '-' for descending order.

        >>> connection.query('example_db_name').order_by('id', '-name').run()
        >>> connection.query('example_db_name').order_by('connectedfield__id').run()
        """
        if any(not isinstance(value, str) for value in args):
            raise TypeError('Your arguments MUST be str type')
        for value in args:
            asc_or_desc = ORDER_BY_ASC_FORMAT
            if value[0] == '-':
                asc_or_desc = ORDER_BY_DESC_FORMAT
                value = value[1:]
            order_clause = '{} {}'.format(self._format_db_fields(value), asc_or_desc)
            if order_clause not in self.query_orders:
                self.query_orders.append(order_clause)
        return self

    def force(self):
        """Execute the SELECT and return its rows.

        Returns:
            list/tuple: list or tuple of results (flattened when
            select(..., flat=True) was used)
        """
        query = self.__get_query
        result = self.engine.fetch(query)
        if getattr(self, '_flat', False):
            result = [value[0] for value in result]
        return result
class Query(Insert, Select):
    """User-facing handle combining Insert and Select behaviour.

    Printing, iterating, indexing or truth-testing a Query lazily executes
    it via Select.force().
    """
    def __repr__(self):
        return str(self.force())

    def __getstate__(self):
        return self.force()

    def __iter__(self):
        return iter(self.force())

    def __bool__(self):
        return bool(self.force())

    def __getitem__(self, k):
        return self.force()[k]

    @property
    def columns(self):
        """Column names of the bound table.

        Fetched via a zero-row SELECT so only cursor metadata is read.

        Returns:
            list: each column name of the table as a string
        """
        query = SELECT_FORMAT.format(select='*', distinct='', froms=self.on_table)
        query = query + LIMIT_FORMAT.format(num=0)
        self.engine.connect()
        cursor = self.engine.execute(query)
        names = [description[0] for description in cursor.description]
        self.engine.close()
        return names
| 37.471292 | 156 | 0.624401 |
1e5af5e3a3bb32b7344bf26c01b55ea03eec2ed4 | 1,551 | py | Python | src/python/bot/tokenizer/antlr_tokenizer.py | eepeep/clusterfuzz | b48371466493ba1f46bd2769f01eadcf49d0cff1 | [
"Apache-2.0"
] | 3 | 2020-12-30T07:00:55.000Z | 2021-03-16T10:55:05.000Z | src/python/bot/tokenizer/antlr_tokenizer.py | eepeep/clusterfuzz | b48371466493ba1f46bd2769f01eadcf49d0cff1 | [
"Apache-2.0"
] | 34 | 2020-08-18T18:47:00.000Z | 2021-07-14T07:47:35.000Z | src/python/bot/tokenizer/antlr_tokenizer.py | eepeep/clusterfuzz | b48371466493ba1f46bd2769f01eadcf49d0cff1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Antlr Tokenizer"""
from builtins import object
import antlr4
class AntlrTokenizer(object):
"""Tokenizer. Takes an Antlr Lexer created using
$ antlr4 -Dlanguage=Pythonn <AntlrGrammar.g4>
and allows user to tokenize files using that grammar."""
def __init__(self, lexer):
self._lexer = lexer
def fill(self, stream):
"""Helper function. antlr4.CommonTokenStream.fill should work, but
it does not fetch all of the tokens. This is a replacement that works."""
i = 0
while stream.fetch(1):
i += 1
return i
def tokenize(self, data):
"""Takes in a file and uses the antlr lexer to return a list of tokens"""
lexer_input = antlr4.InputStream(data)
stream = antlr4.CommonTokenStream(self._lexer(lexer_input))
end = self.fill(stream)
tokens = stream.getTokens(0, end)
return [token.text for token in tokens]
def combine(self, tokens):
"""Token combiner passed to minimizer"""
return ''.join(tokens)
| 31.653061 | 77 | 0.717602 |
6ec2fc81f992cc5a36b98f1af9425060ace971ad | 20,639 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_dedicated_host_groups_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_dedicated_host_groups_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 15 | 2019-07-12T18:18:04.000Z | 2019-07-25T20:55:51.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_dedicated_host_groups_operations.py | LianwMS/azure-sdk-for-python | 612d7bca9de86ee1bd1fa59291d7bf897ba9213f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DedicatedHostGroupsOperations(object):
"""DedicatedHostGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        """Wire up the pipeline client, configuration and (de)serializers.

        Invoked by the service client; not intended for direct construction.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # NOTE: generated by AutoRest (see file header) — manual changes will be
    # lost when the client is regenerated.
    def create_or_update(
        self,
        resource_group_name,  # type: str
        host_group_name,  # type: str
        parameters,  # type: "models.DedicatedHostGroup"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.DedicatedHostGroup"
        """Create or update a dedicated host group. For details of Dedicated Host and Dedicated Host
        Groups please see [Dedicated Host Documentation]
        (https://go.microsoft.com/fwlink/?linkid=2082596).

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group.
        :type host_group_name: str
        :param parameters: Parameters supplied to the Create Dedicated Host Group.
        :type parameters: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DedicatedHostGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DedicatedHostGroup"]
        # callers may extend/override the default status-code -> error mapping
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-01"
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.create_or_update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DedicatedHostGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        # 200 -> an existing group was updated; 201 -> a new one was created.
        # Both carry the same payload shape.
        if response.status_code == 200:
            deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'}  # type: ignore
    # NOTE: generated by AutoRest (see file header) — manual changes will be
    # lost when the client is regenerated.
    def update(
        self,
        resource_group_name,  # type: str
        host_group_name,  # type: str
        parameters,  # type: "models.DedicatedHostGroupUpdate"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.DedicatedHostGroup"
        """Update an dedicated host group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param host_group_name: The name of the dedicated host group.
        :type host_group_name: str
        :param parameters: Parameters supplied to the Update Dedicated Host Group operation.
        :type parameters: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroupUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DedicatedHostGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DedicatedHostGroup"]
        # callers may extend/override the default status-code -> error mapping
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-03-01"
        content_type = kwargs.pop("content_type", "application/json")

        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'

        # Construct and send request (PATCH semantics: partial update)
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DedicatedHostGroupUpdate')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'}  # type: ignore
def delete(
    self,
    resource_group_name,  # type: str
    host_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Delete a dedicated host group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param host_group_name: The name of the dedicated host group.
    :type host_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType[None]
    errors = {404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"

    # Resolve the operation's URL template against the request arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self.delete.metadata['url'], **path_args)  # type: ignore

    # Query string and headers; a DELETE here carries no body and no Accept header.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]

    request = self._client.delete(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = deleted, 204 = already absent; anything else is an ARM error.
    if response.status_code not in [200, 204]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if custom_cls:
        return custom_cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'}  # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    host_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "models.DedicatedHostGroup"
    """Retrieves information about a dedicated host group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param host_group_name: The name of the dedicated host group.
    :type host_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DedicatedHostGroup, or the result of cls(response)
    :rtype: ~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroup
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    custom_cls = kwargs.pop('cls', None)  # type: ClsType["models.DedicatedHostGroup"]
    errors = {404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"

    # Resolve the operation's URL template against the request arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'hostGroupName': self._serialize.url("host_group_name", host_group_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self.get.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': 'application/json'}  # type: Dict[str, Any]

    request = self._client.get(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=errors)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('DedicatedHostGroup', pipeline_response)
    if custom_cls:
        return custom_cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.DedicatedHostGroupListResult"]
    """Lists all of the dedicated host groups in the specified resource group. Use the nextLink
    property in the response to get the next page of dedicated host groups.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DedicatedHostGroupListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroupListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.DedicatedHostGroupListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"

    def prepare_request(next_link=None):
        # First page: build the URL from the operation template.
        # Subsequent pages: the service's nextLink is already a complete URL,
        # so no query parameters are re-added.
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page or None, item iterator).
        deserialized = self._deserialize('DedicatedHostGroupListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping non-200 responses to typed exceptions.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    # Lazy pager: pages are requested only as the iterator is consumed.
    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups'}  # type: ignore
def list_by_subscription(
    self,
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.DedicatedHostGroupListResult"]
    """Lists all of the dedicated host groups in the subscription. Use the nextLink property in the
    response to get the next page of dedicated host groups.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DedicatedHostGroupListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_03_01.models.DedicatedHostGroupListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.DedicatedHostGroupListResult"]
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-03-01"

    def prepare_request(next_link=None):
        # First page: build the URL from the operation template.
        # Subsequent pages: the service's nextLink is already a complete URL.
        if not next_link:
            # Construct URL
            url = self.list_by_subscription.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page and return (link to next page or None, item iterator).
        deserialized = self._deserialize('DedicatedHostGroupListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping non-200 responses to typed exceptions.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    # Lazy pager: pages are requested only as the iterator is consumed.
    return ItemPaged(
        get_next, extract_data
    )
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/hostGroups'}  # type: ignore
| 47.997674 | 180 | 0.665827 |
35511581f9e4688e36ca86ca30874863536c8600 | 1,327 | py | Python | StackApp/env/lib/python2.7/site-packages/flask_api/response.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | 2 | 2017-11-24T12:44:30.000Z | 2020-04-11T17:28:43.000Z | StackApp/env/lib/python2.7/site-packages/flask_api/response.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | 6 | 2017-11-10T19:45:18.000Z | 2017-11-12T14:50:42.000Z | StackApp/env/lib/python2.7/site-packages/flask_api/response.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | 9 | 2018-01-04T05:32:39.000Z | 2018-03-24T02:41:28.000Z | # coding: utf8
from __future__ import unicode_literals
from flask import request, Response
from flask._compat import text_type
class APIResponse(Response):
    """Flask response that renders list/dict payloads with the negotiated renderer.

    Non-container content is passed through to the base ``Response`` unchanged.
    """

    # Content of these types is run through the request's accepted renderer.
    api_return_types = (list, dict)

    def __init__(self, content=None, *args, **kwargs):
        super(APIResponse, self).__init__(None, *args, **kwargs)

        rendered_media_type = None
        needs_rendering = isinstance(content, self.api_return_types) or content == ''
        if needs_rendering:
            renderer = request.accepted_renderer
            # Empty-string content is only rendered when the renderer opts in.
            if content != '' or renderer.handles_empty_responses:
                rendered_media_type = request.accepted_media_type
                content = renderer.render(
                    content, rendered_media_type, **self.get_renderer_options()
                )
                # A rendered body means the response is no longer "No Content".
                if self.status_code == 204:
                    self.status_code = 200

        # From `werkzeug.wrappers.BaseResponse`
        if content is None:
            content = []
        if isinstance(content, (text_type, bytes, bytearray)):
            self.set_data(content)
        else:
            self.response = content

        if rendered_media_type is not None:
            self.headers['Content-Type'] = str(rendered_media_type)

    def get_renderer_options(self):
        """Keyword arguments forwarded to the renderer's ``render`` call."""
        options = {
            'status': self.status,
            'status_code': self.status_code,
            'headers': self.headers
        }
        return options
ec743bd95f5027c8b883c45d261c00a010193900 | 316 | py | Python | Python/ex048.py | MarcosRibas/Projeto100Exercicios | 15c16eb0d9c4182d93e4bb83e11acad0728f5ec9 | [
"MIT"
] | null | null | null | Python/ex048.py | MarcosRibas/Projeto100Exercicios | 15c16eb0d9c4182d93e4bb83e11acad0728f5ec9 | [
"MIT"
] | null | null | null | Python/ex048.py | MarcosRibas/Projeto100Exercicios | 15c16eb0d9c4182d93e4bb83e11acad0728f5ec9 | [
"MIT"
] | null | null | null | #Ex048 Faça um programa que calcule a soma entre todos os números impares que são múltiplos de três e que se
# encontram no intervalo de 1 até 500.
# Accumulate every odd multiple of three below 500 (same set as the
# original 0..499 step-3 loop filtered by c % 2 == 1).
s = sum(c for c in range(0, 500, 3) if c % 2 == 1)
print(f'A soma de somados todos os números impáres múltiplos de três no intervalo de 1 a 500 é {s}') | 45.142857 | 108 | 0.689873 |
60cfa3dcdd9f5a10de5f54603e4b45c806a30108 | 2,891 | py | Python | backend/services/google_manager.py | bshimanuki/checkmate | d68eb5d49d42a83ea32dcd32a21b8e7533e6eb5d | [
"MIT"
] | 1 | 2022-01-08T06:03:48.000Z | 2022-01-08T06:03:48.000Z | backend/services/google_manager.py | bshimanuki/checkmate | d68eb5d49d42a83ea32dcd32a21b8e7533e6eb5d | [
"MIT"
] | null | null | null | backend/services/google_manager.py | bshimanuki/checkmate | d68eb5d49d42a83ea32dcd32a21b8e7533e6eb5d | [
"MIT"
] | null | null | null | from aiogoogle import Aiogoogle
from aiogoogle.auth.creds import ServiceAccountCreds
from django.conf import settings
scopes = [
'https://www.googleapis.com/auth/drive',
]
class GoogleManager:
    """Async helper around the Google Drive/Sheets APIs using a service account.

    Credentials, the spreadsheet template id, the destination folder id and the
    owner permission id are all read from ``settings.DRIVE_SETTINGS``.
    """

    # Process-wide singleton storage; see instance().
    __instance = None

    @classmethod
    def instance(cls):
        '''
        Get a single instance per process.
        '''
        if cls.__instance is None:
            cls.__instance = cls()
        return cls.__instance

    def __init__(self):
        # Service-account credentials limited to the configured Drive scopes.
        self.creds = ServiceAccountCreds(
            scopes=scopes,
            **settings.DRIVE_SETTINGS['credentials'],
        )
        # Spreadsheet that is copied for each newly created sheet.
        self.template_id = settings.DRIVE_SETTINGS['template_id']
        # Drive folder that receives the copies.
        self.puzzle_folder_id = settings.DRIVE_SETTINGS['puzzle_folder_id']
        # Permission id of the account that takes ownership of new sheets.
        self.owner_id = str(settings.DRIVE_SETTINGS['owner_id'])
        self.client = Aiogoogle(service_account_creds=self.creds)
        # Discovered API wrappers; populated lazily by setup().
        self.drive = None
        self.sheets = None

    async def setup(self):
        """Lazily discover the Drive v3 / Sheets v4 APIs and ensure a session."""
        if self.drive is None:
            self.drive = await self.client.discover('drive', 'v3')
            self.sheets = await self.client.discover('sheets', 'v4')
        await self.client._ensure_session_set()

    async def create(self, name):
        """Copy the template sheet as *name* into the puzzle folder and return its id."""
        await self.setup()
        sheet_file = await self.client.as_service_account(
            self.drive.files.copy(
                fileId=self.template_id,
                json={
                    'name': name,
                    'parents': [self.puzzle_folder_id],
                },
            ),
        )
        sheet_id = sheet_file['id']
        # Transfer ownership to the configured account so the file does not
        # stay owned by the service account.
        await self.client.as_service_account(
            self.drive.permissions.update(
                fileId=sheet_id,
                permissionId=self.owner_id,
                transferOwnership=True,
                json={
                    'role': 'owner',
                },
            ),
        )
        return sheet_id

    async def add_links(self, sheet_id, checkmate_link=None, puzzle_link=None):
        """Write HYPERLINK formulas into A1:B1; no-op unless both links are given."""
        if not checkmate_link or not puzzle_link:
            return
        await self.setup()
        await self.client.as_service_account(
            self.sheets.spreadsheets.values.update(
                spreadsheetId=sheet_id,
                range='A1:B1',
                # USER_ENTERED makes Sheets evaluate the HYPERLINK formulas
                # instead of storing them as literal text.
                valueInputOption='USER_ENTERED',
                json={
                    'values': [[
                        f'=HYPERLINK("{checkmate_link}", "Checkmate Link")' if checkmate_link else None,
                        f'=HYPERLINK("{puzzle_link}", "Puzzle Link")' if puzzle_link else None,
                    ]],
                },
            ),
        )

    async def rename(self, file_id, name):
        """Rename an existing Drive file to *name*."""
        await self.setup()
        await self.client.as_service_account(
            self.drive.files.update(
                fileId=file_id,
                json={
                    'name': name,
                },
            )
        )
546c6176b4496540f8e9583bde3b9403ffca376b | 939 | py | Python | setup.py | antoniodimariano/metrics_consumer | 5c485f3b6c2b6788f947c02b49083ce237424bfc | [
"Apache-2.0"
] | null | null | null | setup.py | antoniodimariano/metrics_consumer | 5c485f3b6c2b6788f947c02b49083ce237424bfc | [
"Apache-2.0"
] | null | null | null | setup.py | antoniodimariano/metrics_consumer | 5c485f3b6c2b6788f947c02b49083ce237424bfc | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
# Read the long description with an explicit encoding so the build does not
# depend on the platform's default locale (the README may contain non-ASCII).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name='websites_metrics_consumer',
    version='0.0.4',
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    install_requires=["psycopg2", "confluent_kafka", "avro-python3", "requests"],
    url='https://github.com/antoniodimariano/metrics_consumer',
    license='Apache 2.0',
    python_requires='>=3.6',
    author='Antonio Di Mariano',
    author_email='antonio.dimariano@gmail.com',
    # Typo fix: "into ta postgres db" -> "into a postgres db".
    description='An application that consumes metrics from Kafka messages and store the results into a postgres db',
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
| 36.115385 | 117 | 0.699681 |
1a40ea74ac546102e2b6bbeeb183104322c9292c | 6,987 | py | Python | pipeline_withFRD.py | vikasg603/Vehicle-Front-Rear-Detection-for-License-Plate-Detection-Enhancement | afc70fc41633df4f47326b43e28cd596e0cff5e6 | [
"MIT"
] | 29 | 2019-07-22T01:57:42.000Z | 2022-03-31T15:51:42.000Z | pipeline_withFRD.py | vikasg603/Vehicle-Front-Rear-Detection-for-License-Plate-Detection-Enhancement | afc70fc41633df4f47326b43e28cd596e0cff5e6 | [
"MIT"
] | 6 | 2020-02-13T06:11:55.000Z | 2021-06-17T18:13:08.000Z | pipeline_withFRD.py | vikasg603/Vehicle-Front-Rear-Detection-for-License-Plate-Detection-Enhancement | afc70fc41633df4f47326b43e28cd596e0cff5e6 | [
"MIT"
] | 17 | 2019-09-19T09:56:16.000Z | 2021-06-12T22:42:30.000Z | '''
FRD Net, the function 'detect' in darknet has been modified to be able to receive cv2.imread as an input
see darknet.py for more information
'''
from os.path import splitext, basename, isdir
from os import makedirs, remove
import sys
import cv2
import numpy as np
import traceback
from src import FRD
from src.draw_BB import draw_bb
from WPOD_src.drawing_utils import draw_losangle
from WPOD_src.keras_utils import load_model, detect_lp
from WPOD_src.label import Label, lwrite, lread, Shape
from WPOD_src.utils import crop_region, image_files_from_folder, im2single
from darknet.python.darknet import detect
import src.quadrilateral_calculation as qucal
import darknet.python.darknet as dn
# NOTE(review): Python 2 script (print statements). Relies on module-level
# imports: cv2, np, dn/detect (darknet), FRD, qucal, WPOD helpers, draw_bb.
if __name__ == '__main__':

    # --- Stage 1: vehicle detection with YOLO --------------------------------
    input_dir = 'samples/overlap_case'
    output_dir = 'output'

    vehicle_threshold = .5

    vehicle_weights = 'data/vehicle-detector/yolo-voc.weights'
    vehicle_netcfg = 'data/vehicle-detector/yolo-voc.cfg'
    vehicle_dataset = 'data/vehicle-detector/voc.data'

    vehicle_net = dn.load_net(vehicle_netcfg, vehicle_weights, 0)
    vehicle_meta = dn.load_meta(vehicle_dataset)

    imgs_paths = image_files_from_folder(input_dir)
    imgs_paths.sort()

    if not isdir(output_dir):
        makedirs(output_dir)

    print '\tSearching for vehicles using YOLO...'

    for i, img_path in enumerate(imgs_paths):

        print '\tScanning %s' % img_path

        img = cv2.imread(img_path)
        bname = basename(splitext(img_path)[0])

        R, _ = detect(vehicle_net, vehicle_meta, img, thresh=vehicle_threshold)

        # Keep only the vehicle classes of interest.
        R = [r for r in R if r[0] in ['car', 'bus']]

        print '\t\t%d cars found' % len(R)

        if len(R):

            # Image (width, height), used to normalize boxes to [0, 1].
            WH = np.array(img.shape[1::-1], dtype=float)
            Lcars = []

            # NOTE(review): this inner loop reuses the outer loop name `i`;
            # harmless here, but worth renaming.
            for i, r in enumerate(R):

                # YOLO gives center/size in pixels; convert to relative tl/br.
                cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
                tl = np.array([cx - w / 2., cy - h / 2.])
                br = np.array([cx + w / 2., cy + h / 2.])
                label = Label(0, tl, br)
                Lcars.append(label)

            # Persist the car boxes for stage 2 (one txt per image).
            lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)

    # --- Stage 2: license-plate detection + overlap resolution via FRD -------
    try:
        # colors are BGR in opencv
        YELLOW = (0, 255, 255)
        RED = (0, 0, 255)
        PINK = (232, 28, 232)

        input_dir = output_dir
        lp_threshold = 0.5

        wpod_net_path = "data/lp-detector/wpod-net_update1.h5"
        wpod_net = load_model(wpod_net_path)

        print 'Searching for license plates using WPOD-NET'

        for i, img_path in enumerate(imgs_paths):

            print '\t Processing %s' % img_path

            bname = splitext(basename(img_path))[0]
            img = cv2.imread(img_path)
            label_path = '%s/%s_cars.txt' % (output_dir, bname)

            plates = []
            car_labels = lread(label_path)

            # remove the LP position information txt
            remove('%s/%s_cars.txt' % (output_dir, bname))

            for j, car_label in enumerate(car_labels):

                car = crop_region(img, car_label)

                # Choose the WPOD input size from the crop aspect ratio,
                # bounded to 608 and padded to a multiple of 16.
                ratio = float(max(car.shape[:2])) / min(car.shape[:2])
                side = int(ratio * 288.)
                bound_dim = min(side + (side % (2 ** 4)), 608)
                print "\t\tvehicle %d, Bound dim: %d, ratio: %f" % (j, bound_dim, ratio)

                Llp, LlpImgs, _ = detect_lp(wpod_net, im2single(car), bound_dim, 2 ** 4, (240, 80), lp_threshold)

                if len(LlpImgs):
                    Ilp = LlpImgs[0]
                    Ilp = cv2.cvtColor(Ilp, cv2.COLOR_BGR2GRAY)
                    Ilp = cv2.cvtColor(Ilp, cv2.COLOR_GRAY2BGR)

                    s = Shape(Llp[0].pts)
                    # s.pts is the points for LP, it is a numpy array with shape(2, 4)
                    # this part is used to reconstruct the coordinates of LP into original image pixel scale
                    # also append j into the plates_cor to record its corresponding car
                    pts = s.pts * car_label.wh().reshape(2, 1) + car_label.tl().reshape(2, 1)
                    ptspx = pts * np.array(img.shape[1::-1], dtype=float).reshape(2, 1)
                    plates.append([j, ptspx])
                    # draw_losangle(img, ptspx, RED, 3)
                    # cv2.imwrite('%s/%s_lp.png' % (output_dir, bname), Ilp * 255.)
                    # writeShapes('%s/%s_lp.txt' % (output_dir, bname), [s])

            # this part is used to detect the overlapped LP
            plates_cor = [i[1] for i in plates]
            non_overlap_plates = []
            cars_processed = []

            if len(plates) > 1 and qucal.overlap(np.array(plates_cor)):

                # Log the disambiguation decisions for this image.
                FRD_record = open(output_dir + '/%s.txt' % bname, 'w')

                for ele in qucal.overlap(np.array(plates_cor)):

                    print '\t\t\toverlapped LP found:', ele.couple()
                    FRD_record.write('%s %s\n' % ('overlapped LP found:', ele.couple()))

                    car_1 = plates[ele.couple()[0]][0]
                    car_2 = plates[ele.couple()[1]][0]
                    cars_processed.append(car_1)
                    cars_processed.append(car_2)

                    print '\t\t\trelated car:', car_1, 'with', car_2
                    FRD_record.write('%s %d %s %d\n' % ('related car:', car_1, 'with', car_2))

                    # Crop the union of both car boxes and run front/rear
                    # detection on that region.
                    uni_area = qucal.union_area(np.array([car_labels[car_1].tl(), car_labels[car_1].br()]),
                                                np.array([car_labels[car_2].tl(), car_labels[car_2].br()]))
                    uni_img = crop_region(img, uni_area)

                    try:
                        frs, cate = FRD.fr_detect(uni_img)
                        fr_lst = []
                        # Map FR boxes from union-crop coordinates back to
                        # full-image (relative) coordinates.
                        for fr in frs:
                            fr_lst.append(Label(tl=fr.tl()*uni_area.wh() + uni_area.tl(),
                                                br=fr.br()*uni_area.wh() + uni_area.tl()))

                        for k, fr in enumerate(fr_lst):
                            owner_car = None
                            # Assign the FR box to whichever car covers it better.
                            if qucal.FRCar(fr, car_labels[car_1]).cover_rate() >= \
                                    qucal.FRCar(fr, car_labels[car_2]).cover_rate():
                                print '\t\t\tfr:', k, 'car:', car_1, 'has better cover rate'
                                FRD_record.write('%s %d %s %d %s \n' % ('fr:', k, 'car:', car_1, 'has better cover rate'))
                                owner_car = car_1
                                non_overlap_plates.append(ele.larger_plate)
                                if qucal.overlap(np.array([ele.larger_plate, fr.quadrilateral_format() *
                                                           np.array(img.shape[1::-1], dtype=float).reshape(2, 1)])):
                                    print '\t\t\tthis plate belongs to car:', car_1
                                    FRD_record.write('%s %d\n' % ('this plate belongs to car:', car_1))
                            else:
                                print '\t\t\tfr:', k, 'car:', car_2, 'has better cover rate'
                                FRD_record.write('%s %d %s %d %s \n' % ('fr:', k, 'car:', car_2, 'has better cover rate'))
                                owner_car = car_2
                                non_overlap_plates.append(ele.larger_plate)
                                if qucal.overlap(np.array([ele.larger_plate, fr.quadrilateral_format() *
                                                           np.array(img.shape[1::-1], dtype=float).reshape(2, 1)])):
                                    print '\t\t\tthis plate belongs to car:', car_2
                                    FRD_record.write('%s %d\n' % ('this plate belongs to car:', car_2))
                            # draw front & rear BB
                            draw_bb(img, fr, cate=cate[k], index=str(owner_car), text_color=(255, 255, 255))
                    except:
                        # NOTE(review): bare except around the FRD step; failures
                        # are logged and the image is still written out.
                        traceback.print_exc()

                FRD_record.close()

            # put the other plates into the list
            for plate in plates:
                if plate[0] in cars_processed:
                    continue
                else:
                    non_overlap_plates.append(plate[1])

            for plate_cor in non_overlap_plates:
                # draw plates
                draw_losangle(img, plate_cor, RED, 3)

            for j, car_label in enumerate(car_labels):
                # draw car BB
                draw_bb(img, car_label, cate='car', index=str(j), bg_color=YELLOW, text_color=(0, 0, 0))

            cv2.imwrite('%s/%s_output.png' % (output_dir, bname), img)

    except:
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt.
        traceback.print_exc()
        sys.exit(1)

    sys.exit(0)
| 31.759091 | 104 | 0.649778 |
68e74caf269bc6c301232411e0ce4f6664e07a4c | 3,167 | py | Python | api/rateyard_api/admin/__init__.py | scriptium/rateyard | 4ac72b9bd5cabea461bd7941dd1ad28d4d08003d | [
"MIT"
] | 7 | 2021-01-15T18:01:13.000Z | 2021-04-03T22:00:24.000Z | api/rateyard_api/admin/__init__.py | scriptium/rateyard | 4ac72b9bd5cabea461bd7941dd1ad28d4d08003d | [
"MIT"
] | null | null | null | api/rateyard_api/admin/__init__.py | scriptium/rateyard | 4ac72b9bd5cabea461bd7941dd1ad28d4d08003d | [
"MIT"
] | null | null | null | from functools import wraps
from flask import Blueprint, jsonify
from flask_jwt_extended import verify_jwt_in_request, get_jwt_identity
import db
def admin_token_required(fn):
    """Allow the wrapped view only for JWTs whose identity type is 'admin'.

    Returns a 403 JSON response for any other authenticated identity.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        verify_jwt_in_request()
        # Guard clause: reject non-admin identities before touching the view.
        if get_jwt_identity()['type'] != 'admin':
            return jsonify(msg='Admins only!'), 403
        return fn(*args, **kwargs)
    return wrapper
# Blueprint grouping all admin-only API endpoints.
bp = Blueprint("admin", __name__)


@bp.route("check_token", methods=("GET",))
@admin_token_required
def check_token():
    # Reached only when the decorator accepted a valid admin JWT.
    return jsonify(msg='ok')
from . import teachers
from . import groups
from . import students
from . import classes
from . import subjects
# Route registrations for the admin blueprint, grouped by sub-module.

# Students
bp.add_url_rule("/create_students", methods=("POST", ), view_func=students.create_students)
bp.add_url_rule("/delete_students", methods=("POST", ), view_func=students.delete_students)
bp.add_url_rule("/edit_students", methods=("POST", ), view_func=students.edit_students)
bp.add_url_rule("/get_students", methods=("POST", ), view_func=students.get_students)
bp.add_url_rule("/import_from_excel", methods=("POST", ), view_func=students.import_from_excel)

# Classes
bp.add_url_rule("/get_classes_short", methods=("GET", ), view_func=classes.get_classes_short)
bp.add_url_rule("/get_class_full", methods=("POST", ), view_func=classes.get_class_full)
bp.add_url_rule("/delete_students_from_class", methods=("POST", ), view_func=classes.delete_students_from_class)
bp.add_url_rule("/move_students_to_class", methods=("POST", ), view_func=classes.move_students_to_class)
bp.add_url_rule("/create_class", methods=("POST", ), view_func=classes.create_class)
bp.add_url_rule("/edit_class", methods=("POST", ), view_func=classes.edit_class)
bp.add_url_rule("/delete_class", methods=("POST", ), view_func=classes.delete_class)

# Groups
bp.add_url_rule("/get_groups_short", methods=("POST", "GET"), view_func=groups.get_groups_short)
bp.add_url_rule("/get_group_full", methods=("POST", ), view_func=groups.get_group_full)
bp.add_url_rule("/create_group", methods=("POST", ), view_func=groups.create_group)
bp.add_url_rule("/edit_group", methods=("POST", ), view_func=groups.edit_group)
bp.add_url_rule("/delete_group", methods=("POST", ), view_func=groups.delete_group)

# Teachers / lecturers
bp.add_url_rule("/create_teachers", methods=("POST", ), view_func=teachers.create_teachers)
bp.add_url_rule("/get_teachers", methods=("POST", ), view_func=teachers.get_teachers)
bp.add_url_rule("/delete_teachers", methods=("POST", ), view_func=teachers.delete_teachers)
bp.add_url_rule("/edit_teachers", methods=("POST", ), view_func=teachers.edit_teachers)
bp.add_url_rule("/create_lecturer", methods=("POST", ), view_func=teachers.create_lecturer)
bp.add_url_rule("/delete_lecturer", methods=("POST", ), view_func=teachers.delete_lecturer)

# Subjects
bp.add_url_rule("/create_subject", methods=("POST", ), view_func=subjects.create_subject)
bp.add_url_rule("/get_subjects", methods=("GET", ), view_func=subjects.get_subjects)
bp.add_url_rule("/delete_subjects", methods=("POST", ), view_func=subjects.delete_subjects)
bp.add_url_rule("/edit_subjects", methods=("POST", ), view_func=subjects.edit_subjects)
| 48.723077 | 112 | 0.755605 |
8343a584e0b48e1cf42cb5d038316f1f0f217d9b | 13,649 | py | Python | release/stubs.min/System/Windows/Media/Animation_parts/CharKeyFrameCollection.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Media/Animation_parts/CharKeyFrameCollection.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | release/stubs.min/System/Windows/Media/Animation_parts/CharKeyFrameCollection.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | class CharKeyFrameCollection(Freezable, ISealable, IList, ICollection, IEnumerable):
"""
Represents a collection of System.Windows.Media.Animation.CharKeyFrame objects.
CharKeyFrameCollection()
"""
def Add(self, keyFrame):
"""
Add(self: CharKeyFrameCollection,keyFrame: CharKeyFrame) -> int
Adds a System.Windows.Media.Animation.CharKeyFrame to the end of the collection.
keyFrame: The System.Windows.Media.Animation.CharKeyFrame to add to the end of the collection.
Returns: The index at which the keyFrame was added.
"""
pass
def Clear(self):
"""
Clear(self: CharKeyFrameCollection)
Removes all System.Windows.Media.Animation.CharKeyFrame objects from the collection.
"""
pass
def Clone(self):
"""
Clone(self: CharKeyFrameCollection) -> CharKeyFrameCollection
Creates a modifiable clone of this System.Windows.Media.Animation.CharKeyFrameCollection,making
deep copies of this object's values. When copying dependency properties,this method copies
resource references and data bindings (but they might no longer resolve) but not animations or
their current values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property will be false even if the source's System.Windows.Freezable.IsFrozen property was true.
"""
pass
def CloneCore(self, *args):
"""
CloneCore(self: CharKeyFrameCollection,sourceFreezable: Freezable)
Makes this instance a deep copy of the specified
System.Windows.Media.Animation.CharKeyFrameCollection. When copying dependency properties,this
method copies resource references and data bindings (but they might no longer resolve) but not
animations or their current values.
sourceFreezable: The System.Windows.Media.Animation.CharKeyFrameCollection to clone.
"""
pass
def CloneCurrentValueCore(self, *args):
"""
CloneCurrentValueCore(self: CharKeyFrameCollection,sourceFreezable: Freezable)
Makes this instance a modifiable deep copy of the specified
System.Windows.Media.Animation.CharKeyFrameCollection using current property values. Resource
references,data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.CharKeyFrameCollection to clone.
"""
pass
def Contains(self, keyFrame):
"""
Contains(self: CharKeyFrameCollection,keyFrame: CharKeyFrame) -> bool
Gets a value that indicates whether the collection contains the specified
System.Windows.Media.Animation.CharKeyFrame.
keyFrame: The System.Windows.Media.Animation.CharKeyFrame to locate in the collection.
Returns: true if the collection contains keyFrame; otherwise,false.
"""
pass
def CopyTo(self, array, index):
"""
CopyTo(self: CharKeyFrameCollection,array: Array[CharKeyFrame],index: int)
Copies all of the System.Windows.Media.Animation.CharKeyFrame objects in a collection to a
specified array.
array: Identifies the array to which content is copied.
index: Index position in the array to which the contents of the collection are copied.
"""
pass
def CreateInstance(self, *args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self, *args):
"""
CreateInstanceCore(self: CharKeyFrameCollection) -> Freezable
Creates a new,frozen instance of System.Windows.Media.Animation.CharKeyFrameCollection.
Returns: A frozen instance of System.Windows.Media.Animation.CharKeyFrameCollection.
"""
pass
def FreezeCore(self, *args):
"""
FreezeCore(self: CharKeyFrameCollection,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.CharKeyFrameCollection read-only or determines whether
it can be made read-only.
isChecking: true if this method should simply determine whether this instance can be frozen. false if this
instance should actually freeze itself when this method is called.
Returns: If isChecking is true,this method returns true if this instance can be made read-only,or false
if it cannot be made read-only. If isChecking is false,this method returns true if this
instance is now read-only,or false if it cannot be made read-only,with the side effect of
having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self, *args):
"""
GetAsFrozenCore(self: CharKeyFrameCollection,sourceFreezable: Freezable)
Makes this instance a clone of the specified
System.Windows.Media.Animation.CharKeyFrameCollection object.
sourceFreezable: The System.Windows.Media.Animation.CharKeyFrameCollection object to clone and freeze.
"""
pass
def GetCurrentValueAsFrozenCore(self, *args):
"""
GetCurrentValueAsFrozenCore(self: CharKeyFrameCollection,sourceFreezable: Freezable)
Makes this instance a frozen clone of the specified
System.Windows.Media.Animation.CharKeyFrameCollection. Resource references,data bindings,and
animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.CharKeyFrameCollection to copy and freeze.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: CharKeyFrameCollection) -> IEnumerator
Returns an enumerator that can iterate through the collection.
Returns: An System.Collections.IEnumerator that can iterate through the collection.
"""
pass
def IndexOf(self, keyFrame):
"""
IndexOf(self: CharKeyFrameCollection,keyFrame: CharKeyFrame) -> int
Searches for the specified System.Windows.Media.Animation.CharKeyFrame and returns the
zero-based index of the first occurrence within the entire collection.
keyFrame: The System.Windows.Media.Animation.CharKeyFrame to locate in the collection.
Returns: The zero-based index of the first occurrence of keyFrame within the entire collection,if found;
otherwise,-1.
"""
pass
def Insert(self, index, keyFrame):
"""
Insert(self: CharKeyFrameCollection,index: int,keyFrame: CharKeyFrame)
Inserts a System.Windows.Media.Animation.CharKeyFrame into a specific location within the
collection.
index: The index position at which the System.Windows.Media.Animation.CharKeyFrame is inserted.
keyFrame: The System.Windows.Media.Animation.CharKeyFrame object to insert in the collection.
"""
pass
def OnChanged(self, *args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self, *args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self, *args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self, *args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
def Remove(self, keyFrame):
"""
Remove(self: CharKeyFrameCollection,keyFrame: CharKeyFrame)
Removes a System.Windows.Media.Animation.CharKeyFrame object from the collection.
keyFrame: Identifies the System.Windows.Media.Animation.CharKeyFrame to remove from the collection.
"""
pass
def RemoveAt(self, index):
"""
RemoveAt(self: CharKeyFrameCollection,index: int)
Removes the System.Windows.Media.Animation.CharKeyFrame at the specified index position from the
collection.
index: Index position of the System.Windows.Media.Animation.CharKeyFrame to be removed.
"""
pass
def ShouldSerializeProperty(self, *args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self, *args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self, *args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __add__(self, *args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, *args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,false.
"""
pass
def __getitem__(self, *args):
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args):
""" x.__len__() <==> len(x) """
pass
def __setitem__(self, *args):
""" x.__setitem__(i,y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the number of key frames contained in the System.Windows.Media.Animation.CharKeyFrameCollection.
Get: Count(self: CharKeyFrameCollection) -> int
"""
IsFixedSize = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates if the collection size can ever change.
Get: IsFixedSize(self: CharKeyFrameCollection) -> bool
"""
IsReadOnly = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates if the collection is read-only.
Get: IsReadOnly(self: CharKeyFrameCollection) -> bool
"""
IsSynchronized = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""Gets a value that indicates whether access to the collection is synchronized (thread-safe).
Get: IsSynchronized(self: CharKeyFrameCollection) -> bool
"""
SyncRoot = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets an object that can be used to synchronize access to the collection.
Get: SyncRoot(self: CharKeyFrameCollection) -> object
"""
Empty = None
| 26.14751 | 221 | 0.683127 |
3062ffa07024e124ab195f916b75a8af60f431cb | 437 | py | Python | ObjectOrientedProgramming/classes.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | 1 | 2017-05-02T10:28:36.000Z | 2017-05-02T10:28:36.000Z | ObjectOrientedProgramming/classes.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | null | null | null | ObjectOrientedProgramming/classes.py | theprogrammingthinker/Python-practice | fef11a7fbd5082a0614b01f88a13ea29d68860bf | [
"Unlicense"
] | null | null | null | class Sample(object):
pass
# Demonstrate instantiation: type() on the new instance reports the Sample class.
x = Sample()
print(type(x))
class Dog(object):
    """A simple dog record holding a breed, a name, and a fur flag.

    NOTE(review): the class attribute value 'mamal' looks like a typo for
    'mammal', but it is a runtime value printed by callers, so it is kept
    verbatim -- confirm before changing it.
    """

    # Class-level attribute shared by every Dog instance.
    species = 'mamal'

    def __init__(self, breed, name, fur = True):
        """Store the per-instance attributes exactly as given."""
        self.breed = breed
        self.name = name
        self.fur = fur
# Demonstrate attribute access: instance attributes set in __init__ plus the
# class-level 'species' attribute, looked up through the instance.
sam = Dog(breed="lab", name='sas')
print(sam)
print(sam.breed)
print(sam.name)
print(sam.species)
print(sam.fur)
# Overriding the 'fur' default via a keyword argument.
dmm = Dog(breed="lab", name='sas', fur=False)
print(dmm.fur)
| 14.566667 | 48 | 0.620137 |
aea090f8ddab98a8f8c4d143f52b9a9293e3ef99 | 6,866 | py | Python | manager/projects/ui/urls.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | null | null | null | manager/projects/ui/urls.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | null | null | null | manager/projects/ui/urls.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | null | null | null | from django.urls import include, path, re_path
from manager.paths import RootPaths
from projects.paths import ProjectPaths
from projects.ui.views import files, jobs, projects, reviews, snapshots, sources
# URLs that must go before `accounts.ui.urls`
# Fixed-prefix routes ("projects/", "open/") plus numeric-id redirects.
# Registered before the account URLs so the literal "projects" prefix is not
# swallowed by the "<account>/..." catch-all patterns.
before_account_urls = [
    # Project listing and creation pages.
    path(RootPaths.projects.value + "/", projects.list, name="ui-projects-list"),
    path(
        RootPaths.projects.value + "/new/", projects.create, name="ui-projects-create",
    ),
    path("open/", projects.open, name="ui-projects-open",),
    # Redirect /projects/<project_id>/<rest> to <account_name>/<project_name>
    re_path(
        RootPaths.projects.value + r"/(?P<project>\d+)/(?P<rest>.*)",
        projects.redirect,
        name="ui-projects-redirect",
    ),
    # Redirect /<account_name>/<project_id>/<rest> to <account_name>/<project_name>
    re_path(
        r"^(?P<account>[^/]+)/(?P<project>\d+)/(?P<rest>.*)",
        projects.redirect,
        name="ui-projects-redirect",
    ),
]
# URLs that must go after `accounts.ui.urls`
# All per-project pages live under "<account>/<project>/"; these patterns are
# registered after the account URLs so account-level routes win first.
after_account_urls = [
    path(
        "<slug:account>/<slug:project>/",
        include(
            [
                # Project overview plus claim/sharing/settings/image pages.
                path("", projects.retrieve, name="ui-projects-retrieve",),
                path(
                    ProjectPaths.claim.value + "/",
                    projects.claim,
                    name="ui-projects-claim",
                ),
                path(
                    ProjectPaths.sharing.value + "/",
                    projects.sharing,
                    name="ui-projects-sharing",
                ),
                path(
                    ProjectPaths.settings.value + "/",
                    projects.update,
                    name="ui-projects-update",
                ),
                path(
                    ProjectPaths.image.value + "/",
                    projects.image,
                    name="ui-projects-image",
                ),
                # File actions use a "<path>!<action>" suffix convention; the
                # non-greedy (?P<file>.+?) stops at the "!" marker.
                path(
                    ProjectPaths.files.value + "/",
                    include(
                        [
                            re_path(
                                r"(?P<file>.+?)!upload",
                                files.upload,
                                name="ui-projects-files-upload",
                            ),
                            re_path(
                                r"(?P<file>.+?)!convert",
                                files.convert,
                                name="ui-projects-files-convert",
                            ),
                            re_path(
                                r"(?P<file>.+?)!delete",
                                files.destroy,
                                name="ui-projects-files-destroy",
                            ),
                            re_path(
                                r"(?P<file>.+?)!details",
                                files.retrieve,
                                name="ui-projects-files-retrieve",
                            ),
                            re_path(
                                r"(?P<file>.+?)!highlight",
                                files.highlight,
                                name="ui-projects-files-highlight",
                            ),
                            # Fallback: any prefix (or none) lists files.
                            re_path(
                                r"(?P<prefix>.*)?",
                                files.list,
                                name="ui-projects-files-list",
                            ),
                        ]
                    ),
                ),
                # Source management; the bare "(?P<source>.+)" catch-all must
                # stay last so the "!action" routes match first.
                path(
                    ProjectPaths.sources.value + "/",
                    include(
                        [
                            path("", sources.list, name="ui-projects-sources-list",),
                            path(
                                "new/<str:type>",
                                sources.create,
                                name="ui-projects-sources-create",
                            ),
                            path(
                                "upload",
                                sources.upload,
                                name="ui-projects-sources-upload",
                            ),
                            re_path(
                                r"(?P<source>.+?)!rename",
                                sources.rename,
                                name="ui-projects-sources-rename",
                            ),
                            re_path(
                                r"(?P<source>.+?)!delete",
                                sources.destroy,
                                name="ui-projects-sources-destroy",
                            ),
                            re_path(
                                r"(?P<source>.+)",
                                sources.retrieve,
                                name="ui-projects-sources-retrieve",
                            ),
                        ]
                    ),
                ),
                # Snapshots are addressed by integer number.
                path(
                    ProjectPaths.snapshots.value + "/",
                    include(
                        [
                            path(
                                "", snapshots.list, name="ui-projects-snapshots-list",
                            ),
                            path(
                                "<int:snapshot>",
                                snapshots.retrieve,
                                name="ui-projects-snapshots-retrieve",
                            ),
                        ]
                    ),
                ),
                # Reviews: list, detail by integer id, and creation.
                path(
                    ProjectPaths.reviews.value + "/",
                    include(
                        [
                            path("", reviews.list, name="ui-projects-reviews-list"),
                            path(
                                "<int:review>",
                                reviews.retrieve,
                                name="ui-projects-reviews-retrieve",
                            ),
                            path(
                                "new",
                                reviews.create,
                                name="ui-projects-reviews-create",
                            ),
                        ]
                    ),
                ),
                # Jobs are addressed by string key.
                path(
                    ProjectPaths.jobs.value + "/",
                    include(
                        [
                            path("", jobs.list, name="ui-projects-jobs-list"),
                            path(
                                "<str:job>",
                                jobs.retrieve,
                                name="ui-projects-jobs-retrieve",
                            ),
                        ]
                    ),
                ),
            ]
        ),
    )
]
| 39.234286 | 87 | 0.318526 |
69f81cf9a9a8798bd87a26eb702d18a49dcda138 | 2,337 | py | Python | rmgweb/rmg/urls.py | KEHANG/RMG-website | a3b8cdf4b1fb4b47c584f03fac7d72f9ee6d038c | [
"X11",
"Unlicense",
"MIT"
] | null | null | null | rmgweb/rmg/urls.py | KEHANG/RMG-website | a3b8cdf4b1fb4b47c584f03fac7d72f9ee6d038c | [
"X11",
"Unlicense",
"MIT"
] | null | null | null | rmgweb/rmg/urls.py | KEHANG/RMG-website | a3b8cdf4b1fb4b47c584f03fac7d72f9ee6d038c | [
"X11",
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG Website - A Django-powered website for Reaction Mechanism Generator
#
# Copyright (c) 2011 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
from django.conf.urls.defaults import *
# Maps each ^<tool> URL to its view function in rmgweb.rmg.views, using the
# "rmgweb.rmg" prefix for the string view references.
# NOTE(review): patterns() and django.conf.urls.defaults are legacy APIs that
# were removed in later Django releases -- this module targets an old Django.
urlpatterns = patterns('rmgweb.rmg',

    # RMG Simulation Homepage
    (r'^$', 'views.index'),
    
    # Convert Chemkin File to Output File
    (r'^chemkin','views.convertChemkin'),
    
    # Compare 2 RMG Models
    (r'^compare','views.compareModels'),
    
    # Compare 2 RMG Models
    (r'^adjlist_conversion','views.convertAdjlists'),
    
    # Merge 2 RMG Models
    (r'^merge_models','views.mergeModels'),
    
    # Generate Flux Diagram
    (r'^flux','views.generateFlux'),
    
    # Populate Reactions with an Input File
    (r'^populate_reactions','views.runPopulateReactions'),
    
    # Plot Kinetics
    (r'^plot_kinetics', 'views.plotKinetics'),
    
    # Generate RMG-Java Kinetics Library
    (r'^java_kinetics_library', 'views.javaKineticsLibrary'),

    # Evaluate NASA Polynomial
    (r'^evaluate_nasa', 'views.evaluateNASA')
)
| 34.880597 | 80 | 0.668378 |
7b8db5481df114ec12bf395944d8927f0d6f3ec3 | 10,332 | py | Python | mantel/_test.py | Mancheron/mantel | 3c6e0ce02225cbe073d4dc935f788cb26f2c3f0f | [
"MIT"
] | 20 | 2015-03-16T17:23:55.000Z | 2020-10-13T19:58:19.000Z | mantel/_test.py | Mancheron/mantel | 3c6e0ce02225cbe073d4dc935f788cb26f2c3f0f | [
"MIT"
] | 5 | 2015-05-16T14:25:19.000Z | 2020-08-16T12:23:05.000Z | mantel/_test.py | Mancheron/mantel | 3c6e0ce02225cbe073d4dc935f788cb26f2c3f0f | [
"MIT"
] | 15 | 2015-05-29T23:56:40.000Z | 2021-01-06T06:49:39.000Z | from itertools import permutations
import math

import numpy as np
from scipy import spatial, stats
def test(X, Y, perms=10000, method="pearson", tail="two-tail", ignore_nans=False):
    """
    Takes two distance matrices (either redundant matrices or condensed vectors)
    and performs a Mantel test. The Mantel test is a significance test of the
    correlation between two distance matrices.

    Parameters
    ----------
    X : array_like
        First distance matrix (condensed or redundant).
    Y : array_like
        Second distance matrix (condensed or redundant), where the order of
        elements corresponds to the order of elements in the first matrix.
    perms : int, optional
        The number of permutations to perform (default: 10000). A larger
        number gives more reliable results but takes longer to run. If the
        number of possible permutations is smaller, all permutations will
        be tested. This can be forced by setting perms to 0.
    method : str, optional
        Type of correlation coefficient to use; either 'pearson' or 'spearman'
        (default: 'pearson').
    tail : str, optional
        Which tail to test in the calculation of the empirical p-value; either
        'upper', 'lower', or 'two-tail' (default: 'two-tail').
    ignore_nans : bool, optional
        Ignore NaN values in the Y matrix (default: False). This can be
        useful if you have missing values in one of the matrices.

    Returns
    -------
    r : float
        Veridical correlation
    p : float
        Empirical p-value
    z : float
        Standard score (z-score)

    Raises
    ------
    ValueError
        If X or Y is not a valid distance matrix, their sizes differ, they
        represent fewer than 3 objects, X is non-finite, Y contains NaNs
        without ignore_nans, or method/tail is unrecognized.
    """
    # Ensure that X and Y are represented as Numpy arrays.
    X = np.asarray(X)
    Y = np.asarray(Y)

    # Check that X and Y are valid distance matrices (either square/redundant
    # or condensed vector form).
    if not spatial.distance.is_valid_dm(
        np.nan_to_num(X)
    ) and not spatial.distance.is_valid_y(X):
        raise ValueError("X is not a valid condensed or redundant distance matrix")
    if not spatial.distance.is_valid_dm(
        np.nan_to_num(Y)
    ) and not spatial.distance.is_valid_y(Y):
        raise ValueError("Y is not a valid condensed or redundant distance matrix")

    # If X or Y is a redundant distance matrix, reduce it to a condensed distance matrix.
    if len(X.shape) == 2:
        X = spatial.distance.squareform(X, force="tovector", checks=False)
    if len(Y.shape) == 2:
        Y = spatial.distance.squareform(Y, force="tovector", checks=False)

    # Check for size equality.
    if len(X) != len(Y):
        raise ValueError("X and Y are not of equal size")

    # Check for minimum size (a Mantel test on fewer than 3 objects is
    # meaningless: there are too few permutations).
    if len(X) < 3:
        raise ValueError("X and Y should represent at least 3 objects")

    # Check finiteness of X and Y: only Y may contain NaNs, and only when
    # the caller explicitly opts in with ignore_nans.
    if not np.isfinite(X).all():
        raise ValueError(
            "X cannot contain NaNs (but Y may contain NaNs, so consider reordering X and Y)"
        )
    finite_Y = np.isfinite(Y)
    if not ignore_nans and not finite_Y.all():
        raise ValueError('Y may contain NaNs, but "ignore_nans" must be set to True')
    if ignore_nans and finite_Y.all():
        ignore_nans = False  # ignore_nans is True but Y contains no nans

    # If Spearman correlation is requested, convert X and Y to ranks.
    method = method.lower()
    if method == "spearman":
        X, Y = stats.rankdata(X), stats.rankdata(Y)
        Y[~finite_Y] = np.nan  # retain any nans, so that these can be ignored later
    # Check for valid method parameter.
    elif method != "pearson":
        raise ValueError('The method should be set to "pearson" or "spearman"')

    # Check for valid tail parameter.
    tail = tail.lower()
    if tail not in ["upper", "lower", "two-tail"]:
        raise ValueError('The tail should be set to "upper", "lower", or "two-tail"')

    # Now we're ready to start the Mantel test using a number of optimizations:
    #
    # 1. Rather than compute correlation coefficients, we'll just compute the
    #    covariances. This works because the denominator in the equation for the
    #    correlation coefficient will yield the same result however the objects
    #    are permuted, making it redundant. Removing the denominator leaves us
    #    with the covariance.
    #
    # 2. Rather than permute the Y distances and derive the residuals to calculate
    #    the covariance with the X distances, we'll represent the Y residuals in
    #    the matrix and shuffle those directly.
    #
    # 3. If the number of possible permutations is less than the number of
    #    permutations that were requested, we'll run a deterministic test where
    #    we try all possible permutations rather than sample the permutation
    #    space. This gives a faster, deterministic result.

    # Calculate the X and Y residuals, which will be used to compute the
    # covariance under each permutation.
    X_residuals = X - np.mean(X[finite_Y])
    Y_residuals = Y - np.mean(Y[finite_Y])

    # Expand the Y residuals to a redundant matrix.
    Y_residuals_as_matrix = spatial.distance.squareform(
        Y_residuals, force="tomatrix", checks=False
    )

    m = len(Y_residuals_as_matrix)  # number of objects
    # Use math.factorial rather than np.math.factorial: np.math was a private
    # alias for the stdlib math module and was removed in NumPy 2.0.
    n = math.factorial(m)  # number of possible matrix permutations

    # If the number of requested permutations is greater than the number of
    # possible permutations (m!) or the perms parameter is set to 0, then run a
    # deterministic Mantel test
    if perms >= n or perms == 0:
        if ignore_nans:
            correlations = deterministic_test_with_nans(m, n, X, Y_residuals_as_matrix)
        else:
            correlations = deterministic_test(m, n, X_residuals, Y_residuals_as_matrix)
        # correlations[0] is the veridical correlation

    else:
        if ignore_nans:
            correlations = stochastic_test_with_nans(m, perms, X, Y_residuals_as_matrix)
        else:
            correlations = stochastic_test(m, perms, X_residuals, Y_residuals_as_matrix)
        correlations[0] = sum(X_residuals[finite_Y] * Y_residuals[finite_Y]) / np.sqrt(
            sum(X_residuals[finite_Y] ** 2) * sum(Y_residuals[finite_Y] ** 2)
        )  # compute veridical correlation and place in positon 0

    r = correlations[0]

    # Empirical p-value: the proportion of permuted correlations at least as
    # extreme as the veridical one, in the requested tail.
    if tail == "upper":
        p = sum(correlations >= r) / len(correlations)
    elif tail == "lower":
        p = sum(correlations <= r) / len(correlations)
    elif tail == "two-tail":
        p = sum(abs(correlations) >= abs(r)) / len(correlations)

    z = (r - np.mean(correlations)) / np.std(correlations)

    return r, p, z
def deterministic_test(m, n, X_residuals, Y_residuals_as_matrix):
    """Correlations for all n = m! permutations (no NaNs in Y).

    Because itertools.permutations yields the identity ordering first,
    index 0 of the returned array holds the veridical correlation.

    m: number of objects; n: m factorial; X_residuals: condensed X distances
    minus their mean; Y_residuals_as_matrix: redundant matrix of Y residuals.
    Returns an ndarray of n correlation coefficients.
    """
    # Scratch condensed vector, reused (filled in place) on every iteration.
    Y_residuals_permuted = np.zeros((m ** 2 - m) // 2)
    covariances = np.zeros(n)
    for i, order in enumerate(permutations(range(m))):
        # Permute rows and columns of the redundant Y residual matrix.
        Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]
        # NOTE(review): relies on SciPy's private _distance_wrap C helper to
        # condense the matrix in place -- fast, but not a public API.
        spatial.distance._distance_wrap.to_vector_from_squareform_wrap(
            Y_residuals_as_matrix_permuted, Y_residuals_permuted
        )
        covariances[i] = (X_residuals * Y_residuals_permuted).sum()
    # The sum of squared Y residuals is permutation-invariant (same values,
    # reordered), so computing the denominator once at the end is valid.
    denominator = np.sqrt(sum(X_residuals ** 2) * sum(Y_residuals_permuted ** 2))
    return covariances / denominator
def deterministic_test_with_nans(m, n, X, Y_residuals_as_matrix):
    """Correlations for all n = m! permutations, ignoring NaNs in Y.

    Unlike deterministic_test, the full correlation (not just the covariance)
    must be computed per permutation, because the set of finite Y positions --
    and hence the X residuals and the denominator -- changes each time.
    Index 0 holds the veridical correlation (identity permutation first).
    """
    # Scratch condensed vector, reused (filled in place) on every iteration.
    Y_residuals_permuted = np.zeros((m ** 2 - m) // 2)
    correlations = np.zeros(n)
    for i, order in enumerate(permutations(range(m))):
        Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]
        # NOTE(review): private SciPy C helper; condenses the matrix in place.
        spatial.distance._distance_wrap.to_vector_from_squareform_wrap(
            Y_residuals_as_matrix_permuted, Y_residuals_permuted
        )
        # Since on each permutation we will be ignoring different values in X,
        # the X_residuals need to be recomputed each time depending on which
        # values in permuted Y are finite.
        finite_Y_permuted = np.isfinite(Y_residuals_permuted)
        reduced_X = X[finite_Y_permuted]
        reduced_X_residuals = reduced_X - reduced_X.mean()
        reduced_Y_residuals = Y_residuals_permuted[finite_Y_permuted]
        covariance = (reduced_X_residuals * reduced_Y_residuals).sum()
        # The denominator will be different on each permutation
        denominator = np.sqrt(
            sum(reduced_X_residuals ** 2) * sum(reduced_Y_residuals ** 2)
        )
        correlations[i] = covariance / denominator
    return correlations
def stochastic_test(m, n, X_residuals, Y_residuals_as_matrix):
    """Correlations for n - 1 random permutations (no NaNs in Y).

    Index 0 is deliberately left at zero; the caller overwrites it with the
    veridical correlation. Uses np.random.shuffle, so results depend on the
    global NumPy random state.
    """
    # Scratch condensed vector, reused (filled in place) on every iteration.
    Y_residuals_permuted = np.zeros((m ** 2 - m) // 2)
    covariances = np.zeros(n)
    order = np.arange(m)
    for i in range(1, n):
        np.random.shuffle(order)
        Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]
        # NOTE(review): private SciPy C helper; condenses the matrix in place.
        spatial.distance._distance_wrap.to_vector_from_squareform_wrap(
            Y_residuals_as_matrix_permuted, Y_residuals_permuted
        )
        covariances[i] = (X_residuals * Y_residuals_permuted).sum()
    # Sum of squared Y residuals is permutation-invariant, so one denominator
    # computed after the loop serves all permutations.
    denominator = np.sqrt(sum(X_residuals ** 2) * sum(Y_residuals_permuted ** 2))
    return covariances / denominator
def stochastic_test_with_nans(m, n, X, Y_residuals_as_matrix):
    """Correlations for n - 1 random permutations, ignoring NaNs in Y.

    Index 0 is deliberately left at zero; the caller overwrites it with the
    veridical correlation. The full correlation is computed per permutation
    because the finite-Y mask (and hence the denominator) changes each time.
    """
    # Scratch condensed vector, reused (filled in place) on every iteration.
    Y_residuals_permuted = np.zeros((m ** 2 - m) // 2)
    correlations = np.zeros(n)
    order = np.arange(m)
    for i in range(1, n):
        np.random.shuffle(order)
        Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]
        # NOTE(review): private SciPy C helper; condenses the matrix in place.
        spatial.distance._distance_wrap.to_vector_from_squareform_wrap(
            Y_residuals_as_matrix_permuted, Y_residuals_permuted
        )
        # Since on each permutation we will be ignoring different values in X,
        # the X_residuals need to be recomputed each time depending on which
        # values in permuted Y are finite.
        finite_Y_permuted = np.isfinite(Y_residuals_permuted)
        reduced_X = X[finite_Y_permuted]
        reduced_X_residuals = reduced_X - reduced_X.mean()
        reduced_Y_residuals = Y_residuals_permuted[finite_Y_permuted]
        covariance = (reduced_X_residuals * reduced_Y_residuals).sum()
        # The denominator will be different on each permutation
        denominator = np.sqrt(
            sum(reduced_X_residuals ** 2) * sum(reduced_Y_residuals ** 2)
        )
        correlations[i] = covariance / denominator
    return correlations
| 43.230126 | 92 | 0.670248 |
7278b8e5e098280d153cdea69b18e30259427db0 | 443 | py | Python | Qshop/Seller/migrations/0007_goodstype_type_picture.py | songdanlee/DjangoWorkSpace | 5dea8601f21f5408797a8801f74b76c696a33d83 | [
"MIT"
] | null | null | null | Qshop/Seller/migrations/0007_goodstype_type_picture.py | songdanlee/DjangoWorkSpace | 5dea8601f21f5408797a8801f74b76c696a33d83 | [
"MIT"
] | 1 | 2021-05-10T11:45:52.000Z | 2021-05-10T11:45:52.000Z | Qshop/Seller/migrations/0007_goodstype_type_picture.py | songdanlee/DjangoWorkSpace | 5dea8601f21f5408797a8801f74b76c696a33d83 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.8 on 2019-09-09 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by "makemigrations": adds the GoodsType.type_picture
    # ImageField (uploads go to "seller/images", default banner image).
    # Only comments should be edited by hand in generated migrations.

    dependencies = [
        ('Seller', '0006_auto_20190909_2131'),
    ]

    operations = [
        migrations.AddField(
            model_name='goodstype',
            name='type_picture',
            field=models.ImageField(default='seller/images/banner01.jpg', upload_to='seller/images'),
        ),
    ]
| 23.315789 | 101 | 0.625282 |
42ae7677e20427de66b90c47fd5c9cca03bd5396 | 6,597 | py | Python | automl/google/cloud/automl_v1beta1/proto/video_pb2.py | olumighty1/google-cloud-python | 021e3fbef2fec6af38182bd37b5609022898f5ae | [
"Apache-2.0"
] | null | null | null | automl/google/cloud/automl_v1beta1/proto/video_pb2.py | olumighty1/google-cloud-python | 021e3fbef2fec6af38182bd37b5609022898f5ae | [
"Apache-2.0"
] | null | null | null | automl/google/cloud/automl_v1beta1/proto/video_pb2.py | olumighty1/google-cloud-python | 021e3fbef2fec6af38182bd37b5609022898f5ae | [
"Apache-2.0"
] | 1 | 2021-07-21T17:59:33.000Z | 2021-07-21T17:59:33.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/automl_v1beta1/proto/video.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.cloud.automl_v1beta1.proto import (
classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2,
)
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/automl_v1beta1/proto/video.proto",
package="google.cloud.automl.v1beta1",
syntax="proto3",
serialized_options=_b(
"\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1"
),
serialized_pb=_b(
'\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\x90\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1b\x06proto3'
),
dependencies=[
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_VIDEOCLASSIFICATIONDATASETMETADATA = _descriptor.Descriptor(
name="VideoClassificationDatasetMetadata",
full_name="google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=164,
serialized_end=200,
)
_VIDEOOBJECTTRACKINGDATASETMETADATA = _descriptor.Descriptor(
name="VideoObjectTrackingDatasetMetadata",
full_name="google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=202,
serialized_end=238,
)
_VIDEOCLASSIFICATIONMODELMETADATA = _descriptor.Descriptor(
name="VideoClassificationModelMetadata",
full_name="google.cloud.automl.v1beta1.VideoClassificationModelMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=240,
serialized_end=274,
)
_VIDEOOBJECTTRACKINGMODELMETADATA = _descriptor.Descriptor(
name="VideoObjectTrackingModelMetadata",
full_name="google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=276,
serialized_end=310,
)
DESCRIPTOR.message_types_by_name[
"VideoClassificationDatasetMetadata"
] = _VIDEOCLASSIFICATIONDATASETMETADATA
DESCRIPTOR.message_types_by_name[
"VideoObjectTrackingDatasetMetadata"
] = _VIDEOOBJECTTRACKINGDATASETMETADATA
DESCRIPTOR.message_types_by_name[
"VideoClassificationModelMetadata"
] = _VIDEOCLASSIFICATIONMODELMETADATA
DESCRIPTOR.message_types_by_name[
"VideoObjectTrackingModelMetadata"
] = _VIDEOOBJECTTRACKINGMODELMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VideoClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType(
"VideoClassificationDatasetMetadata",
(_message.Message,),
dict(
DESCRIPTOR=_VIDEOCLASSIFICATIONDATASETMETADATA,
__module__="google.cloud.automl_v1beta1.proto.video_pb2",
__doc__="""Dataset metadata specific to video classification. All Video
Classification datasets are treated as multi label.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata)
),
)
_sym_db.RegisterMessage(VideoClassificationDatasetMetadata)
VideoObjectTrackingDatasetMetadata = _reflection.GeneratedProtocolMessageType(
"VideoObjectTrackingDatasetMetadata",
(_message.Message,),
dict(
DESCRIPTOR=_VIDEOOBJECTTRACKINGDATASETMETADATA,
__module__="google.cloud.automl_v1beta1.proto.video_pb2",
__doc__="""Dataset metadata specific to video object tracking.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata)
),
)
_sym_db.RegisterMessage(VideoObjectTrackingDatasetMetadata)
VideoClassificationModelMetadata = _reflection.GeneratedProtocolMessageType(
"VideoClassificationModelMetadata",
(_message.Message,),
dict(
DESCRIPTOR=_VIDEOCLASSIFICATIONMODELMETADATA,
__module__="google.cloud.automl_v1beta1.proto.video_pb2",
__doc__="""Model metadata specific to video classification.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationModelMetadata)
),
)
_sym_db.RegisterMessage(VideoClassificationModelMetadata)
VideoObjectTrackingModelMetadata = _reflection.GeneratedProtocolMessageType(
"VideoObjectTrackingModelMetadata",
(_message.Message,),
dict(
DESCRIPTOR=_VIDEOOBJECTTRACKINGMODELMETADATA,
__module__="google.cloud.automl_v1beta1.proto.video_pb2",
__doc__="""Model metadata specific to video object tracking.
""",
# @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata)
),
)
_sym_db.RegisterMessage(VideoObjectTrackingModelMetadata)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 34.721053 | 530 | 0.771411 |
1166a8568b13f1cbdded140cbd55920e72413d6c | 341 | py | Python | pinry/core/tests/forms.py | Stackato-Apps/pinry | 0404f3deaeb4221357f9e34bc04d5283d580fbec | [
"BSD-2-Clause"
] | 1 | 2015-02-02T02:48:12.000Z | 2015-02-02T02:48:12.000Z | pinry/core/tests/forms.py | wids-eria/pinry | bde1aa109c162a24fca0d41d3b5cadf0c433c21b | [
"BSD-2-Clause"
] | 7 | 2016-07-19T00:19:21.000Z | 2016-07-19T09:06:13.000Z | pinry/core/tests/forms.py | wids-eria/pinry | bde1aa109c162a24fca0d41d3b5cadf0c433c21b | [
"BSD-2-Clause"
] | 1 | 2019-02-03T03:36:19.000Z | 2019-02-03T03:36:19.000Z | from django.test import TestCase
from ..forms import ImageForm
__all__ = ['ImageFormTest']
class ImageFormTest(TestCase):
def test_image_field_prefix(self):
"""Assert that the image field has a proper name"""
form = ImageForm()
self.assertInHTML("<input id='id_qqfile' name='qqfile' type='file' />", str(form)) | 31 | 90 | 0.68915 |
3372e6e7fad74fc3686c1d9ca3f41b668367ae02 | 1,562 | py | Python | cCdbWrapper_fsHTMLEncode.py | SkyLined/cBugId | fde6d3eff9d73c71fe6ac81c975c1a3e43afad3c | [
"CC-BY-4.0"
] | 22 | 2016-08-11T14:50:55.000Z | 2021-06-06T09:39:26.000Z | cCdbWrapper_fsHTMLEncode.py | SkyLined/cBugId | fde6d3eff9d73c71fe6ac81c975c1a3e43afad3c | [
"CC-BY-4.0"
] | 19 | 2016-09-07T05:54:40.000Z | 2020-07-02T07:46:38.000Z | cCdbWrapper_fsHTMLEncode.py | SkyLined/cBugId | fde6d3eff9d73c71fe6ac81c975c1a3e43afad3c | [
"CC-BY-4.0"
] | 11 | 2016-09-03T22:42:50.000Z | 2018-10-01T18:28:59.000Z | import re;
from .fsHTMLCP437 import fsHTMLCP437;
def fsHTMLEncodeLine(sString, uTabStop, bCP437):
  """Encode one line of text for inclusion in HTML output.

  Tabs are expanded with spaces up to the next multiple of uTabStop (pass
  None to leave tabs to the character encoding below). Printable ASCII
  (0x20-0x7E) passes through unchanged; any other character is rendered as a
  CP437 glyph when bCP437 is truthy, or as a numeric character reference
  otherwise.
  """
  asOutput = []
  uColumn = 0
  for sChar in sString:
    if uTabStop is not None and sChar == "\t":
      # Pad to the next tab stop; always at least one space.
      uPadding = uTabStop - (uColumn % uTabStop)
      asOutput.append(" " * uPadding)
      uColumn += uPadding
    else:
      if bCP437:
        asOutput.append(fsHTMLCP437(sChar))
      elif 0x20 <= ord(sChar) <= 0x7E:
        asOutput.append(sChar)
      else:
        asOutput.append("&#%d;" % ord(sChar))
      uColumn += 1
  return "".join(asOutput)
def cCdbWrapper_fsHTMLEncode(oCdbWrapper, sxLine, uTabStop = None):
  """Convert one line of cdb output (str or bytes) to HTML.

  If any of the configured source-file-path regular expressions matches, the
  first match is wrapped in a link built from the associated URL template.
  bytes input is assumed to be raw cdb console output and is rendered using
  CP437 glyphs.
  """
  if isinstance(sxLine, str):
    sLine = sxLine
    bCP437 = False
  else:
    # Decode bytes 1:1 so every byte value survives; rendering uses CP437.
    sLine = str(sxLine, 'latin1')
    bCP437 = True
  # Add a link to the first source reference only; linking every reference
  # would be rather complex and has not been needed in practice.
  for (srSourceFilePath, sURLTemplate) in oCdbWrapper.dsURLTemplate_by_srSourceFilePath.items():
    oMatch = re.search(srSourceFilePath, sLine)
    if not oMatch:
      continue
    sBefore = sLine[:oMatch.start()]
    sPath = oMatch.group(0)
    sURL = (sURLTemplate % oMatch.groupdict()).replace("\\", "/")
    sAfter = sLine[oMatch.end():]
    return '%s<a target="_blank" href="%s">%s</a>%s' % (
      fsHTMLEncodeLine(sBefore, uTabStop, bCP437),
      sURL,
      fsHTMLEncodeLine(sPath, uTabStop, bCP437),
      fsHTMLEncodeLine(sAfter, uTabStop, bCP437),
    )
  return fsHTMLEncodeLine(sLine, uTabStop, bCP437)
| 38.097561 | 119 | 0.65493 |
36f5ad5123713dc5dceff7185d0e5f11052857ce | 332 | py | Python | code_all/day18/demo03.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | code_all/day18/demo03.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | code_all/day18/demo03.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | """
函数式编程
"""
from common.iterable_tools import IterableHelper
list01 = [342, 4, 54, 56, 6776]
# Predicate for IterableHelper.find_all: select list entries greater than 100.
def condition01(number):
    """Return True when number exceeds 100."""
    return 100 < number
# Predicate for IterableHelper.find_all: select even list entries.
def condition02(number):
    """Return True when number is even."""
    return not number % 2
# Print every element of list01 accepted by condition01 (i.e. values > 100).
for item in IterableHelper.find_all(list01,condition01):
    print(item)
| 10.375 | 56 | 0.680723 |
12c38aea8495d6c184035fdf21ecafbd4f88ff84 | 1,160 | py | Python | setup.py | NielsTilch/torchxrayvision | 5a5a51feaf3d24e4b2c6a056528ea3e70db82758 | [
"Apache-2.0"
] | 1 | 2022-03-09T15:43:35.000Z | 2022-03-09T15:43:35.000Z | setup.py | NielsTilch/torchxrayvision | 5a5a51feaf3d24e4b2c6a056528ea3e70db82758 | [
"Apache-2.0"
] | null | null | null | setup.py | NielsTilch/torchxrayvision | 5a5a51feaf3d24e4b2c6a056528ea3e70db82758 | [
"Apache-2.0"
] | null | null | null | import setuptools
from setuptools import setup, find_packages
from torchxrayvision import _version
# Long description shown on PyPI comes from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# One requirement per line in requirements.txt; surrounding whitespace is
# stripped. (The original left this file handle open; the context manager
# closes it deterministically.)
with open("requirements.txt", "r", encoding="utf-8") as fh:
    REQUIREMENTS = [i.strip() for i in fh.readlines()]

setuptools.setup(
    name="torchxrayvision",
    version=_version.__version__,
    author="Joseph Paul Cohen",
    author_email="joseph@josephpcohen.com",
    description="TorchXRayVision: A library of chest X-ray datasets and models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mlmed/torchxrayvision",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Medical Science Apps."
    ],
    python_requires='>=3.6',
    install_requires=REQUIREMENTS,
    packages=find_packages(),
    package_dir={'torchxrayvision': 'torchxrayvision'},
    package_data={'torchxrayvision': ['data/*.zip','data/*.gz','data/*.tgz','baseline_models/*/*.json','baseline_models/*/*/*.json']},
)
| 37.419355 | 134 | 0.696552 |
fed88892a6b56b5b0d646c9d958dc5ce5b3c7136 | 977 | py | Python | XGBClassifier.py | AyberkCakar/ML-method-software-metric-estimation-algorithms | 49df42953ae1468ff69847ba4c47b3f96983af3e | [
"MIT"
] | 1 | 2021-12-28T20:10:18.000Z | 2021-12-28T20:10:18.000Z | XGBClassifier.py | AyberkCakar/ML-method-software-metric-estimation-algorithms | 49df42953ae1468ff69847ba4c47b3f96983af3e | [
"MIT"
] | null | null | null | XGBClassifier.py | AyberkCakar/ML-method-software-metric-estimation-algorithms | 49df42953ae1468ff69847ba4c47b3f96983af3e | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn import metrics
# Method-level software metrics (with bug counts) from the BugHunter dataset.
data = pd.read_csv("BugHunterDataset/subtract/all/method.csv")
def mapNumberOfBugs(row):
    """Binarize a row's bug count: 1 when the row has any bugs, else 0."""
    return int(row['Number of Bugs'] > 0)
# Binarize the label column in place: any bug count > 0 becomes 1.
data['Number of Bugs'] = data.apply(mapNumberOfBugs, axis=1)
# Drop identifier and rule-category columns that are not numeric metric features.
data = data.drop(columns=['Project','Hash', 'LongName', 'Vulnerability Rules', 'Finalizer Rules', 'Migration15 Rules', 'Migration14 Rules', 'Migration13 Rules', 'MigratingToJUnit4 Rules', 'JavaBean Rules', 'Coupling Rules', 'WarningBlocker', 'Code Size Rules', 'WarningInfo', 'Android Rules', 'Clone Implementation Rules', 'Comment Rules'])
# First 58 columns are the features; column 58 is the binary label.
X=data.iloc[:,0:58].values
Y=data.iloc[:,58:59].values
# 60/40 train/test split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state = 10)
# Gradient-boosted tree classifier with default hyperparameters.
xgbModel = XGBClassifier()
xgbModel.fit(X_train,Y_train)
Y_pred=xgbModel.predict(X_test)
print("Accuracy:",metrics.accuracy_score(Y_test, Y_pred)) | 39.08 | 340 | 0.758444 |
305630d91141ff448d5894b6163224bc1898de43 | 981 | py | Python | allennlp/allennlp/modules/masked_layer_norm.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | allennlp/allennlp/modules/masked_layer_norm.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | allennlp/allennlp/modules/masked_layer_norm.py | rahular/joint-coref-srl | cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be | [
"MIT"
] | null | null | null | import torch
class MaskedLayerNorm(torch.nn.Module):
    """
    Layer normalization computed only over unmasked positions.

    See LayerNorm for details. Note, however, that unlike LayerNorm this
    norm includes a batch component: the mean and standard deviation are
    scalars computed over every unmasked element in the whole batch.
    """

    def __init__(self, size: int, gamma0: float = 0.1, eps: float = 1e-6) -> None:
        super().__init__()
        # Learnable per-feature scale (initialised to gamma0) and shift.
        self.gamma = torch.nn.Parameter(torch.ones(1, 1, size) * gamma0)
        self.beta = torch.nn.Parameter(torch.zeros(1, 1, size))
        self.size = size
        self.eps = eps

    def forward(self, tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        # (batch, seq) -> (batch, seq, 1) so the mask broadcasts over features.
        expanded_mask = mask.unsqueeze(-1).float()
        element_count = expanded_mask.sum() * self.size
        mean = (tensor * expanded_mask).sum() / element_count
        centered = (tensor - mean) * expanded_mask
        variance = (centered * centered).sum() / element_count
        std = torch.sqrt(variance + self.eps)
        # eps appears in the denominator as well, for numerical safety.
        return self.gamma * (tensor - mean) / (std + self.eps) + self.beta
| 35.035714 | 82 | 0.621814 |
3beafe70659f60fc7cc639e016725417785c7312 | 18,574 | py | Python | cms/menu.py | isotoma/django-cms | 90203d9c5fb8acbb2f11f6a23193e75c7705b43d | [
"BSD-3-Clause"
] | null | null | null | cms/menu.py | isotoma/django-cms | 90203d9c5fb8acbb2f11f6a23193e75c7705b43d | [
"BSD-3-Clause"
] | null | null | null | cms/menu.py | isotoma/django-cms | 90203d9c5fb8acbb2f11f6a23193e75c7705b43d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from collections import defaultdict
from django.contrib.sites.models import Site
from django.utils.translation import get_language
from cms.apphook_pool import apphook_pool
from cms.models.permissionmodels import ACCESS_DESCENDANTS
from cms.models.permissionmodels import ACCESS_PAGE_AND_DESCENDANTS
from cms.models.permissionmodels import ACCESS_CHILDREN
from cms.models.permissionmodels import ACCESS_PAGE_AND_CHILDREN
from cms.models.permissionmodels import ACCESS_PAGE
from cms.models.permissionmodels import PagePermission, GlobalPagePermission
from cms.utils import get_language_from_request
from cms.utils.compat.dj import user_related_name
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import get_fallback_languages, hide_untranslated
from cms.utils.page_resolver import get_page_queryset
from cms.utils.moderator import get_title_queryset, use_draft
from cms.utils.plugins import current_site
from menus.base import Menu, NavigationNode, Modifier
from menus.menu_pool import menu_pool
def get_visible_pages(request, pages, site=None):
    """
    Return the list of page pks from ``pages`` that ``request.user`` may view.

    This code is basically a many-pages-at-once version of
    Page.has_view_permission: ``pages`` contains all published pages, and we
    first check whether ANY view restriction exists that requires a per-page
    permission visibility calculation before doing the expensive checks.
    """
    public_for = get_cms_setting('PUBLIC_FOR')
    is_setting_public_all = public_for == 'all'
    is_setting_public_staff = public_for == 'staff'
    is_auth_user = request.user.is_authenticated()
    visible_page_ids = []
    # maps a page pk to the list of PagePermissions restricting it
    restricted_pages = defaultdict(list)
    page_permissions = PagePermission.objects.filter(
        can_view=True
    ).select_related(
        'page'
    ).prefetch_related('group__' + user_related_name)

    for perm in page_permissions:
        # collect the pages that are affected by permissions
        if site and perm.page.site_id != site.pk:
            continue
        if perm is not None and perm not in restricted_pages[perm.page.pk]:
            # effective restricted pages gathering
            # using mptt functions
            # add the page with the perm itself
            if perm.grant_on in [
                    ACCESS_PAGE, ACCESS_PAGE_AND_CHILDREN,
                    ACCESS_PAGE_AND_DESCENDANTS]:
                # both the draft pk and the public pk are restricted
                restricted_pages[perm.page.pk].append(perm)
                restricted_pages[perm.page.publisher_public_id].append(perm)
            # add children
            if perm.grant_on in [ACCESS_CHILDREN, ACCESS_PAGE_AND_CHILDREN]:
                child_ids = perm.page.get_children().values_list(
                    'id', 'publisher_public_id')
                for id, public_id in child_ids:
                    restricted_pages[id].append(perm)
                    restricted_pages[public_id].append(perm)
            # add descendants
            elif perm.grant_on in [
                    ACCESS_DESCENDANTS, ACCESS_PAGE_AND_DESCENDANTS]:
                child_ids = perm.page.get_descendants().values_list(
                    'id', 'publisher_public_id')
                for id, public_id in child_ids:
                    restricted_pages[id].append(perm)
                    restricted_pages[public_id].append(perm)

    # anonymous
    # no restriction applied at all
    if (not is_auth_user and
            is_setting_public_all and
            not restricted_pages):
        return [page.pk for page in pages]

    if site is None:
        site = current_site(request)

    # authenticated user and global permission
    if is_auth_user:
        global_view_perms = GlobalPagePermission.objects.user_has_view_permission(request.user, site.pk).exists()

        # no page perms edge case - all visible
        if ((is_setting_public_all or (
                is_setting_public_staff and request.user.is_staff)) and
                not restricted_pages and
                not global_view_perms):
            return [page.pk for page in pages]
        # no page perms edge case - none visible
        elif (is_setting_public_staff and
                not request.user.is_staff and
                not restricted_pages and
                not global_view_perms):
            return []

    def has_global_perm():
        # lazily evaluated, memoized check for the global 'cms.view_page'
        # permission (cache: -1 = unset, 0 = False, 1 = True)
        if has_global_perm.cache < 0:
            if request.user.has_perm('cms.view_page'):
                has_global_perm.cache = 1
            else:
                has_global_perm.cache = 0
        return bool(has_global_perm.cache)

    has_global_perm.cache = -1

    def has_permission_membership(page):
        """
        PagePermission user group membership tests
        """
        user_pk = request.user.pk
        page_pk = page.pk
        for perm in restricted_pages[page_pk]:
            if perm.user_id == user_pk:
                return True
            if not perm.group_id:
                continue
            user_set = getattr(perm.group, user_related_name)
            # Optimization equivalent to
            # if user_pk in user_set.values_list('pk', flat=True)
            if any(user_pk == user.pk for user in user_set.all()):
                return True
        return False

    for page in pages:
        to_add = False
        # default to false, showing a restricted page is bad
        # explicitly check all the conditions
        # of settings and permissions
        is_restricted = page.pk in restricted_pages
        # restricted_pages contains as key any page.pk that is
        # affected by a permission grant_on
        if is_auth_user:
            # a global permission was given to the request's user
            if global_view_perms:
                to_add = True
            # setting based handling of unrestricted pages
            elif not is_restricted and (
                    is_setting_public_all or (
                        is_setting_public_staff and request.user.is_staff)):
                # authenticated user, no restriction and public for all
                # or
                # authenticated staff user, no restriction and public for staff
                to_add = True
            # check group and user memberships to restricted pages
            elif is_restricted and has_permission_membership(page):
                to_add = True
            elif has_global_perm():
                to_add = True
        # anonymous user, no restriction
        elif not is_restricted and is_setting_public_all:
            to_add = True
        # store it
        if to_add:
            visible_page_ids.append(page.pk)
    return visible_page_ids
def page_to_node(page, home, cut):
    """
    Transform a CMS page into a navigation node.

    :param page: the page you wish to transform
    :param home: a reference to the "home" page (the page with tree_id=1)
    :param cut: Should we cut page from its parent pages? This means the node
        will not have a parent anymore.
    """
    # Attributes copied over directly; the rest are derived below.
    attr = {
        'page': page,
        'soft_root': page.soft_root,
        'auth_required': page.login_required,
        'reverse_id': page.reverse_id,
    }
    # Detach the node from its parent when it is a direct child of home and
    # cutting was requested.
    parent_id = page.parent_id
    if cut and home and parent_id == home.pk:
        parent_id = None

    if page.limit_visibility_in_menu is None:
        attr['visible_for_authenticated'] = True
        attr['visible_for_anonymous'] = True
    else:
        attr['visible_for_authenticated'] = page.limit_visibility_in_menu == 1
        attr['visible_for_anonymous'] = page.limit_visibility_in_menu == 2
    attr['is_home'] = page.is_home

    # Collect extenders: explicit navigation extenders plus any menus
    # contributed by an apphook attached to this page.
    extenders = []
    if page.navigation_extenders:
        extenders.append(page.navigation_extenders)
    lang = get_language()
    # Only resolve the apphook when a title exists in the requested language
    # (the title cache should have been prepopulated in CMSMenu.get_nodes);
    # otherwise request it normally.
    if not hasattr(page, 'title_cache') or lang in page.title_cache:
        app_name = page.get_application_urls(fallback=False)
        if app_name:
            # the page is an apphook
            app = apphook_pool.get_apphook(app_name)
            extenders += app.menus

    exts = []
    for ext in extenders:
        if hasattr(ext, "get_instances"):
            # CMSAttachMenus can be attached to multiple points in the
            # navigation, so the page pk is baked into the namespace.
            exts.append("{0}:{1}".format(ext.__name__, page.pk))
        elif hasattr(ext, '__name__'):
            exts.append(ext.__name__)
        else:
            exts.append(ext)
    if exts:
        attr['navigation_extenders'] = exts

    attr['redirect_url'] = page.get_redirect()  # save redirect URL if any

    return NavigationNode(
        page.get_menu_title(),
        page.get_absolute_url(),
        page.pk,
        parent_id,
        attr=attr,
        visible=page.in_navigation,
    )
class CMSMenu(Menu):
    """Menu that exposes the CMS page tree as navigation nodes."""

    def get_nodes(self, request):
        """Build NavigationNodes for all pages visible to this request."""
        page_queryset = get_page_queryset(request)
        site = Site.objects.get_current()
        lang = get_language_from_request(request)
        filters = {
            'site': site,
        }
        # restrict to pages translated into the current language if
        # untranslated pages should be hidden
        if hide_untranslated(lang, site.pk):
            filters['title_set__language'] = lang
        if not use_draft(request):
            page_queryset = page_queryset.published()
        pages = page_queryset.filter(**filters).order_by("tree_id", "lft")
        ids = {}
        nodes = []
        first = True
        home_cut = False
        home_children = []
        home = None
        actual_pages = []
        # cache view perms
        visible_pages = get_visible_pages(request, pages, site)
        for page in pages:
            # Pages are ordered by tree_id, therefore the first page is the root
            # of the page tree (a.k.a "home")
            if page.pk not in visible_pages:
                # Don't include pages the user doesn't have access to
                continue
            if not home:
                home = page
            if first and page.pk != home.pk:
                home_cut = True
            if (page.parent_id == home.pk or page.parent_id in home_children) and home_cut:
                home_children.append(page.pk)
            if (page.pk == home.pk and home.in_navigation) or page.pk != home.pk:
                first = False
            ids[page.id] = page
            actual_pages.append(page)
            page.title_cache = {}
        # prefetch all titles (with fallbacks) in one query and cache them
        # on the page objects for page_to_node
        langs = [lang]
        if not hide_untranslated(lang):
            langs.extend(get_fallback_languages(lang))
        titles = list(get_title_queryset(request).filter(page__in=ids, language__in=langs))
        for title in titles:  # add the title and slugs and some meta data
            page = ids[title.page_id]
            page.title_cache[title.language] = title
        for page in actual_pages:
            if page.title_cache:
                nodes.append(page_to_node(page, home, home_cut))
        return nodes
menu_pool.register_menu(CMSMenu)
class NavExtender(Modifier):
    """Attach navigation-extender menu nodes under the pages declaring them."""

    def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
        if post_cut:
            return nodes
        exts = []
        # rearrange the parent relations
        home = None
        for node in nodes:
            if node.attr.get("is_home", False):
                home = node
            extenders = node.attr.get("navigation_extenders", None)
            if extenders:
                for ext in extenders:
                    if ext not in exts:
                        exts.append(ext)
                    # reparent every root node of this extender menu under
                    # the page node that declared it
                    for extnode in nodes:
                        if extnode.namespace == ext and not extnode.parent_id:
                            # if home has nav extenders but home is not visible
                            if (node.attr.get("is_home", False)
                                    and not node.visible):
                                extnode.parent_id = None
                                extnode.parent_namespace = None
                                extnode.parent = None
                            else:
                                extnode.parent_id = node.id
                                extnode.parent_namespace = node.namespace
                                extnode.parent = node
                                node.children.append(extnode)
        removed = []
        # find all not assigned nodes
        for menu in menu_pool.menus.items():
            if (hasattr(menu[1], 'cms_enabled')
                    and menu[1].cms_enabled and not menu[0] in exts):
                for node in nodes:
                    if node.namespace == menu[0]:
                        removed.append(node)
        if breadcrumb:
            # if breadcrumb and home not in navigation add node
            if breadcrumb and home and not home.visible:
                home.visible = True
                if request.path_info == home.get_absolute_url():
                    home.selected = True
                else:
                    home.selected = False
        # remove all nodes that are nav_extenders and not assigned
        for node in removed:
            nodes.remove(node)
        return nodes
menu_pool.register_modifier(NavExtender)
class SoftRootCutter(Modifier):
    """
    Ask evildmp/superdmp if you don't understand softroots!

    Softroot description from the docs:

        A soft root is a page that acts as the root for a menu navigation tree.

        Typically, this will be a page that is the root of a significant new
        section on your site.

        When the soft root feature is enabled, the navigation menu for any page
        will start at the nearest soft root, rather than at the real root of
        the site's page hierarchy.

        This feature is useful when your site has deep page hierarchies (and
        therefore multiple levels in its navigation trees). In such a case, you
        usually don't want to present site visitors with deep menus of nested
        items.

        For example, you're on the page "Introduction to Bleeding", so the menu
        might look like this:

            School of Medicine
                Medical Education
                Departments
                    Department of Lorem Ipsum
                    Department of Donec Imperdiet
                    Department of Cras Eros
                    Department of Mediaeval Surgery
                        Theory
                        Cures
                            Bleeding
                                Introduction to Bleeding <this is the current page>
                                Bleeding - the scientific evidence
                                Cleaning up the mess
                            Cupping
                            Leaches
                            Maggots
                        Techniques
                        Instruments
                    Department of Curabitur a Purus
                    Department of Sed Accumsan
                    Department of Etiam
                Research
                Administration
                Contact us
                Impressum

        which is frankly overwhelming.

        By making "Department of Mediaeval Surgery" a soft root, the menu
        becomes much more manageable:

            Department of Mediaeval Surgery
                Theory
                Cures
                    Bleeding
                        Introduction to Bleeding <current page>
                        Bleeding - the scientific evidence
                        Cleaning up the mess
                    Cupping
                    Leaches
                    Maggots
                Techniques
                Instruments
    """

    def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
        # only apply this modifier if we're pre-cut (since what we do is cut)
        if post_cut:
            return nodes
        selected = None
        root_nodes = []
        # find the selected node as well as all the root nodes
        for node in nodes:
            if node.selected:
                selected = node
            if not node.parent:
                root_nodes.append(node)

        # if we found a selected ...
        if selected:
            # and the selected is a softroot
            if selected.attr.get("soft_root", False):
                # get it's descendants
                nodes = selected.get_descendants()
                # remove the link to parent
                selected.parent = None
                # make the selected page the root in the menu
                nodes = [selected] + nodes
            else:
                # if it's not a soft root, walk ancestors (upwards!)
                nodes = self.find_ancestors_and_remove_children(selected, nodes)
        return nodes

    def find_and_remove_children(self, node, nodes):
        # prune the subtrees of any soft-root children of `node` from `nodes`
        for child in node.children:
            if child.attr.get("soft_root", False):
                self.remove_children(child, nodes)
        return nodes

    def remove_children(self, node, nodes):
        # recursively remove all descendants of `node` from the flat node list
        for child in node.children:
            nodes.remove(child)
            self.remove_children(child, nodes)
        node.children = []

    def find_ancestors_and_remove_children(self, node, nodes):
        """
        Check ancestors of node for soft roots
        """
        if node.parent:
            if node.parent.attr.get("soft_root", False):
                # a soft-root ancestor becomes the new menu root
                nodes = node.parent.get_descendants()
                node.parent.parent = None
                nodes = [node.parent] + nodes
            else:
                nodes = self.find_ancestors_and_remove_children(node.parent, nodes)
        else:
            # reached a real root: prune soft-root subtrees of the other roots
            for newnode in nodes:
                if newnode != node and not newnode.parent:
                    self.find_and_remove_children(newnode, nodes)
        for child in node.children:
            if child != node:
                self.find_and_remove_children(child, nodes)
        return nodes
menu_pool.register_modifier(SoftRootCutter)
| 38.455487 | 113 | 0.591741 |
86919681591f5da1419dbe3b6359735be86d3f9e | 12,631 | py | Python | gsextract/gse_parser.py | newspaperman/gsextract-b8 | 84935984648f82dd4d78aaa7abae8892788fe4f7 | [
"Unlicense"
] | 1 | 2020-11-21T08:20:02.000Z | 2020-11-21T08:20:02.000Z | gsextract/gse_parser.py | newspaperman/gsextract-b8 | 84935984648f82dd4d78aaa7abae8892788fe4f7 | [
"Unlicense"
] | null | null | null | gsextract/gse_parser.py | newspaperman/gsextract-b8 | 84935984648f82dd4d78aaa7abae8892788fe4f7 | [
"Unlicense"
] | 1 | 2020-11-22T09:17:10.000Z | 2020-11-22T09:17:10.000Z | from .parsers.pure_bb import PureBb
from .parsers.pure_gse import PureGse
from .parsers.ipv4_packet import Ipv4Packet
from kaitaistruct import KaitaiStream, BytesIO
from .pcaplib import Writer
from datetime import datetime
from scapy.layers.all import IP, TCP, Ether, ICMP
from scapy.all import send, sendp, sendpfast
import time
import socket
defrag_dict = {}
counters = {
'gse_start_packets': 0,
'gse_end_packets': 0,
'gse_mid_packets': 0,
'gse_full_packets': 0,
'gse_padding_packets': 0,
'defragmented_gse_packets': 0,
'salvage_gse_packets': 0,
'truncated_gse_packets': 0,
'broken_bbframes': 0,
'ip_recovered': 0,
'non_ip_or_corrupt_gse': 0,
}
sync_dict = {}
FIN = 0x01
SYN = 0x02
RST = 0x04
PSH = 0x08
ACK = 0x10
URG = 0x20
ECE = 0x40
CWR = 0x80
def gse_parse(file, outfile, bbsync=int(0xB8), stream=False, tcp_hijack=False, tcp_hijack_ips=None, reliable=True):
    """Parse a raw BBFrame recording into a pcap file of recovered IP packets.

    :param file: path of the raw BBFrame capture to read
    :param outfile: path of the pcap file to write
    :param bbsync: BBHeader sync byte used to locate frames (default 0xB8)
    :param stream: when True, keep polling ``file`` for newly appended data
    :param tcp_hijack: accepted but not used in this function
    :param tcp_hijack_ips: accepted but not used in this function
    :param reliable: forwarded to IP extraction as ``high_reliability``
    """
    with open(outfile, 'wb') as pcap_file:
        io = KaitaiStream(open(file, 'rb'))
        pcap_writer = Writer()
        pcap_writer.create_header(pcap_file)
        bbframe_count = 1
        pkt_count = 0
        eof_count = 0
        while True:
            try:
                # we record the last io position for kaitai so we can recover from EOF errors in streaming mode
                last_pos = io.pos()
                # prints the first BBframe we find at the current IO position
                # this throws EOF if there's no bytes left in the file
                current_bbframe = PureBb(io, bbsync=bbsync).bbframe
                if eof_count > 0:
                    print("new frames found, continuing...")
                eof_count = 0
            except EOFError:
                if not stream:
                    # if we're reading from a static file EOFError is sufficient reason to stop
                    break
                elif eof_count == 0:
                    pass
                # otherwise we will wait progressively longer whenever there isn't data in the streamed file
                elif eof_count % 10000 == 0:
                    time.sleep(1)
                # NOTE(review): when eof_count > 1000000 is a multiple of 10000
                # the modulo branch above matches first, and `> 1000600` is
                # unreachable because `> 1000000` always matches before it --
                # the ordering of these elifs looks like a bug; confirm intent.
                elif eof_count > 1000000:
                    time.sleep(10)
                elif eof_count > 1000600:
                    # after an hour of no fresh bytes (plus a little bit more), gsextract will exit and clean up buffers
                    # this normally means something has broken in the satellite hardware side
                    print("No new data received for at least 1 hour. Exiting gsextract.")
                eof_count += 1
                io.seek(last_pos)
                continue
            except:
                # we want to maximize recovery in the case of stream parsing errors so we will just keep trying
                continue
            bbframe_count += 1
            # record stats on corrupt BBframes and then move to the next frame
            if hasattr(current_bbframe, 'corrupt_data'):
                counters['broken_bbframes'] += 1
                # NOTE(review): each assignment below overwrites `tmp` rather
                # than OR-ing the shifted bits together, so the printed MA1
                # value is simply `ro << 2` -- presumably `|=` was intended.
                tmp=current_bbframe.bbheader.matype_1.ts_gs_field
                tmp<<=2
                tmp=current_bbframe.bbheader.matype_1.sis_mis_field
                tmp<<=1
                tmp=current_bbframe.bbheader.matype_1.ccm_acm_field
                tmp<<=1
                tmp=current_bbframe.bbheader.matype_1.issyi
                tmp<<=1
                tmp=current_bbframe.bbheader.matype_1.npd
                tmp<<=1
                tmp=current_bbframe.bbheader.matype_1.ro
                tmp<<=2
                print("BBFrame", bbframe_count, " contains corrupt data, (BBSYNC, MA1, MA2:", current_bbframe.bbheader.bbsync, " ", tmp, " ", current_bbframe.bbheader.matype_2, ") attempting to recover")
            else:
                # for valid BBFrames
                # next extract gse packets from the bbframe and try to make them into IP payloads
                gse_packets = get_gse_from_bbdata(current_bbframe.data_field)
                raw_packets = parse_gse_packet_array(gse_packets, bbframe_count, reliable=reliable)
                # if we get any IP packets, write them to a pcap file
                if len(raw_packets) > 0:
                    pcap_writer.write(raw_packets, pcap_file)
                    pkt_count += len(raw_packets)
                # print some progress stats
                if pkt_count % 10000 == 0:
                    print(pkt_count, "packets parsed")
                    print(counters)

        # Clean up any lingering fragments when GSExtract closes
        # these would be partially filled buffers from end of recording
        raw_packets = parse_gse_packet_array([],0, cleanup=True,reliable=reliable)
        if len(raw_packets) > 0:
            pcap_writer.write(raw_packets, pcap_file)
        # Print some basic stats before finishing
        print(counters)
def get_gse_from_bbdata(bbdata):
    """Split the payload bytes of one BBFrame into parsed GSE packets.

    Packets that cannot be fully parsed (truncated at the end of the payload
    or structurally invalid) are tallied in ``counters`` and skipped.
    """
    byte_stream = KaitaiStream(BytesIO(bbdata))
    parsed_packets = []
    while not byte_stream.is_eof():
        try:
            parsed_packets.append(PureGse(byte_stream).gse_packet)
        except (EOFError, ValueError):
            counters['truncated_gse_packets'] += 1
    return parsed_packets
def parse_gse_packet_array(gse_packets, frame_number, cleanup=False, reliable=True):
    """Defragment GSE packets and convert completed payloads to pcap tuples.

    Fragmented payloads accumulate in the module-level ``defrag_dict`` keyed
    by fragment id; complete payloads are handed to
    ``extract_ip_from_gse_data``.

    :param gse_packets: parsed GSE packets from one BBFrame
    :param frame_number: index of the BBFrame (used to expire stale fragments)
    :param cleanup: when True, also flush partially-filled fragment buffers
    :param reliable: forwarded to IP extraction as ``high_reliability``
    :return: list of packet tuples ready for the pcap writer
    """
    scapy_packets = []
    # Loop through the GSE packets and assemble fragments into their buffers
    for gse in gse_packets:
        s_packets = None
        # bookkeeping for fragment-type statistics
        if gse.gse_header.end_indicator and not gse.gse_header.start_indicator:
            counters['gse_end_packets'] += 1
        elif not gse.gse_header.end_indicator and not gse.gse_header.start_indicator:
            counters['gse_mid_packets'] += 1
        if gse.gse_header.is_padding_packet:
            counters['gse_padding_packets'] += 1
        elif gse.gse_header.start_indicator and gse.gse_header.end_indicator:
            # complete, unfragmented gse packet
            counters['gse_full_packets'] += 1
            s_packets = extract_ip_from_gse_data(gse.gse_payload.data, high_reliability=reliable)
        else:
            frag_id = str(gse.gse_header.frag_id)
            if gse.gse_header.start_indicator and not gse.gse_header.end_indicator:
                # start of gse fragment
                counters['gse_start_packets'] += 1
                if frag_id in defrag_dict:
                    # a new start interrupts an in-progress fragment: salvage
                    # the existing partial data before overwriting the buffer
                    s_packets = extract_ip_from_gse_data(defrag_dict[frag_id][1], high_reliability=reliable)
                    counters['salvage_gse_packets'] += 1
                # remember the frame index so stale fragments can be expired
                defrag_dict[frag_id] = (frame_number, gse.gse_payload.data)
            elif frag_id in defrag_dict:
                # middle or end packet with a known start
                if frame_number - defrag_dict[frag_id][0] > 256:
                    # fragment ids are only valid within 256 frames of the
                    # start packet: salvage and discard the stale buffer
                    s_packets = extract_ip_from_gse_data(defrag_dict[frag_id][1], high_reliability=reliable)
                    counters['salvage_gse_packets'] += 1
                    defrag_dict.pop(frag_id, None)
                else:
                    # append the current chunk to the buffered fragment
                    defrag_dict[frag_id] = (defrag_dict[frag_id][0], defrag_dict[frag_id][1] + gse.gse_payload.data)
                    if gse.gse_header.end_indicator:
                        # fragment complete: extract once and reuse the result
                        # (the original extracted twice, duplicating the work
                        # and any side effects of extraction)
                        extracted_ip_packets = extract_ip_from_gse_data(defrag_dict[frag_id][1], high_reliability=reliable)
                        if extracted_ip_packets is not None:
                            scapy_packets.append(extracted_ip_packets)
                            counters['defragmented_gse_packets'] += 1
                        defrag_dict.pop(frag_id, None)
        if s_packets is not None:
            # build an array of packets we've extracted so far
            scapy_packets.append(s_packets)
    if cleanup:
        # in cleanup mode, flush partially-filled buffers left at end of
        # recording. (Fixed: the original unpacked the (frame, data) tuple
        # but then indexed entry[1] -- a single byte -- so salvage here could
        # never produce a packet.)
        for _, fragment_data in defrag_dict.values():
            if len(fragment_data) >= 2:
                extracted_ip_packets = extract_ip_from_gse_data(fragment_data, high_reliability=reliable)
                if extracted_ip_packets is not None:
                    scapy_packets.append(extracted_ip_packets)
                    counters['salvage_gse_packets'] += 1
    return scapy_packets
def extract_ip_from_gse_data(raw_data, high_reliability=True, tcp_hijack=False, tcp_hijack_ips=[None, None]):
    """Try to interpret a reassembled GSE payload as an IPv4 packet.

    :param raw_data: candidate IPv4 packet bytes from a GSE payload
    :param high_reliability: when False, forge plausible IPv4 header bytes to
        salvage packets that fail to parse (risking invalid headers)
    :param tcp_hijack: when True, forge TCP responses for matching flows
    :param tcp_hijack_ips: the two endpoint IPs eligible for hijacking.
        NOTE(review): mutable default argument -- harmless while it is only
        read, but the list is shared across calls.
    :return: a pcap record tuple (sec, usec, caplen, len, bytes) or None
    """
    ip_packet = None
    simple_packet = None
    try:
        ip_packet = Ipv4Packet.from_bytes(raw_data)
    except EOFError:
        # if there's just not enough bytes, we can try adding a small number of padding bytes to see if it makes for a mostly recovered packet
        # we'll try to recover up to 3x the length of the original packet
        try:
            raw_data = raw_data + (3*len(raw_data))*b"\x00"
            ip_packet = Ipv4Packet.from_bytes(raw_data)
        except:
            counters['non_ip_or_corrupt_gse'] += 1
    except ValueError:
        # we can try and force a typical first two bytes of an IPV4 header to bully GSExtract into making a packet
        # this runs the risk of invalid IP headers but catches some packets when there are undocumented proprietary GSE extensions
        if not high_reliability:
            try:
                raw_data = b"\x45" + raw_data + (3 * len(raw_data)) * b"\x00"
                ip_packet = Ipv4Packet.from_bytes(raw_data)
            except:
                try:
                    raw_data = b"\x45\x00" + raw_data[1:] + (3 * len(raw_data)) * b"\x00"
                    ip_packet = Ipv4Packet.from_bytes(raw_data)
                except:
                    pass
        counters['non_ip_or_corrupt_gse'] += 1
    except:
        # deliberate best-effort: any other parse failure is silently dropped
        pass
    if ip_packet is not None:
        # timestamp the recovered packet with the wall-clock capture time
        seconds_time = time.time()
        dt = datetime.now()
        simple_packet = (int(seconds_time), dt.microsecond, len(raw_data), len(raw_data), raw_data)
        # This is a very simple example of TCP hijacking
        # You would need to pass both a target and destination IP address through to the parent function
        # This is not implemented in the command-line tool but the modifications should be straightforward
        if tcp_hijack and (ip_packet.src_ip_addr == tcp_hijack_ips[0] or ip_packet.dst_ip_addr == tcp_hijack_ips[0]) and (ip_packet.src_ip_addr == tcp_hijack_ips[1] or ip_packet.dst_ip_addr == tcp_hijack_ips[1]):
            html = "<b>Hijacked TCP Session</b>"
            p = IP(raw_data)
            if "TCP" in p:
                F = p[TCP].flags
                # forge a response with source/destination swapped
                forgery_ip = IP(src=p[IP].dst, dst=p[IP].src)
                response = "HTTP/1.1 200 OK\n"
                response += "Server: MyServer\n"
                response += "Content-Type: text/html\n"
                response += "Content-Length: " + str(len(html)) + "\n"
                response += "Connection: close"
                response += "\n\n"
                response += html
                if F & SYN and not F & ACK:
                    # answer a SYN with a forged SYN-ACK
                    forgery_tcp = TCP(sport=p[TCP].dport, dport=p[TCP].sport, seq=123, ack=p[TCP].seq + 1,
                                      flags="AS")
                    forgery = Ether()/forgery_ip/forgery_tcp/response
                    sendp(forgery)
                elif F & ACK and F & PSH:
                    # answer pushed data with a forged HTTP response
                    forgery_tcp = TCP(sport=p[TCP].dport, dport=p[TCP].sport, seq=p[TCP].ack+1, ack=p[TCP].seq,
                                      flags="PA", options=p[TCP].options)
                    forgery = Ether()/forgery_ip/forgery_tcp/response
                    sendp(forgery)
                    forgery.show()
        counters['ip_recovered'] += 1
    return simple_packet
| 48.394636 | 212 | 0.600032 |
80279acdd7984ac6d5efd33c73c55dc2e063652a | 5,532 | py | Python | src/test/data/pa2/student_contributed/good.py | Leo-Enrique-Wu/chocopy_compiler_semantic_analysis | e89f6434dd7d274d4838457316143f312226495f | [
"BSD-2-Clause"
] | null | null | null | src/test/data/pa2/student_contributed/good.py | Leo-Enrique-Wu/chocopy_compiler_semantic_analysis | e89f6434dd7d274d4838457316143f312226495f | [
"BSD-2-Clause"
] | null | null | null | src/test/data/pa2/student_contributed/good.py | Leo-Enrique-Wu/chocopy_compiler_semantic_analysis | e89f6434dd7d274d4838457316143f312226495f | [
"BSD-2-Clause"
] | null | null | null | boolVar : bool = True
bool1Var : bool = True
bool2Var : bool = True
objVar : object = None
int1Var : int = 0
int2Var : int = 0
str1Var : str = "apple"
str2Var : str = "banana"
intList : [int] = None
int1List : [int] = None
# Type checking rule: Literals > bool-false
boolFalseLiteral : bool = False
# Type checking rule: Literals > bool-true
boolTrueLiteral : bool = True
# Type checking rule: Literals > int
intLiteral : int = 1
# Type checking rule: Literals > str
strLiteral : str = "Hello"
# Type checking rule: Literals > None
noneIntList : [int] = None
# Type checking rule: Function Definitions > [func-def] > with return
def func1(int1Var : str) -> str:
    # Parameter `int1Var` shadows the global variable of the same name for
    # the duration of this function's scope.  Each statement in the body is
    # type-checked and its type inferred: [str-select] on a str yields a
    # str, which matches the declared return type.
    int1Var = int1Var[1]
    return int1Var
# Type checking rule: Function Definitions > [func-def] > without return
def func2(int1Var : str, int2Var: str):
    # [func-def] without a declared return type (implicitly <None>).
    # Parameter `int1Var` shadows the global of the same name; the body is
    # type-checked statement by statement ([str-select] yields str).
    int1Var = int1Var[1]
# Type checking rule: Class Definitions
class class1(object):
    # [ATTR-INIT]: attribute declared with a literal initializer; `None`
    # is assignable because the declared type is `object`.
    int1Var : object = None
    # Type checking rule: Function Definitions > [method-def] > with return
    def class1Func1(self : "class1", int1Var : str, int2Var: str) -> str:
        # Parameter `int1Var` shadows both the global and the class
        # attribute of the same name inside the method body; [str-select]
        # yields str, matching the declared return type.
        int1Var = int1Var[1]
        return int1Var
    # Type checking rule: Function Definitions > [method-def] > without return
    def class1Func2(self : "class1", int1Var : str):
        # Same shadowing as above, but no declared return type.
        int1Var = int1Var[1]
# object of class1
class1obj1:class1 = None
class1obj2:class1 = None
# V==== global statements =====
# Type checking rule: Literals > int > range from (2^31 - 1) to -2^31
intLiteral = 2147483647
intLiteral = -2147483648
# Type checking rule: Arithmetic Operators > [negate]
intLiteral = - intLiteral
# Type checking rule: Arithmetic Operators > [arith] > +
intLiteral = intLiteral + 9
# Type checking rule: Arithmetic Operators > [arith] > -
intLiteral = intLiteral - 9
# Type checking rule: Arithmetic Operators > [arith] > *
intLiteral = intLiteral * 9
# Type checking rule: Arithmetic Operators > [arith] > //
intLiteral = intLiteral // 9
# Type checking rule: Arithmetic Operators > [arith] > %
intLiteral = intLiteral % 2
# Type checking rule: Logical Operators > [bool-compare] > ==
boolVar = (1 == 1)
# Type checking rule: Logical Operators > [bool-compare] > !=
boolVar = (1 != 1)
# Type checking rule: Logical Operators > [bool-compare] > [and]
boolVar = (True and True)
# Type checking rule: Logical Operators > [bool-compare] > [or]
boolVar = (True or False)
# Type checking rule: Logical Operators > [bool-compare] > [not]
boolVar = (not True)
# Type checking rule: Conditional Expressions > [cond]
objVar = "Hello" if boolVar else 0
# Type checking rule: String Operations > [str-concat]
strLiteral = strLiteral + "World"
# Type checking rule: String Operations > [str-select]
strLiteral = strLiteral[1]
# Type checking rule: Conditional Statements
# type checking conditon statements and infer its type
if intLiteral > 10:
print(strLiteral)
elif intLiteral > 0:
# type checking every statements inside the body,
# and infer its type
strLiteral = strLiteral + "XXX"
print(strLiteral)
else:
strLiteral = strLiteral + "YYY"
# Type checking rule: The Global Typing Environment > default function: print
print(strLiteral)
# Type checking rule: While Statements
# type checking conditon statements and infer its type
while intLiteral != 9:
# type checking every statements inside the body,
# and infer its type
strLiteral = strLiteral + "YYY"
print(strLiteral)
# Type checking rule: For Statements > [for-str]
for objVar in strLiteral:
# type checking every statements inside the body,
# and infer its type
objVar = objVar
# Type checking rule: For Statements > [for-list]
for int1Var in intList:
# type checking every statements inside the body,
# and infer its type
intLiteral = intLiteral + int1Var
# Type checking rule: The Global Typing Environment > default function: len
intLiteral = len(intList)
# Type checking rule: The Global Typing Environment > default function: input
strLiteral = input()
# Type checking rule: Relational Operations
# operator: "=="
int1Var == int2Var
bool1Var== bool2Var
str1Var == str2Var
# operator: "!="
int1Var != int2Var
bool1Var!= bool2Var
str1Var != str2Var
# operator: "<, <=, >, >="
int1Var < int2Var
int1Var <= int2Var
int1Var > int2Var
int1Var >= int2Var
# {expr} is {expr}
None is None
class1obj1 is class1obj2
# Type checking: Object Construction
class1obj1 = class1()
# Type checking: List Displays
int1List = [1,2,3,4]
# special case
intList = []
# Type checking: List Operators
# [LIST-CONCAT]
intList = intList + int1List
# [LIST-SELECT]
int1Var = intList[1]
# [LIST-ASSIGN-STMT]
intList[1] = 0
# Type checking: Attribute Access, Assignment, and Initialization
# [ATTR-READ]
class1obj1.int1Var
# [ATTR-ASSIGN-STMT]
class1obj1.int1Var = 0
# Type checking: Multiple Assignments
int1Var = int2Var = 0
# <Empty> <= [T]
int1List = intList = []
# Type checking: Function Application
func2("a", "b")
class1obj1.class1Func1("b", "c")
| 25.260274 | 78 | 0.713124 |
89a4e4367aaec3d52d06228912d5739b94525d1a | 14,763 | py | Python | official/benchmark/bert_benchmark.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 5 | 2020-11-16T06:26:19.000Z | 2022-03-27T02:01:40.000Z | official/benchmark/bert_benchmark.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 5 | 2020-11-13T18:50:30.000Z | 2022-02-10T01:42:36.000Z | official/benchmark/bert_benchmark.py | zcdzcdzcd/models | a31b526a7617a152a138a865b5689bf5b59f655d | [
"Apache-2.0"
] | 1 | 2020-06-24T08:17:17.000Z | 2020-06-24T08:17:17.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes BERT benchmarks and accuracy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import math
import os
import time
# pylint: disable=g-bad-import-order
from absl import flags
from absl.testing import flagsaver
import tensorflow as tf
# pylint: enable=g-bad-import-order
from official.benchmark import bert_benchmark_utils as benchmark_utils
from official.nlp import bert_modeling as modeling
from official.nlp.bert import run_classifier
from official.utils.misc import distribution_utils
from official.utils.testing import benchmark_wrappers
# pylint: disable=line-too-long
PRETRAINED_CHECKPOINT_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_model.ckpt'
CLASSIFIER_TRAIN_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_train.tf_record'
CLASSIFIER_EVAL_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_eval.tf_record'
CLASSIFIER_INPUT_META_DATA_PATH = 'gs://tf-perfzero-data/bert/classification/mrpc_meta_data'
MODEL_CONFIG_FILE_PATH = 'gs://cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16/bert_config.json'
# pylint: enable=line-too-long
TMP_DIR = os.getenv('TMPDIR')
FLAGS = flags.FLAGS
class BertClassifyBenchmarkBase(benchmark_utils.BertBenchmarkBase):
  """Base class to hold methods common to test classes in the module.

  Subclasses configure `FLAGS` (data paths, batch sizes, model dir) and then
  call `_run_bert_classifier` to execute one MRPC classification run.
  """

  def __init__(self, output_dir=None, tpu=None):
    super(BertClassifyBenchmarkBase, self).__init__(output_dir)
    # When left as None, epoch/step counts fall back to the FLAGS values
    # inside `_run_bert_classifier`.
    self.num_epochs = None
    self.num_steps_per_epoch = None
    self.tpu = tpu

  # flagsaver restores all absl FLAGS after the run, so each benchmark
  # method starts from a clean flag state.
  @flagsaver.flagsaver
  def _run_bert_classifier(self, callbacks=None, use_ds=True):
    """Starts BERT classification task."""
    with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
      input_meta_data = json.loads(reader.read().decode('utf-8'))
    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
    # Instance attributes (set by subclasses) take precedence over FLAGS.
    epochs = self.num_epochs if self.num_epochs else FLAGS.num_train_epochs
    if self.num_steps_per_epoch:
      steps_per_epoch = self.num_steps_per_epoch
    else:
      train_data_size = input_meta_data['train_data_size']
      steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
    # 10% of total training steps are used for learning-rate warmup.
    warmup_steps = int(epochs * steps_per_epoch * 0.1)
    eval_steps = int(
        math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))
    if self.tpu:
      strategy = distribution_utils.get_distribution_strategy(
          distribution_strategy='tpu', tpu_address=self.tpu)
    else:
      # 'off' runs without a distribution strategy (single device).
      strategy = distribution_utils.get_distribution_strategy(
          distribution_strategy='mirrored' if use_ds else 'off',
          num_gpus=self.num_gpus)

    steps_per_loop = 1

    max_seq_length = input_meta_data['max_seq_length']
    train_input_fn = run_classifier.get_dataset_fn(
        FLAGS.train_data_path,
        max_seq_length,
        FLAGS.train_batch_size,
        is_training=True)
    eval_input_fn = run_classifier.get_dataset_fn(
        FLAGS.eval_data_path,
        max_seq_length,
        FLAGS.eval_batch_size,
        is_training=False)
    run_classifier.run_bert_classifier(
        strategy,
        bert_config,
        input_meta_data,
        FLAGS.model_dir,
        epochs,
        steps_per_epoch,
        steps_per_loop,
        eval_steps,
        warmup_steps,
        FLAGS.learning_rate,
        FLAGS.init_checkpoint,
        train_input_fn,
        eval_input_fn,
        custom_callbacks=callbacks)
class BertClassifyBenchmarkReal(BertClassifyBenchmarkBase):
  """Short benchmark performance tests for BERT model.

  Tests BERT classification performance in different GPU, TPU configurations.
  The naming convention of below test cases follow
  `benchmark_(number of gpus)_gpu_(dataset type)` for GPUs and
  `benchmark_(topology)_tpu_(dataset type)` for TPUs.
  """

  def __init__(self, output_dir=TMP_DIR, tpu=None, **kwargs):
    super(BertClassifyBenchmarkReal, self).__init__(
        output_dir=output_dir, tpu=tpu)

    self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH
    self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH
    self.bert_config_file = MODEL_CONFIG_FILE_PATH
    self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH

    # Since we only care about performance metrics, we limit
    # the number of training steps and epochs to prevent unnecessarily
    # long tests.
    self.num_steps_per_epoch = 110
    self.num_epochs = 1

  def _setup(self):
    """Resets flags and sets the data/config flags shared by every test.

    Previously each benchmark method repeated the same four FLAGS
    assignments; centralizing them here mirrors
    `BertClassifyAccuracy._setup` so the per-test methods only contain
    what is unique to each configuration.
    """
    super(BertClassifyBenchmarkReal, self)._setup()
    FLAGS.train_data_path = self.train_data_path
    FLAGS.eval_data_path = self.eval_data_path
    FLAGS.input_meta_data_path = self.input_meta_data_path
    FLAGS.bert_config_file = self.bert_config_file

  def _summary_path(self):
    """Returns the path where the training loop writes its JSON summary.

    Must be called after FLAGS.model_dir has been set by the test method.
    """
    return os.path.join(FLAGS.model_dir, 'summaries/training_summary.txt')

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                training_summary_path,
                                min_accuracy=0,
                                max_accuracy=1,
                                use_ds=True):
    """Starts BERT performance benchmark test."""
    start_time_sec = time.time()
    self._run_bert_classifier(callbacks=[self.timer_callback], use_ds=use_ds)
    wall_time_sec = time.time() - start_time_sec

    with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:
      summary = json.loads(reader.read().decode('utf-8'))

    # Since we do not load from any pretrained checkpoints, we ignore all
    # accuracy metrics.
    summary.pop('eval_metrics', None)
    summary['start_time_sec'] = start_time_sec

    super(BertClassifyBenchmarkReal, self)._report_benchmark(
        stats=summary,
        wall_time_sec=wall_time_sec,
        min_accuracy=min_accuracy,
        max_accuracy=max_accuracy)

  def benchmark_1_gpu_mrpc(self):
    """Test BERT model performance with 1 GPU."""
    self._setup()
    self.num_gpus = 1
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc')
    FLAGS.train_batch_size = 4
    FLAGS.eval_batch_size = 4
    self._run_and_report_benchmark(self._summary_path())

  def benchmark_1_gpu_mrpc_xla(self):
    """Test BERT model performance with 1 GPU with XLA compilation enabled."""
    self._setup()
    self.num_gpus = 1
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_xla')
    FLAGS.train_batch_size = 4
    FLAGS.eval_batch_size = 4
    FLAGS.enable_xla = True
    self._run_and_report_benchmark(self._summary_path())

  def benchmark_1_gpu_mrpc_no_dist_strat(self):
    """Test BERT model performance with 1 GPU, no distribution strategy."""
    self._setup()
    self.num_gpus = 1
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_mrpc_no_dist_strat')
    FLAGS.train_batch_size = 4
    FLAGS.eval_batch_size = 4
    self._run_and_report_benchmark(self._summary_path(), use_ds=False)

  def benchmark_2_gpu_mrpc(self):
    """Test BERT model performance with 2 GPUs."""
    self._setup()
    self.num_gpus = 2
    FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu_mrpc')
    FLAGS.train_batch_size = 8
    FLAGS.eval_batch_size = 8
    self._run_and_report_benchmark(self._summary_path())

  def benchmark_4_gpu_mrpc(self):
    """Test BERT model performance with 4 GPUs."""
    self._setup()
    self.num_gpus = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_4_gpu_mrpc')
    FLAGS.train_batch_size = 16
    self._run_and_report_benchmark(self._summary_path())

  def benchmark_8_gpu_mrpc(self):
    """Test BERT model performance with 8 GPUs (default flag batch sizes)."""
    self._setup()
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc')
    self._run_and_report_benchmark(self._summary_path())

  def benchmark_1_gpu_amp_mrpc_no_dist_strat(self):
    """Performance for 1 GPU no DS with automatic mixed precision."""
    self._setup()
    self.num_gpus = 1
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_amp_mrpc_no_dist_strat')
    FLAGS.train_batch_size = 4
    FLAGS.eval_batch_size = 4
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    self._run_and_report_benchmark(self._summary_path(), use_ds=False)

  def benchmark_8_gpu_amp_mrpc(self):
    """Test BERT model performance with 8 GPUs with automatic mixed precision.
    """
    self._setup()
    self.num_gpus = 8
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp_mrpc')
    FLAGS.train_batch_size = 32
    FLAGS.eval_batch_size = 32
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    self._run_and_report_benchmark(self._summary_path(), use_ds=False)

  def benchmark_2x2_tpu_mrpc(self):
    """Test BERT model performance with 2x2 TPU."""
    self._setup()
    FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_mrpc')
    FLAGS.train_batch_size = 32
    FLAGS.eval_batch_size = 32
    self._run_and_report_benchmark(self._summary_path(), use_ds=False)
class BertClassifyAccuracy(BertClassifyBenchmarkBase):
  """Short accuracy test for BERT model.

  Tests BERT classification task model accuracy. The naming
  convention of below test cases follow
  `benchmark_(number of gpus)_gpu_(dataset type)` format.
  """

  def __init__(self, output_dir=TMP_DIR, **kwargs):
    # Unlike the performance benchmarks, accuracy runs start from a
    # pretrained checkpoint so the fine-tuned metrics are meaningful.
    self.train_data_path = CLASSIFIER_TRAIN_DATA_PATH
    self.eval_data_path = CLASSIFIER_EVAL_DATA_PATH
    self.bert_config_file = MODEL_CONFIG_FILE_PATH
    self.input_meta_data_path = CLASSIFIER_INPUT_META_DATA_PATH
    self.pretrained_checkpoint_path = PRETRAINED_CHECKPOINT_PATH

    super(BertClassifyAccuracy, self).__init__(output_dir=output_dir)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                training_summary_path,
                                min_accuracy=0.84,
                                max_accuracy=0.88):
    """Starts BERT accuracy benchmark test."""
    start_time_sec = time.time()
    self._run_bert_classifier(callbacks=[self.timer_callback])
    wall_time_sec = time.time() - start_time_sec

    with tf.io.gfile.GFile(training_summary_path, 'rb') as reader:
      summary = json.loads(reader.read().decode('utf-8'))

    # Accuracy metrics are kept (not popped) here, and the reported
    # eval accuracy must land inside [min_accuracy, max_accuracy].
    super(BertClassifyAccuracy, self)._report_benchmark(
        stats=summary,
        wall_time_sec=wall_time_sec,
        min_accuracy=min_accuracy,
        max_accuracy=max_accuracy)

  def _setup(self):
    """Resets flags and applies the data/config/checkpoint flags common
    to every accuracy test."""
    super(BertClassifyAccuracy, self)._setup()
    FLAGS.train_data_path = self.train_data_path
    FLAGS.eval_data_path = self.eval_data_path
    FLAGS.input_meta_data_path = self.input_meta_data_path
    FLAGS.bert_config_file = self.bert_config_file
    FLAGS.init_checkpoint = self.pretrained_checkpoint_path

  def benchmark_8_gpu_mrpc(self):
    """Run BERT model accuracy test with 8 GPUs.

    Due to comparatively small cardinality of MRPC dataset, training
    accuracy metric has high variance between trainings. As so, we
    set the wide range of allowed accuracy (84% to 88%).
    """
    self._setup()
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc')

    summary_path = os.path.join(FLAGS.model_dir,
                                'summaries/training_summary.txt')
    self._run_and_report_benchmark(summary_path)

  def benchmark_8_gpu_mrpc_xla(self):
    """Run BERT model accuracy test with 8 GPUs with XLA."""
    self._setup()
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mrpc_xla')
    FLAGS.enable_xla = True
    summary_path = os.path.join(FLAGS.model_dir,
                                'summaries/training_summary.txt')
    self._run_and_report_benchmark(summary_path)
if __name__ == '__main__':
  # Allow running the benchmark classes above as a standalone TF test binary.
  tf.test.main()
| 37.951157 | 114 | 0.723837 |
1612c40bef16aa9ac5621ab241da283aab2292a0 | 1,430 | py | Python | libbgp/tests/unit/bgp/attribute/test_origin.py | smartbgp/libbgp | 371d43b3655c40067725f5066c50391606154c60 | [
"Apache-2.0"
] | 14 | 2017-01-20T11:26:28.000Z | 2021-05-12T22:38:48.000Z | libbgp/tests/unit/bgp/attribute/test_origin.py | smartbgp/libbgp | 371d43b3655c40067725f5066c50391606154c60 | [
"Apache-2.0"
] | null | null | null | libbgp/tests/unit/bgp/attribute/test_origin.py | smartbgp/libbgp | 371d43b3655c40067725f5066c50391606154c60 | [
"Apache-2.0"
] | 2 | 2017-09-04T05:54:10.000Z | 2017-11-15T17:02:46.000Z | # Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import struct
from libbgp.bgp.update.attribute.origin import Origin
from libbgp.exception import BGPNotification
class TestOrigin(unittest.TestCase):
    """Unit tests for packing/unpacking the BGP ORIGIN path attribute."""

    def test_unpack(self):
        # Each well-known origin code (IGP=0, EGP=1, INCOMPLETE=2) is
        # decoded back to its integer value.
        for raw in (b'\x00', b'\x01', b'\x02'):
            attr = Origin.unpack(data=raw, capability={})
            self.assertEqual(ord(raw[0:1]), attr.value)

    def test_bad_origin_type(self):
        # Code points above 2 are invalid and must raise a notification.
        self.assertRaises(BGPNotification, Origin.unpack, b'\x05', {})

    def test_pack_bad_origin_type(self):
        self.assertRaises(BGPNotification, Origin.pack, 5, {})

    def test_pack(self):
        # Packing a valid code yields the single network-order byte.
        for code in (0, 1, 2):
            packed = Origin.pack(code, {})
            self.assertEqual(struct.pack('!B', code), packed.hex_value)
unittest.main()
| 31.777778 | 91 | 0.691608 |
268935c08949fe99f93a9f5582adce2ebcf25785 | 1,415 | py | Python | DLCoursera_part1_week2_logistic_regression.py | zhouhan921001/DeepLearning-homework | 20562dc49ca5898b531a678c0e54c8d985fcc72f | [
"MIT"
] | null | null | null | DLCoursera_part1_week2_logistic_regression.py | zhouhan921001/DeepLearning-homework | 20562dc49ca5898b531a678c0e54c8d985fcc72f | [
"MIT"
] | null | null | null | DLCoursera_part1_week2_logistic_regression.py | zhouhan921001/DeepLearning-homework | 20562dc49ca5898b531a678c0e54c8d985fcc72f | [
"MIT"
] | null | null | null | import numpy as np
# NOTE(review): `train_set_x_orig` / `test_set_x_orig` are not defined in this
# file -- presumably loaded earlier by the course's dataset helper; confirm
# before running this script standalone.
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]  # assumes square images -- TODO confirm
# reshape data: flatten each example into a column vector (one column per example)
train_set_x = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_set_x = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
def sigmoid(z):
    """Evaluate the logistic function 1 / (1 + e^-z) elementwise on z."""
    return 1 / (1 + np.exp(-z))
def initialize(dim):
    """Return zero-initialized weights of shape (dim, 1) and a zero bias."""
    return np.zeros(shape=(dim, 1)), 0
def propagate(w, b, X, Y):
    """One forward/backward pass of logistic regression.

    Returns the gradients (dw, db) of the cross-entropy cost with respect
    to the weights and bias, averaged over the m training columns of X.
    """
    m = X.shape[1]
    # forward pass: activation for every example at once
    A = sigmoid(np.dot(w.T, X) + b)
    # backward pass: gradient of the cost
    residual = A - Y
    dw = 1/m * np.dot(X, residual.T)
    db = 1/m * np.sum(residual)
    return dw, db
def optimize(X, Y, w, b, num_iter, learning_rate):
    """Run `num_iter` steps of gradient descent; return the updated (w, b)."""
    for _ in range(num_iter):
        dw, db = propagate(w, b, X, Y)
        w = w - learning_rate * dw
        b = b - learning_rate * db
    return w, b
def predict(w, b, X):
    """Predict a 0/1 label for every column (example) of X.

    Returns a (1, m) array of predictions, thresholding the sigmoid
    activation at 0.5.

    Bug fix: the original body overwrote a scalar `Y_predict` on every loop
    iteration, so it returned only the prediction for the LAST example
    instead of an array covering all of them.
    """
    A = sigmoid(np.dot(w.T, X) + b)
    Y_predict = np.zeros((1, A.shape[1]))
    for i in range(A.shape[1]):
        Y_predict[0, i] = 1 if A[0, i] > 0.5 else 0
    return Y_predict
def model(X_train, Y_train, X_test, Y_test, num_iter, learning_rate):
    """Train logistic regression on (X_train, Y_train), print train/test
    accuracy, and return the learned (w, b).

    Bug fixes:
    - weights were sized from the *global* `train_set_x` instead of the
      `X_train` parameter, breaking the function for any other input;
    - `np.mean(np.abs(Y - Y_hat)) * 100` is the error rate, so the printed
      "accuracy" was actually 100 - accuracy.
    """
    w, b = initialize(X_train.shape[0])
    w, b = optimize(X_train, Y_train, w, b, num_iter, learning_rate)
    Y_predict = predict(w, b, X_test)
    Y_predict_train = predict(w, b, X_train)
    print("test_accuracy: {} %".format(100 - np.mean(np.abs(Y_test - Y_predict)) * 100))
    print("train_accuracy: {} %".format(100 - np.mean(np.abs(Y_train - Y_predict_train)) * 100))
    return w, b
5f0afb02668ec10cb4a208cc1b75543ffaa591ad | 2,459 | py | Python | YOLOw-Keras/yad2k/models/keras_darknet19.py | yehudabetito/yolo_nets_uda | a3ae826e69289fbc16a43b0644fb37063a1952fe | [
"MIT"
] | 2 | 2019-08-05T07:18:11.000Z | 2020-04-20T11:31:20.000Z | YOLOw-Keras/yad2k/models/keras_darknet19.py | yehudabetito/yolo_nets_uda | a3ae826e69289fbc16a43b0644fb37063a1952fe | [
"MIT"
] | null | null | null | YOLOw-Keras/yad2k/models/keras_darknet19.py | yehudabetito/yolo_nets_uda | a3ae826e69289fbc16a43b0644fb37063a1952fe | [
"MIT"
] | 1 | 2019-08-05T07:18:22.000Z | 2019-08-05T07:18:22.000Z | """Darknet19 Model Defined in Keras."""
import functools
from functools import partial
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from ..utils import compose
# Partial wrapper for Convolution2D with static default argument.
_DarknetConv2D = partial(Conv2D, padding='same')
@functools.wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet weight regularizer for Convolution2D."""
    # Every Darknet conv layer carries the same 5e-4 L2 weight decay;
    # a caller-supplied kernel_regularizer in kwargs still overrides it.
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
    darknet_conv_kwargs.update(kwargs)
    return _DarknetConv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    conv_kwargs = dict(kwargs)
    # The conv bias is disabled by default (batch norm supplies the shift),
    # but an explicit caller-supplied `use_bias` still wins -- matching the
    # original update() ordering.
    conv_kwargs.setdefault('use_bias', False)
    return compose(
        DarknetConv2D(*args, **conv_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))
def bottleneck_block(outer_filters, bottleneck_filters):
    """Bottleneck block of 3x3, 1x1, 3x3 convolutions."""
    # Layers are created in chain order on purpose: Keras auto-naming
    # counters depend on creation order.
    conv3x3_in = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    conv1x1_squeeze = DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1))
    conv3x3_out = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    return compose(conv3x3_in, conv1x1_squeeze, conv3x3_out)
def bottleneck_x2_block(outer_filters, bottleneck_filters):
    """Bottleneck block of 3x3, 1x1, 3x3, 1x1, 3x3 convolutions."""
    # First three convs come from bottleneck_block; two more are appended.
    # Creation order matches the chain order (see bottleneck_block).
    base = bottleneck_block(outer_filters, bottleneck_filters)
    extra_squeeze = DarknetConv2D_BN_Leaky(bottleneck_filters, (1, 1))
    extra_expand = DarknetConv2D_BN_Leaky(outer_filters, (3, 3))
    return compose(base, extra_squeeze, extra_expand)
def darknet_body():
    """Generate first 18 conv layers of Darknet-19."""
    # Five MaxPooling2D stages halve the spatial resolution while the
    # filter count doubles: 32 -> 64 -> 128 -> 256 -> 512 -> 1024.
    # Conv layer count: 1 + 1 + 3 + 3 + 5 + 5 = 18.
    return compose(
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(),
        bottleneck_block(128, 64),
        MaxPooling2D(),
        bottleneck_block(256, 128),
        MaxPooling2D(),
        bottleneck_x2_block(512, 256),
        MaxPooling2D(),
        bottleneck_x2_block(1024, 512))
def darknet19(inputs):
    """Generate Darknet-19 model for Imagenet classification."""
    features = darknet_body()(inputs)
    # The 19th conv: a 1x1 softmax head over the 1000 Imagenet classes.
    logits = DarknetConv2D(1000, (1, 1), activation='softmax')(features)
    return Model(inputs, logits)
| 34.152778 | 78 | 0.690931 |
432137935672a4a4741ecf2b4de3ee7c6c01971f | 71 | py | Python | setup.py | fanninpm/xopen | a7f650f8fcde4972f889c27a2ae8a80e0c9a9b06 | [
"MIT"
] | 24 | 2017-12-09T01:27:59.000Z | 2020-09-28T16:26:00.000Z | setup.py | fanninpm/xopen | a7f650f8fcde4972f889c27a2ae8a80e0c9a9b06 | [
"MIT"
] | 61 | 2021-01-28T10:34:22.000Z | 2022-03-28T21:03:39.000Z | setup.py | fanninpm/xopen | a7f650f8fcde4972f889c27a2ae8a80e0c9a9b06 | [
"MIT"
] | 11 | 2016-09-15T18:53:50.000Z | 2021-01-19T12:58:36.000Z | from setuptools import setup
setup(setup_requires=["setuptools_scm"])
| 17.75 | 40 | 0.816901 |
c29c47a94d9ccf56c7f1f6092ca8accf5ade1341 | 8,079 | py | Python | tests/core/full_node/test_conditions.py | fugginoob/taco-blockchain | bee7b5d2ba555728438f7d13cd80f7f8ca3bd353 | [
"Apache-2.0"
] | null | null | null | tests/core/full_node/test_conditions.py | fugginoob/taco-blockchain | bee7b5d2ba555728438f7d13cd80f7f8ca3bd353 | [
"Apache-2.0"
] | null | null | null | tests/core/full_node/test_conditions.py | fugginoob/taco-blockchain | bee7b5d2ba555728438f7d13cd80f7f8ca3bd353 | [
"Apache-2.0"
] | null | null | null | """
These are quick-to-run test that check spends can be added to the blockchain when they're valid
or that they're failing for the right reason when they're invalid.
"""
import logging
import time
from typing import List, Optional
import pytest
from blspy import G2Element
from clvm_tools.binutils import assemble
from taco.consensus.blockchain import ReceiveBlockResult
from taco.consensus.constants import ConsensusConstants
from taco.types.announcement import Announcement
from taco.types.blockchain_format.program import Program
from taco.types.coin_solution import CoinSolution
from taco.types.condition_opcodes import ConditionOpcode
from taco.types.full_block import FullBlock
from taco.types.spend_bundle import SpendBundle
from taco.util.block_tools import BlockTools, test_constants
from taco.util.errors import Err
from .ram_db import create_ram_blockchain
bt = BlockTools(constants=test_constants)
log = logging.getLogger(__name__)
# This puzzle simply returns the solution as conditions.
# We call it the `EASY_PUZZLE` because it's pretty easy to solve.
EASY_PUZZLE = Program.to(assemble("1"))
EASY_PUZZLE_HASH = EASY_PUZZLE.get_tree_hash()
def initial_blocks(block_count: int = 4) -> List[FullBlock]:
    """Create `block_count` consecutive transaction blocks whose farmer and
    pool rewards all pay to the trivially spendable EASY_PUZZLE."""
    return bt.get_consecutive_blocks(
        block_count,
        guarantee_transaction_block=True,
        farmer_reward_puzzle_hash=EASY_PUZZLE_HASH,
        pool_reward_puzzle_hash=EASY_PUZZLE_HASH,
    )
async def check_spend_bundle_validity(
    constants: ConsensusConstants,
    blocks: List[FullBlock],
    spend_bundle: SpendBundle,
    expected_err: Optional[Err] = None,
):
    """
    This test helper creates an extra block after the given blocks that contains the given
    `SpendBundle`, and then invokes `receive_block` to ensure that it's accepted (if `expected_err=None`)
    or fails with the correct error code.
    """
    # Bug fix: create the blockchain *before* entering the try-block.  In the
    # original code, a failure inside create_ram_blockchain left `connection`
    # and `blockchain` unbound, so the finally clause raised NameError and
    # masked the real error.
    connection, blockchain = await create_ram_blockchain(constants)
    try:
        for block in blocks:
            received_block_result, err, fork_height = await blockchain.receive_block(block)
            assert err is None

        additional_blocks = bt.get_consecutive_blocks(
            1,
            block_list_input=blocks,
            guarantee_transaction_block=True,
            transaction_data=spend_bundle,
        )
        newest_block = additional_blocks[-1]

        received_block_result, err, fork_height = await blockchain.receive_block(newest_block)

        if expected_err is None:
            assert err is None
            assert received_block_result == ReceiveBlockResult.NEW_PEAK
            assert fork_height == len(blocks) - 1
        else:
            assert err == expected_err
            assert received_block_result == ReceiveBlockResult.INVALID_BLOCK
            assert fork_height is None

    finally:
        # if we don't close the connection, the test process doesn't exit cleanly
        await connection.close()

        # we must call `shut_down` or the executor in `Blockchain` doesn't stop
        blockchain.shut_down()
async def check_conditions(
    condition_solution: Program, expected_err: Optional[Err] = None, spend_reward_index: int = -2
):
    """Spend one EASY_PUZZLE reward coin with `condition_solution` as its
    solution (the puzzle echoes the solution back as conditions) and check
    the resulting block is accepted, or rejected with `expected_err`."""
    blocks = initial_blocks()
    coin = list(blocks[spend_reward_index].get_included_reward_coins())[0]

    coin_solution = CoinSolution(coin, EASY_PUZZLE, condition_solution)
    # The empty G2Element works because EASY_PUZZLE demands no signatures.
    spend_bundle = SpendBundle([coin_solution], G2Element())

    # Build a block containing the spend bundle and validate it against the
    # expected outcome (see check_spend_bundle_validity).
    await check_spend_bundle_validity(bt.constants, blocks, spend_bundle, expected_err=expected_err)
class TestConditions:
@pytest.mark.asyncio
async def test_invalid_block_age(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]} 2))"))
await check_conditions(conditions, expected_err=Err.ASSERT_HEIGHT_RELATIVE_FAILED)
@pytest.mark.asyncio
async def test_valid_block_age(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_RELATIVE[0]} 1))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_block_height(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]} 4))"))
await check_conditions(conditions, expected_err=Err.ASSERT_HEIGHT_ABSOLUTE_FAILED)
@pytest.mark.asyncio
async def test_valid_block_height(self):
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE[0]} 3))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_my_id(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
wrong_name = bytearray(coin.name())
wrong_name[-1] ^= 1
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{wrong_name.hex()}))"))
await check_conditions(conditions, expected_err=Err.ASSERT_MY_COIN_ID_FAILED)
@pytest.mark.asyncio
async def test_valid_my_id(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_MY_COIN_ID[0]} 0x{coin.name().hex()}))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_seconds_absolute(self):
# TODO: make the test suite not use `time.time` so we can more accurately
# set `time_now` to make it minimal while still failing
time_now = int(time.time()) + 3000
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]} {time_now}))"))
await check_conditions(conditions, expected_err=Err.ASSERT_SECONDS_ABSOLUTE_FAILED)
@pytest.mark.asyncio
async def test_valid_seconds_absolute(self):
time_now = int(time.time())
conditions = Program.to(assemble(f"(({ConditionOpcode.ASSERT_SECONDS_ABSOLUTE[0]} {time_now}))"))
await check_conditions(conditions)
@pytest.mark.asyncio
async def test_invalid_coin_announcement(self):
blocks = initial_blocks()
coin = list(blocks[-2].get_included_reward_coins())[0]
announce = Announcement(coin.name(), b"test_bad")
conditions = Program.to(
assemble(
f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
)
)
await check_conditions(conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
    @pytest.mark.asyncio
    async def test_valid_coin_announcement(self):
        # Create and assert the *same* coin announcement ('test') in one
        # spend; the assertion is consumed and validation succeeds.
        blocks = initial_blocks()
        coin = list(blocks[-2].get_included_reward_coins())[0]
        announce = Announcement(coin.name(), b"test")
        conditions = Program.to(
            assemble(
                f"(({ConditionOpcode.CREATE_COIN_ANNOUNCEMENT[0]} 'test')"
                f"({ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
            )
        )
        await check_conditions(conditions)
    @pytest.mark.asyncio
    async def test_invalid_puzzle_announcement(self):
        # Puzzle announcement 'test' is created but the assertion targets
        # 'test_bad', so it cannot be consumed.
        announce = Announcement(EASY_PUZZLE_HASH, b"test_bad")
        conditions = Program.to(
            assemble(
                f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
                f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
            )
        )
        await check_conditions(conditions, expected_err=Err.ASSERT_ANNOUNCE_CONSUMED_FAILED)
    @pytest.mark.asyncio
    async def test_valid_puzzle_announcement(self):
        # Matching create/assert pair for a puzzle announcement: succeeds.
        announce = Announcement(EASY_PUZZLE_HASH, b"test")
        conditions = Program.to(
            assemble(
                f"(({ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT[0]} 'test')"
                f"({ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT[0]} 0x{announce.name().hex()}))"
            )
        )
        await check_conditions(conditions)
| 38.655502 | 110 | 0.700706 |
10360792cd34b91518fb64acce7d11733a671b54 | 5,398 | py | Python | neutron/plugins/ml2/drivers/type_gre.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ml2/drivers/type_gre.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ml2/drivers/type_gre.py | venkataanil/juno_neutron | 2e62e150c264ccae2dd75fb78caae453eaa77e9f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from oslo.db import exception as db_exc
from six import moves
import sqlalchemy as sa
from sqlalchemy import sql
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common.gettextutils import _LE, _LW
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
# Config options registered under the [ml2_type_gre] section.
gre_opts = [
    cfg.ListOpt('tunnel_id_ranges',
                default=[],
                # NOTE(review): bare `_` is used although only _LE/_LW are
                # imported above -- presumably installed as a builtin by
                # neutron's i18n setup; confirm.
                help=_("Comma-separated list of <tun_min>:<tun_max> tuples "
                       "enumerating ranges of GRE tunnel IDs that are "
                       "available for tenant network allocation"))
]
cfg.CONF.register_opts(gre_opts, "ml2_type_gre")
class GreAllocation(model_base.BASEV2):
    # One row per GRE tunnel ID known to the driver; `allocated` records
    # whether the ID is currently handed out to a tenant network.
    __tablename__ = 'ml2_gre_allocations'
    gre_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                       autoincrement=False)
    allocated = sa.Column(sa.Boolean, nullable=False, default=False,
                          server_default=sql.false())
class GreEndpoints(model_base.BASEV2):
    """Represents tunnel endpoint in RPC mode."""
    __tablename__ = 'ml2_gre_endpoints'
    # IP address of the host terminating the GRE tunnel (primary key).
    ip_address = sa.Column(sa.String(64), primary_key=True)
    def __repr__(self):
        return "<GreTunnelEndpoint(%s)>" % self.ip_address
class GreTypeDriver(type_tunnel.TunnelTypeDriver):
    """ML2 tunnel type driver managing GRE tunnel ID allocation/endpoints."""

    def __init__(self):
        # The tunnel base class keeps the allocation bookkeeping on the
        # GreAllocation model defined above.
        super(GreTypeDriver, self).__init__(GreAllocation)

    def get_type(self):
        return p_const.TYPE_GRE

    def initialize(self):
        try:
            self._initialize(cfg.CONF.ml2_type_gre.tunnel_id_ranges)
        except exc.NetworkTunnelRangeError:
            # Consistency fix: use the imported _LE marker for error-level
            # log messages (bare `_` is not imported in this module; _LE/_LW
            # are what the rest of the file uses).
            LOG.exception(_LE("Failed to parse tunnel_id_ranges. "
                              "Service terminated!"))
            raise SystemExit()

    def sync_allocations(self):
        # determine current configured allocatable gres
        gre_ids = set()
        for gre_id_range in self.tunnel_ranges:
            tun_min, tun_max = gre_id_range
            if tun_max + 1 - tun_min > 1000000:
                LOG.error(_LE("Skipping unreasonable gre ID range "
                              "%(tun_min)s:%(tun_max)s"),
                          {'tun_min': tun_min, 'tun_max': tun_max})
            else:
                gre_ids |= set(moves.xrange(tun_min, tun_max + 1))
        session = db_api.get_session()
        try:
            self._add_allocation(session, gre_ids)
        except db_exc.DBDuplicateEntry:
            # in case multiple neutron-servers start allocations could be
            # already added by different neutron-server. because this function
            # is called only when initializing this type driver, it's safe to
            # assume allocations were added.
            LOG.warning(_LW("Gre allocations were already created."))

    def _add_allocation(self, session, gre_ids):
        # Reconcile the allocation table with the configured ID ranges.
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            allocs = (session.query(GreAllocation).all())
            for alloc in allocs:
                try:
                    # see if tunnel is allocatable
                    gre_ids.remove(alloc.gre_id)
                except KeyError:
                    # it's not allocatable, so check if its allocated
                    if not alloc.allocated:
                        # it's not, so remove it from table
                        LOG.debug("Removing tunnel %s from pool", alloc.gre_id)
                        session.delete(alloc)
            # add missing allocatable tunnels to table
            for gre_id in sorted(gre_ids):
                alloc = GreAllocation(gre_id=gre_id)
                session.add(alloc)

    def get_endpoints(self):
        """Get every gre endpoints from database."""
        LOG.debug("get_gre_endpoints() called")
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            gre_endpoints = session.query(GreEndpoints)
            return [{'ip_address': gre_endpoint.ip_address}
                    for gre_endpoint in gre_endpoints]

    def add_endpoint(self, ip):
        # Insert the endpoint; on a duplicate, return the existing row.
        LOG.debug("add_gre_endpoint() called for ip %s", ip)
        session = db_api.get_session()
        try:
            gre_endpoint = GreEndpoints(ip_address=ip)
            gre_endpoint.save(session)
        except db_exc.DBDuplicateEntry:
            gre_endpoint = (session.query(GreEndpoints).
                            filter_by(ip_address=ip).one())
            # Consistency fix: warning-level messages use the _LW marker,
            # matching sync_allocations above.
            LOG.warning(_LW("Gre endpoint with ip %s already exists"), ip)
        return gre_endpoint
| 37.486111 | 79 | 0.634309 |
7e287e269fa87792abc5179bc73420f0bf52d752 | 14,064 | py | Python | test/base/test_batch.py | duburcqa/tianshou | 26fb87433de0d2604078ecbc99502efe0a815d5d | [
"MIT"
] | 1 | 2021-12-18T02:31:04.000Z | 2021-12-18T02:31:04.000Z | test/base/test_batch.py | duburcqa/tianshou | 26fb87433de0d2604078ecbc99502efe0a815d5d | [
"MIT"
] | null | null | null | test/base/test_batch.py | duburcqa/tianshou | 26fb87433de0d2604078ecbc99502efe0a815d5d | [
"MIT"
] | null | null | null | import torch
import copy
import pickle
import pytest
import numpy as np
from tianshou.data import Batch, to_torch
def test_batch():
    """Exercise Batch construction, update, indexing, slicing, arithmetic
    and automatic type conversion."""
    assert list(Batch()) == []
    assert Batch().is_empty()
    assert Batch(b={'c': {}}).is_empty()
    assert len(Batch(a=[1, 2, 3], b={'c': {}})) == 3
    assert not Batch(a=[1, 2, 3]).is_empty()
    b = Batch()
    b.update()
    assert b.is_empty()
    b.update(c=[3, 5])
    assert np.allclose(b.c, [3, 5])
    # mimic the behavior of dict.update, where kwargs can overwrite keys
    b.update({'a': 2}, a=3)
    assert b.a == 3
    with pytest.raises(AssertionError):
        Batch({1: 2})
    batch = Batch(a=[torch.ones(3), torch.ones(3)])
    assert torch.allclose(batch.a, torch.ones(2, 3))
    batch = Batch(obs=[0], np=np.zeros([3, 4]))
    assert batch.obs == batch["obs"]
    batch.obs = [1]
    assert batch.obs == [1]
    batch.cat_(batch)
    assert np.allclose(batch.obs, [1, 1])
    assert batch.np.shape == (6, 4)
    assert np.allclose(batch[0].obs, batch[1].obs)
    batch.obs = np.arange(5)
    for i, b in enumerate(batch.split(1, shuffle=False)):
        if i != 5:
            assert b.obs == batch[i].obs
        else:
            with pytest.raises(AttributeError):
                batch[i].obs
            with pytest.raises(AttributeError):
                b.obs
    print(batch)
    batch_dict = {'b': np.array([1.0]), 'c': 2.0, 'd': torch.Tensor([3.0])}
    batch_item = Batch({'a': [batch_dict]})[0]
    assert isinstance(batch_item.a.b, np.ndarray)
    assert batch_item.a.b == batch_dict['b']
    assert isinstance(batch_item.a.c, float)
    assert batch_item.a.c == batch_dict['c']
    assert isinstance(batch_item.a.d, torch.Tensor)
    assert batch_item.a.d == batch_dict['d']
    batch2 = Batch(a=[{
        'b': np.float64(1.0),
        'c': np.zeros(1),
        'd': Batch(e=np.array(3.0))}])
    assert len(batch2) == 1
    assert Batch().shape == []
    assert batch2.shape[0] == 1
    with pytest.raises(IndexError):
        batch2[-2]
    with pytest.raises(IndexError):
        batch2[1]
    assert batch2[0].shape == []
    with pytest.raises(IndexError):
        batch2[0][0]
    with pytest.raises(TypeError):
        len(batch2[0])
    assert isinstance(batch2[0].a.c, np.ndarray)
    assert isinstance(batch2[0].a.b, np.float64)
    assert isinstance(batch2[0].a.d.e, np.float64)
    batch2_from_list = Batch(list(batch2))
    batch2_from_comp = Batch([e for e in batch2])
    assert batch2_from_list.a.b == batch2.a.b
    assert batch2_from_list.a.c == batch2.a.c
    assert batch2_from_list.a.d.e == batch2.a.d.e
    assert batch2_from_comp.a.b == batch2.a.b
    assert batch2_from_comp.a.c == batch2.a.c
    assert batch2_from_comp.a.d.e == batch2.a.d.e
    for batch_slice in [
            batch2[slice(0, 1)], batch2[:1], batch2[0:]]:
        assert batch_slice.a.b == batch2.a.b
        assert batch_slice.a.c == batch2.a.c
        assert batch_slice.a.d.e == batch2.a.d.e
    batch2_sum = (batch2 + 1.0) * 2
    assert batch2_sum.a.b == (batch2.a.b + 1.0) * 2
    assert batch2_sum.a.c == (batch2.a.c + 1.0) * 2
    assert batch2_sum.a.d.e == (batch2.a.d.e + 1.0) * 2
    batch3 = Batch(a={
        'c': np.zeros(1),
        'd': Batch(e=np.array([0.0]), f=np.array([3.0]))})
    batch3.a.d[0] = {'e': 4.0}
    assert batch3.a.d.e[0] == 4.0
    batch3.a.d[0] = Batch(f=5.0)
    assert batch3.a.d.f[0] == 5.0
    with pytest.raises(KeyError):
        batch3.a.d[0] = Batch(f=5.0, g=0.0)
    # auto convert
    batch4 = Batch(a=np.array(['a', 'b']))
    assert batch4.a.dtype == np.object # auto convert to np.object
    batch4.update(a=np.array(['c', 'd']))
    assert list(batch4.a) == ['c', 'd']
    assert batch4.a.dtype == np.object # auto convert to np.object
    batch5 = Batch(a=np.array([{'index': 0}]))
    assert isinstance(batch5.a, Batch)
    assert np.allclose(batch5.a.index, [0])
    batch5.b = np.array([{'index': 1}])
    assert isinstance(batch5.b, Batch)
    assert np.allclose(batch5.b.index, [1])
def test_batch_over_batch():
    """Nested Batch values: in-place cat/update and advanced slicing."""
    batch = Batch(a=[3, 4, 5], b=[4, 5, 6])
    batch2 = Batch({'c': [6, 7, 8], 'b': batch})
    batch2.b.b[-1] = 0
    print(batch2)
    for k, v in batch2.items():
        assert np.all(batch2[k] == v)
    assert batch2[-1].b.b == 0
    batch2.cat_(Batch(c=[6, 7, 8], b=batch))
    assert np.allclose(batch2.c, [6, 7, 8, 6, 7, 8])
    assert np.allclose(batch2.b.a, [3, 4, 5, 3, 4, 5])
    assert np.allclose(batch2.b.b, [4, 5, 0, 4, 5, 0])
    batch2.update(batch2.b, six=[6, 6, 6])
    assert np.allclose(batch2.c, [6, 7, 8, 6, 7, 8])
    assert np.allclose(batch2.a, [3, 4, 5, 3, 4, 5])
    assert np.allclose(batch2.b, [4, 5, 0, 4, 5, 0])
    assert np.allclose(batch2.six, [6, 6, 6])
    d = {'a': [3, 4, 5], 'b': [4, 5, 6]}
    batch3 = Batch(c=[6, 7, 8], b=d)
    batch3.cat_(Batch(c=[6, 7, 8], b=d))
    assert np.allclose(batch3.c, [6, 7, 8, 6, 7, 8])
    assert np.allclose(batch3.b.a, [3, 4, 5, 3, 4, 5])
    assert np.allclose(batch3.b.b, [4, 5, 6, 4, 5, 6])
    batch4 = Batch(({'a': {'b': np.array([1.0])}},))
    assert batch4.a.b.ndim == 2
    assert batch4.a.b[0, 0] == 1.0
    # advanced slicing
    batch5 = Batch(a=[[1, 2]], b={'c': np.zeros([3, 2, 1])})
    assert batch5.shape == [1, 2]
    with pytest.raises(IndexError):
        batch5[2]
    with pytest.raises(IndexError):
        batch5[:, 3]
    with pytest.raises(IndexError):
        batch5[:, :, -1]
    batch5[:, -1] += 1
    assert np.allclose(batch5.a, [1, 3])
    assert np.allclose(batch5.b.c.squeeze(), [[0, 1]] * 3)
def test_batch_cat_and_stack():
    """Batch.cat/stack with both compatible and incompatible key sets
    (missing keys are zero-filled)."""
    # test cat with compatible keys
    b1 = Batch(a=[{'b': np.float64(1.0), 'd': Batch(e=np.array(3.0))}])
    b2 = Batch(a=[{'b': np.float64(4.0), 'd': {'e': np.array(6.0)}}])
    b12_cat_out = Batch.cat([b1, b2])
    b12_cat_in = copy.deepcopy(b1)
    b12_cat_in.cat_(b2)
    assert np.all(b12_cat_in.a.d.e == b12_cat_out.a.d.e)
    assert np.all(b12_cat_in.a.d.e == b12_cat_out.a.d.e)
    assert isinstance(b12_cat_in.a.d.e, np.ndarray)
    assert b12_cat_in.a.d.e.ndim == 1
    b12_stack = Batch.stack((b1, b2))
    assert isinstance(b12_stack.a.d.e, np.ndarray)
    assert b12_stack.a.d.e.ndim == 2
    # test cat with incompatible keys
    b1 = Batch(a=np.random.rand(3, 4), common=Batch(c=np.random.rand(3, 5)))
    b2 = Batch(b=torch.rand(4, 3), common=Batch(c=np.random.rand(4, 5)))
    test = Batch.cat([b1, b2])
    ans = Batch(a=np.concatenate([b1.a, np.zeros((4, 4))]),
                b=torch.cat([torch.zeros(3, 3), b2.b]),
                common=Batch(c=np.concatenate([b1.common.c, b2.common.c])))
    assert np.allclose(test.a, ans.a)
    assert torch.allclose(test.b, ans.b)
    assert np.allclose(test.common.c, ans.common.c)
    # test stack with compatible keys
    b3 = Batch(a=np.zeros((3, 4)),
               b=torch.ones((2, 5)),
               c=Batch(d=[[1], [2]]))
    b4 = Batch(a=np.ones((3, 4)),
               b=torch.ones((2, 5)),
               c=Batch(d=[[0], [3]]))
    b34_stack = Batch.stack((b3, b4), axis=1)
    assert np.all(b34_stack.a == np.stack((b3.a, b4.a), axis=1))
    assert np.all(b34_stack.c.d == list(map(list, zip(b3.c.d, b4.c.d))))
    b5_dict = np.array([{'a': False, 'b': {'c': 2.0, 'd': 1.0}},
                        {'a': True, 'b': {'c': 3.0}}])
    b5 = Batch(b5_dict)
    assert b5.a[0] == np.array(False) and b5.a[1] == np.array(True)
    assert np.all(b5.b.c == np.stack([e['b']['c'] for e in b5_dict], axis=0))
    assert b5.b.d[0] == b5_dict[0]['b']['d']
    assert b5.b.d[1] == 0.0
    # test stack with incompatible keys
    a = Batch(a=1, b=2, c=3)
    b = Batch(a=4, b=5, d=6)
    c = Batch(c=7, b=6, d=9)
    d = Batch.stack([a, b, c])
    assert np.allclose(d.a, [1, 4, 0])
    assert np.allclose(d.b, [2, 5, 6])
    assert np.allclose(d.c, [3, 0, 7])
    assert np.allclose(d.d, [0, 6, 9])
    b1 = Batch(a=np.random.rand(4, 4), common=Batch(c=np.random.rand(4, 5)))
    b2 = Batch(b=torch.rand(4, 6), common=Batch(c=np.random.rand(4, 5)))
    test = Batch.stack([b1, b2])
    ans = Batch(a=np.stack([b1.a, np.zeros((4, 4))]),
                b=torch.stack([torch.zeros(4, 6), b2.b]),
                common=Batch(c=np.stack([b1.common.c, b2.common.c])))
    assert np.allclose(test.a, ans.a)
    assert torch.allclose(test.b, ans.b)
    assert np.allclose(test.common.c, ans.common.c)
def test_batch_over_batch_to_torch():
    """In-place to_torch() conversion preserves dtypes unless overridden."""
    batch = Batch(
        a=np.float64(1.0),
        b=Batch(
            c=np.ones((1,), dtype=np.float32),
            d=torch.ones((1,), dtype=torch.float64)
        )
    )
    batch.to_torch()
    assert isinstance(batch.a, torch.Tensor)
    assert isinstance(batch.b.c, torch.Tensor)
    assert isinstance(batch.b.d, torch.Tensor)
    assert batch.a.dtype == torch.float64
    assert batch.b.c.dtype == torch.float32
    assert batch.b.d.dtype == torch.float64
    batch.to_torch(dtype=torch.float32)
    assert batch.a.dtype == torch.float32
    assert batch.b.c.dtype == torch.float32
    assert batch.b.d.dtype == torch.float32
def test_utils_to_torch():
    """The standalone to_torch() helper on scalars, Batches and lists."""
    batch = Batch(
        a=np.float64(1.0),
        b=Batch(
            c=np.ones((1,), dtype=np.float32),
            d=torch.ones((1,), dtype=torch.float64)
        )
    )
    a_torch_float = to_torch(batch.a, dtype=torch.float32)
    assert a_torch_float.dtype == torch.float32
    a_torch_double = to_torch(batch.a, dtype=torch.float64)
    assert a_torch_double.dtype == torch.float64
    batch_torch_float = to_torch(batch, dtype=torch.float32)
    assert batch_torch_float.a.dtype == torch.float32
    assert batch_torch_float.b.c.dtype == torch.float32
    assert batch_torch_float.b.d.dtype == torch.float32
    array_list = [float('nan'), 1.0]
    assert to_torch(array_list).dtype == torch.float64
def test_batch_pickle():
    """A Batch round-trips losslessly through pickle."""
    batch = Batch(obs=Batch(a=0.0, c=torch.Tensor([1.0, 2.0])),
                  np=np.zeros([3, 4]))
    batch_pk = pickle.loads(pickle.dumps(batch))
    assert batch.obs.a == batch_pk.obs.a
    assert torch.all(batch.obs.c == batch_pk.obs.c)
    assert np.all(batch.np == batch_pk.np)
def test_batch_from_to_numpy_without_copy():
    """to_torch()/to_numpy() round-trip keeps the same underlying buffers."""
    batch = Batch(a=np.ones((1,)), b=Batch(c=np.ones((1,))))
    a_mem_addr_orig = batch.a.__array_interface__['data'][0]
    c_mem_addr_orig = batch.b.c.__array_interface__['data'][0]
    batch.to_torch()
    batch.to_numpy()
    a_mem_addr_new = batch.a.__array_interface__['data'][0]
    c_mem_addr_new = batch.b.c.__array_interface__['data'][0]
    assert a_mem_addr_new == a_mem_addr_orig
    assert c_mem_addr_new == c_mem_addr_orig
def test_batch_copy():
    """Batch(copy=False) shares buffers; Batch(copy=True) deep-copies."""
    batch = Batch(a=np.array([3, 4, 5]), b=np.array([4, 5, 6]))
    batch2 = Batch({'c': np.array([6, 7, 8]), 'b': batch})
    orig_c_addr = batch2.c.__array_interface__['data'][0]
    orig_b_a_addr = batch2.b.a.__array_interface__['data'][0]
    orig_b_b_addr = batch2.b.b.__array_interface__['data'][0]
    # test with copy=False
    batch3 = Batch(copy=False, **batch2)
    curr_c_addr = batch3.c.__array_interface__['data'][0]
    curr_b_a_addr = batch3.b.a.__array_interface__['data'][0]
    curr_b_b_addr = batch3.b.b.__array_interface__['data'][0]
    assert batch2.c is batch3.c
    assert batch2.b is batch3.b
    assert batch2.b.a is batch3.b.a
    assert batch2.b.b is batch3.b.b
    assert orig_c_addr == curr_c_addr
    assert orig_b_a_addr == curr_b_a_addr
    assert orig_b_b_addr == curr_b_b_addr
    # test with copy=True
    batch3 = Batch(copy=True, **batch2)
    curr_c_addr = batch3.c.__array_interface__['data'][0]
    curr_b_a_addr = batch3.b.a.__array_interface__['data'][0]
    curr_b_b_addr = batch3.b.b.__array_interface__['data'][0]
    assert batch2.c is not batch3.c
    assert batch2.b is not batch3.b
    assert batch2.b.a is not batch3.b.a
    assert batch2.b.b is not batch3.b.b
    assert orig_c_addr != curr_c_addr
    assert orig_b_a_addr != curr_b_a_addr
    assert orig_b_b_addr != curr_b_b_addr
def test_batch_empty():
    """Batch.empty / empty_ reset entries to type-appropriate 'zero' values."""
    b5_dict = np.array([{'a': False, 'b': {'c': 2.0, 'd': 1.0}},
                        {'a': True, 'b': {'c': 3.0}}])
    b5 = Batch(b5_dict)
    b5[1] = Batch.empty(b5[0])
    assert np.allclose(b5.a, [False, False])
    assert np.allclose(b5.b.c, [2, 0])
    assert np.allclose(b5.b.d, [1, 0])
    data = Batch(a=[False, True],
                 b={'c': np.array([2., 'st'], dtype=np.object),
                    'd': [1, None],
                    'e': [2., float('nan')]},
                 c=np.array([1, 3, 4], dtype=np.int),
                 t=torch.tensor([4, 5, 6, 7.]))
    data[-1] = Batch.empty(data[1])
    assert np.allclose(data.c, [1, 3, 0])
    assert np.allclose(data.a, [False, False])
    assert list(data.b.c) == [2.0, None]
    assert list(data.b.d) == [1, None]
    assert np.allclose(data.b.e, [2, 0])
    assert torch.allclose(data.t, torch.tensor([4, 5, 6, 0.]))
    data[0].empty_() # which will fail in a, b.c, b.d, b.e, c
    assert torch.allclose(data.t, torch.tensor([0., 5, 6, 0]))
    data.empty_(index=0)
    assert np.allclose(data.c, [0, 3, 0])
    assert list(data.b.c) == [None, None]
    assert list(data.b.d) == [None, None]
    assert list(data.b.e) == [0, 0]
    b0 = Batch()
    b0.empty_()
    assert b0.shape == []
def test_batch_standard_compatibility():
    """Batch cooperates with NumPy ufunc-style reductions like np.mean."""
    batch = Batch(a=np.array([[1.0, 2.0], [3.0, 4.0]]),
                  b=Batch(),
                  c=np.array([5.0, 6.0]))
    batch_mean = np.mean(batch)
    assert isinstance(batch_mean, Batch)
    assert sorted(batch_mean.keys()) == ['a', 'b', 'c']
    with pytest.raises(TypeError):
        len(batch_mean)
    assert np.all(batch_mean.a == np.mean(batch.a, axis=0))
    assert batch_mean.c == np.mean(batch.c, axis=0)
    with pytest.raises(IndexError):
        Batch()[0]
if __name__ == '__main__':
    # Allow running this module directly, without pytest.
    test_batch()
    test_batch_over_batch()
    test_batch_over_batch_to_torch()
    test_utils_to_torch()
    test_batch_pickle()
    test_batch_from_to_numpy_without_copy()
    test_batch_standard_compatibility()
    test_batch_cat_and_stack()
    test_batch_copy()
    test_batch_empty()
| 37.806452 | 77 | 0.593146 |
a6cf70251d12cc3169c83045805b4bac54232e54 | 3,526 | py | Python | my_utils/squad_eval.py | ashishbaghudana/san_mrc | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | [
"BSD-3-Clause"
] | 1 | 2019-08-23T13:33:37.000Z | 2019-08-23T13:33:37.000Z | my_utils/squad_eval.py | ashishbaghudana/san_mrc | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | [
"BSD-3-Clause"
] | null | null | null | my_utils/squad_eval.py | ashishbaghudana/san_mrc | 03ed7d94c735f1fe2854bb9c208385b5fde44905 | [
"BSD-3-Clause"
] | null | null | null | """ Official evaluation script for v1.1 of the SQuAD dataset.
Credit from: https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/
"""
from __future__ import print_function
import argparse
import json
import string
import sys
from collections import Counter
import re
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    lowered = s.lower()
    # Strip all punctuation in a single C-level pass.
    no_punc = lowered.translate(str.maketrans('', '', string.punctuation))
    # Drop English articles as whole words.
    no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punc)
    # Collapse any run of whitespace to a single space and trim the ends.
    return ' '.join(no_articles.split())
def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and one ground-truth answer."""
    pred_tokens = normalize_answer(prediction).split()
    truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token at most min(count) times.
    overlap = Counter(pred_tokens) & Counter(truth_tokens)
    num_same = sum(overlap.values())
    if num_same == 0:
        # No shared tokens (also covers an empty prediction or ground truth).
        return 0
    precision = num_same / float(len(pred_tokens))
    recall = num_same / float(len(truth_tokens))
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
    """Return True when both answers are identical after normalization."""
    pred_norm = normalize_answer(prediction)
    truth_norm = normalize_answer(ground_truth)
    return pred_norm == truth_norm
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Best score of *prediction* against any of the ground-truth answers.

    Raises ValueError (from max) when *ground_truths* is empty, exactly as
    the original list-building implementation did.
    """
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def evaluate(dataset, predictions):
    """Corpus-level exact-match and F1 for a SQuAD v1.1 dataset.

    *dataset* is the parsed `data` list of the official JSON; *predictions*
    maps question id -> predicted answer string. Unanswered questions are
    reported to stderr and scored 0.
    """
    total = 0
    em_sum = 0.0
    f1_sum = 0.0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                qid = qa['id']
                if qid not in predictions:
                    message = 'Unanswered question ' + qid + \
                        ' will receive score 0.'
                    print(message, file=sys.stderr)
                    continue
                prediction = predictions[qid]
                ground_truths = [answer['text'] for answer in qa['answers']]
                em_sum += metric_max_over_ground_truths(
                    exact_match_score, prediction, ground_truths)
                f1_sum += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths)
    return {'exact_match': 100.0 * em_sum / total, 'f1': 100.0 * f1_sum / total}
if __name__ == '__main__':
    # CLI entry point: squad_eval.py <dataset.json> <predictions.json>
    expected_version = '1.1'
    parser = argparse.ArgumentParser(
        description='Evaluation for SQuAD ' + expected_version)
    parser.add_argument('dataset_file', help='Dataset file')
    parser.add_argument('prediction_file', help='Prediction File')
    args = parser.parse_args()
    with open(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        # A version mismatch only warns; evaluation still proceeds.
        if dataset_json['version'] != expected_version:
            print('Evaluation expects v-' + expected_version +
                  ', but got dataset with v-' + dataset_json['version'],
                  file=sys.stderr)
        dataset = dataset_json['data']
    with open(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    # Emit the scores as a single JSON object on stdout.
    print(json.dumps(evaluate(dataset, predictions)))
| 35.616162 | 106 | 0.658537 |
7bac5e6b258d48fa1d9cdbbf82aa03f2d8a47f88 | 1,349 | py | Python | as05ctl.py | netlabcode/gc-sim | 6bc65ea9b216e3b2d9e2d9d94f33b79c9f4a139f | [
"MIT"
] | null | null | null | as05ctl.py | netlabcode/gc-sim | 6bc65ea9b216e3b2d9e2d9d94f33b79c9f4a139f | [
"MIT"
] | null | null | null | as05ctl.py | netlabcode/gc-sim | 6bc65ea9b216e3b2d9e2d9d94f33b79c9f4a139f | [
"MIT"
] | null | null | null | import binascii
import _thread
import time
import socket
import time
import sqlite3
from sqlite3 import Error
import datetime
#Predefined parameters
# IP addresses of the three merging-unit (MU) simulators on the testbed net.
MU01 = '100.5.0.11'
MU02 = '100.5.0.12'
MU03 = '100.5.0.13'
# NOTE(review): PORT1 is never referenced in this script; PORT2 is the
# control port serverXMUCC connects to on each MU.
PORT1 = 991
PORT2 = 992
def serverXMUCC():
    """Interactive console loop forwarding operator commands to the MUs.

    Opens one TCP control connection per merging unit, then repeatedly reads
    a command of the form ``mu0X_<value>`` from stdin and sends ``<value>``
    to the matching unit.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sx1:
        sx1.connect((MU01, PORT2))
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sx2:
            sx2.connect((MU02, PORT2))
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sx3:
                sx3.connect((MU03, PORT2))
                # Dispatch table; checked in the same priority order as the
                # original if/elif chain.
                links = (('mu01', sx1), ('mu02', sx2), ('mu03', sx3))
                bx = 1
                value2x = 0  # kept from the original; currently unused
                # NOTE: bx is never modified, so this loop runs forever.
                while bx < 6:
                    print("Format: mu01_id+value")
                    command = str(input("Command entry: "))
                    for tag, conn in links:
                        if tag in command:
                            _, payload = command.split("_")
                            print(payload)
                            conn.sendall(payload.encode())
                            break
                    else:
                        print(".")
                    time.sleep(1)
# Create two threads as follows
# Run the control console on a background thread; the main thread then
# busy-waits to keep the process alive.
try:
    _thread.start_new_thread( serverXMUCC, ( ) )
except:
    # NOTE(review): bare except hides the actual failure; narrowing it would
    # change behavior, so it is only flagged here.
    print ("Error: unable to start thread")
while 1:
    pass
| 20.439394 | 65 | 0.646405 |
3b8e629e423654ee6b252c437f7f8d7dab1ffffd | 3,162 | py | Python | pyroms_toolbox/pyroms_toolbox/BGrid_SODA/flood.py | gorsol/pyroms | d293c9949daf95ec8a0a4e2ff1f67af8969c2b3f | [
"BSD-3-Clause"
] | 1 | 2020-01-21T14:30:29.000Z | 2020-01-21T14:30:29.000Z | pyroms_toolbox/pyroms_toolbox/BGrid_SODA/flood.py | gorsol/pyroms | d293c9949daf95ec8a0a4e2ff1f67af8969c2b3f | [
"BSD-3-Clause"
] | null | null | null | pyroms_toolbox/pyroms_toolbox/BGrid_SODA/flood.py | gorsol/pyroms | d293c9949daf95ec8a0a4e2ff1f67af8969c2b3f | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
import numpy as np
from pyroms import _remapping
import pyroms
def flood(varz, Bgrd, Bpos='t', irange=None, jrange=None, \
          spval=-9.99e+33, dmax=0, cdepth=0, kk=0):
    """
    var = flood(var, Bgrd)

    Flood varz on Bgrd: fill masked/land cells of a 3D field with nearby
    wet values and extend the deepest wet value down the water column.

    optional switch:
      - Bpos='t', 'uv'       grid position where the variable lies
      - irange               (i0, i1) grid sub-sample for i direction
      - jrange               (j0, j1) grid sub-sample for j direction
      - spval=-9.99e+33      special value marking missing data
                             (docstring previously claimed 1e35; the actual
                             default is -9.99e+33)
      - dmax=0               if dmax > 0, maximum horizontal flooding distance
      - cdepth=0             critical depth for flooding;
                             if depth < cdepth => no flooding
      - kk=0                 vertical offset used when testing which cells
                             may be flooded at each level
    """
    varz = np.array(varz.copy())
    assert len(varz.shape) == 3, 'var must be 3D'

    # replace spval by nan (relative tolerance so single/double precision
    # round-off of spval still matches)
    idx = np.where(abs((varz-spval)/spval)<=1e-5)
    varz[idx] = np.nan

    x = Bgrd.lon_t
    y = Bgrd.lat_t
    h = Bgrd.h
    # Bugfix: compare strings with '==' instead of 'is' (identity on str
    # literals is implementation dependent) and reject unknown positions
    # instead of leaving `mask` undefined for a later NameError.
    if Bpos == 't':
        mask = Bgrd.mask_t[0,:,:]
    elif Bpos == 'uv':
        mask = Bgrd.mask_uv[0,:,:]
    else:
        raise ValueError("Bpos must be 't' or 'uv', got %r" % (Bpos,))

    nlev, Mm, Lm = varz.shape

    if irange is None:
        irange = (0,Lm)
    else:
        assert varz.shape[2] == irange[1]-irange[0], \
            'var shape and irange must agreed'

    if jrange is None:
        jrange = (0,Mm)
    else:
        assert varz.shape[1] == jrange[1]-jrange[0], \
            'var shape and jrange must agreed'

    x = x[jrange[0]:jrange[1], irange[0]:irange[1]]
    y = y[jrange[0]:jrange[1], irange[0]:irange[1]]
    h = h[jrange[0]:jrange[1], irange[0]:irange[1]]
    mask = mask[jrange[0]:jrange[1], irange[0]:irange[1]]

    # Finding nearest values in horizontal
    # critical depth => no change if depth is less than specified value
    cdepth = abs(cdepth)
    if cdepth != 0:
        idx = np.where(h >= cdepth)
        msk = np.zeros(mask.shape)
        msk[idx] = 1
    else:
        msk = mask.copy()

    for k in range(nlev-1,0,-1):
        c1 = np.array(msk, dtype=bool)
        c2 = np.isnan(varz[k,:,:]) == 1
        if kk == 0:
            c3 = np.ones(mask.shape).astype(bool)
        else:
            # NOTE(review): `min(k-kk, 0)` always indexes level <= 0; it
            # looks like `max(k-kk, 0)` may have been intended -- confirm
            # against upstream before changing.
            c3 = np.isnan(varz[min(k-kk,0),:,:]) == 0
        c = c1 & c2 & c3
        idxnan = np.where(c == True)
        idx = np.where(c2 == False)
        if list(idx[0]):
            # Fortran-style 1-based (j, i) coordinates expected by the
            # _remapping extension.
            wet = np.zeros((len(idx[0]),2))
            dry = np.zeros((len(idxnan[0]),2))
            wet[:,0] = idx[0]+1
            wet[:,1] = idx[1]+1
            dry[:,0] = idxnan[0]+1
            dry[:,1] = idxnan[1]+1
            varz[k,:] = _remapping.flood(varz[k,:], wet, dry, x, y, dmax)

    # drop the deepest values down
    idx = np.where(np.isnan(varz) == 1)
    varz[idx] = spval
    bottom = pyroms.utility.get_bottom(varz[::-1,:,:], mask, spval=spval)
    bottom = (nlev-1) - bottom
    for i in range(Lm):
        for j in range(Mm):
            if mask[j,i] == 1:
                varz[int(bottom[j,i]):,j,i] = varz[int(bottom[j,i]),j,i]

    return varz
| 30.403846 | 76 | 0.500316 |
d04e2e205208305b564274024b3dd7b08da4a0f4 | 22,153 | py | Python | sdk/python/pulumi_github/team.py | pulumi/pulumi-github | 303ed7a28cbfe6ba1db75b3b365dcfa0b00e6e91 | [
"ECL-2.0",
"Apache-2.0"
] | 20 | 2020-04-27T15:05:01.000Z | 2022-02-08T00:28:32.000Z | sdk/python/pulumi_github/team.py | pulumi/pulumi-github | 303ed7a28cbfe6ba1db75b3b365dcfa0b00e6e91 | [
"ECL-2.0",
"Apache-2.0"
] | 103 | 2020-05-01T17:36:32.000Z | 2022-03-31T15:26:35.000Z | sdk/python/pulumi_github/team.py | pulumi/pulumi-github | 303ed7a28cbfe6ba1db75b3b365dcfa0b00e6e91 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-06-24T19:15:02.000Z | 2021-11-26T08:05:46.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TeamArgs', 'Team']
# NOTE: generated by the Pulumi Terraform Bridge (tfgen); prefer changing the
# provider schema over hand-editing this class.
@pulumi.input_type
class TeamArgs:
    def __init__(__self__, *,
                 create_default_maintainer: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ldap_dn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent_team_id: Optional[pulumi.Input[int]] = None,
                 privacy: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Team resource.
        :param pulumi.Input[bool] create_default_maintainer: Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        :param pulumi.Input[str] description: A description of the team.
        :param pulumi.Input[str] ldap_dn: The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[int] parent_team_id: The ID of the parent team, if this is a nested team.
        :param pulumi.Input[str] privacy: The level of privacy for the team. Must be one of `secret` or `closed`.
               Defaults to `secret`.
        """
        if create_default_maintainer is not None:
            pulumi.set(__self__, "create_default_maintainer", create_default_maintainer)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if ldap_dn is not None:
            pulumi.set(__self__, "ldap_dn", ldap_dn)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parent_team_id is not None:
            pulumi.set(__self__, "parent_team_id", parent_team_id)
        if privacy is not None:
            pulumi.set(__self__, "privacy", privacy)
    @property
    @pulumi.getter(name="createDefaultMaintainer")
    def create_default_maintainer(self) -> Optional[pulumi.Input[bool]]:
        """
        Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        """
        return pulumi.get(self, "create_default_maintainer")
    @create_default_maintainer.setter
    def create_default_maintainer(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create_default_maintainer", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the team.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="ldapDn")
    def ldap_dn(self) -> Optional[pulumi.Input[str]]:
        """
        The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        """
        return pulumi.get(self, "ldap_dn")
    @ldap_dn.setter
    def ldap_dn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ldap_dn", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the team.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="parentTeamId")
    def parent_team_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the parent team, if this is a nested team.
        """
        return pulumi.get(self, "parent_team_id")
    @parent_team_id.setter
    def parent_team_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "parent_team_id", value)
    @property
    @pulumi.getter
    def privacy(self) -> Optional[pulumi.Input[str]]:
        """
        The level of privacy for the team. Must be one of `secret` or `closed`.
        Defaults to `secret`.
        """
        return pulumi.get(self, "privacy")
    @privacy.setter
    def privacy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "privacy", value)
@pulumi.input_type
class _TeamState:
    """Input properties used for looking up and filtering existing Team resources."""

    def __init__(__self__, *,
                 create_default_maintainer: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 ldap_dn: Optional[pulumi.Input[str]] = None,
                 members_count: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 node_id: Optional[pulumi.Input[str]] = None,
                 parent_team_id: Optional[pulumi.Input[int]] = None,
                 privacy: Optional[pulumi.Input[str]] = None,
                 slug: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Team resources.
        :param pulumi.Input[bool] create_default_maintainer: Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        :param pulumi.Input[str] description: A description of the team.
        :param pulumi.Input[str] ldap_dn: The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[str] node_id: The Node ID of the created team.
        :param pulumi.Input[int] parent_team_id: The ID of the parent team, if this is a nested team.
        :param pulumi.Input[str] privacy: The level of privacy for the team. Must be one of `secret` or `closed`.
               Defaults to `secret`.
        :param pulumi.Input[str] slug: The slug of the created team, which may or may not differ from `name`,
               depending on whether `name` contains "URL-unsafe" characters.
               Useful when referencing the team in [`BranchProtection`](https://www.terraform.io/docs/providers/github/r/branch_protection.html).
        """
        # Only explicitly supplied values are stored, so unset properties stay
        # absent from the state bag instead of being recorded as None.
        if create_default_maintainer is not None:
            pulumi.set(__self__, "create_default_maintainer", create_default_maintainer)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if ldap_dn is not None:
            pulumi.set(__self__, "ldap_dn", ldap_dn)
        if members_count is not None:
            pulumi.set(__self__, "members_count", members_count)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if node_id is not None:
            pulumi.set(__self__, "node_id", node_id)
        if parent_team_id is not None:
            pulumi.set(__self__, "parent_team_id", parent_team_id)
        if privacy is not None:
            pulumi.set(__self__, "privacy", privacy)
        if slug is not None:
            pulumi.set(__self__, "slug", slug)

    @property
    @pulumi.getter(name="createDefaultMaintainer")
    def create_default_maintainer(self) -> Optional[pulumi.Input[bool]]:
        """
        Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        """
        return pulumi.get(self, "create_default_maintainer")

    @create_default_maintainer.setter
    def create_default_maintainer(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "create_default_maintainer", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the team.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): presumably a provider-managed change-tracking token;
        # the Team resource treats it as output-only — confirm before relying on it.
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter(name="ldapDn")
    def ldap_dn(self) -> Optional[pulumi.Input[str]]:
        """
        The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        """
        return pulumi.get(self, "ldap_dn")

    @ldap_dn.setter
    def ldap_dn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ldap_dn", value)

    @property
    @pulumi.getter(name="membersCount")
    def members_count(self) -> Optional[pulumi.Input[int]]:
        # NOTE(review): the Team resource initializes this as output-only;
        # presumably the number of team members — confirm against provider docs.
        return pulumi.get(self, "members_count")

    @members_count.setter
    def members_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "members_count", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the team.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="nodeId")
    def node_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Node ID of the created team.
        """
        return pulumi.get(self, "node_id")

    @node_id.setter
    def node_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "node_id", value)

    @property
    @pulumi.getter(name="parentTeamId")
    def parent_team_id(self) -> Optional[pulumi.Input[int]]:
        """
        The ID of the parent team, if this is a nested team.
        """
        return pulumi.get(self, "parent_team_id")

    @parent_team_id.setter
    def parent_team_id(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "parent_team_id", value)

    @property
    @pulumi.getter
    def privacy(self) -> Optional[pulumi.Input[str]]:
        """
        The level of privacy for the team. Must be one of `secret` or `closed`.
        Defaults to `secret`.
        """
        return pulumi.get(self, "privacy")

    @privacy.setter
    def privacy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "privacy", value)

    @property
    @pulumi.getter
    def slug(self) -> Optional[pulumi.Input[str]]:
        """
        The slug of the created team, which may or may not differ from `name`,
        depending on whether `name` contains "URL-unsafe" characters.
        Useful when referencing the team in [`BranchProtection`](https://www.terraform.io/docs/providers/github/r/branch_protection.html).
        """
        return pulumi.get(self, "slug")

    @slug.setter
    def slug(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "slug", value)
class Team(pulumi.CustomResource):
    """A GitHub team managed as a Pulumi custom resource."""

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 create_default_maintainer: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ldap_dn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent_team_id: Optional[pulumi.Input[int]] = None,
                 privacy: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a GitHub team resource.
        This resource allows you to add/remove teams from your organization. When applied,
        a new team will be created. When destroyed, that team will be removed.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_github as github
        # Add a team to the organization
        some_team = github.Team("someTeam",
            description="Some cool team",
            privacy="closed")
        ```
        ## Import
        GitHub Teams can be imported using the GitHub team ID e.g.
        ```sh
        $ pulumi import github:index/team:Team core 1234567
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] create_default_maintainer: Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        :param pulumi.Input[str] description: A description of the team.
        :param pulumi.Input[str] ldap_dn: The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[int] parent_team_id: The ID of the parent team, if this is a nested team.
        :param pulumi.Input[str] privacy: The level of privacy for the team. Must be one of `secret` or `closed`.
               Defaults to `secret`.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: Optional[TeamArgs] = None,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a GitHub team resource.
        This resource allows you to add/remove teams from your organization. When applied,
        a new team will be created. When destroyed, that team will be removed.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_github as github
        # Add a team to the organization
        some_team = github.Team("someTeam",
            description="Some cool team",
            privacy="closed")
        ```
        ## Import
        GitHub Teams can be imported using the GitHub team ID e.g.
        ```sh
        $ pulumi import github:index/team:Team core 1234567
        ```
        :param str resource_name: The name of the resource.
        :param TeamArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher between the two overloads above: if the caller passed a
        # TeamArgs object, expand its attributes into keyword arguments;
        # otherwise forward the positional/keyword form unchanged.
        resource_args, opts = _utilities.get_resource_args_opts(TeamArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 create_default_maintainer: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 ldap_dn: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent_team_id: Optional[pulumi.Input[int]] = None,
                 privacy: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: validates the
        # resource options, builds the input property bag, and registers the
        # resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ may only be supplied when
            # looking up an existing resource by id.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TeamArgs.__new__(TeamArgs)
            __props__.__dict__["create_default_maintainer"] = create_default_maintainer
            __props__.__dict__["description"] = description
            __props__.__dict__["ldap_dn"] = ldap_dn
            __props__.__dict__["name"] = name
            __props__.__dict__["parent_team_id"] = parent_team_id
            __props__.__dict__["privacy"] = privacy
            # Output-only properties are initialized to None; the provider
            # fills them in after the resource is created.
            __props__.__dict__["etag"] = None
            __props__.__dict__["members_count"] = None
            __props__.__dict__["node_id"] = None
            __props__.__dict__["slug"] = None
        super(Team, __self__).__init__(
            'github:index/team:Team',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            create_default_maintainer: Optional[pulumi.Input[bool]] = None,
            description: Optional[pulumi.Input[str]] = None,
            etag: Optional[pulumi.Input[str]] = None,
            ldap_dn: Optional[pulumi.Input[str]] = None,
            members_count: Optional[pulumi.Input[int]] = None,
            name: Optional[pulumi.Input[str]] = None,
            node_id: Optional[pulumi.Input[str]] = None,
            parent_team_id: Optional[pulumi.Input[int]] = None,
            privacy: Optional[pulumi.Input[str]] = None,
            slug: Optional[pulumi.Input[str]] = None) -> 'Team':
        """
        Get an existing Team resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[bool] create_default_maintainer: Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        :param pulumi.Input[str] description: A description of the team.
        :param pulumi.Input[str] ldap_dn: The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        :param pulumi.Input[str] name: The name of the team.
        :param pulumi.Input[str] node_id: The Node ID of the created team.
        :param pulumi.Input[int] parent_team_id: The ID of the parent team, if this is a nested team.
        :param pulumi.Input[str] privacy: The level of privacy for the team. Must be one of `secret` or `closed`.
               Defaults to `secret`.
        :param pulumi.Input[str] slug: The slug of the created team, which may or may not differ from `name`,
               depending on whether `name` contains "URL-unsafe" characters.
               Useful when referencing the team in [`BranchProtection`](https://www.terraform.io/docs/providers/github/r/branch_protection.html).
        """
        # Attach the provider id to the options so the engine performs a
        # lookup instead of a create.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _TeamState.__new__(_TeamState)
        __props__.__dict__["create_default_maintainer"] = create_default_maintainer
        __props__.__dict__["description"] = description
        __props__.__dict__["etag"] = etag
        __props__.__dict__["ldap_dn"] = ldap_dn
        __props__.__dict__["members_count"] = members_count
        __props__.__dict__["name"] = name
        __props__.__dict__["node_id"] = node_id
        __props__.__dict__["parent_team_id"] = parent_team_id
        __props__.__dict__["privacy"] = privacy
        __props__.__dict__["slug"] = slug
        return Team(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="createDefaultMaintainer")
    def create_default_maintainer(self) -> pulumi.Output[Optional[bool]]:
        """
        Adds a default maintainer to the team. Defaults to `true` and removes the default maintainer when `false`.
        """
        return pulumi.get(self, "create_default_maintainer")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A description of the team.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        # NOTE(review): output-only (initialized to None in _internal_init and
        # populated by the provider); presumably a change-tracking token — confirm.
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="ldapDn")
    def ldap_dn(self) -> pulumi.Output[Optional[str]]:
        """
        The LDAP Distinguished Name of the group where membership will be synchronized. Only available in GitHub Enterprise Server.
        """
        return pulumi.get(self, "ldap_dn")

    @property
    @pulumi.getter(name="membersCount")
    def members_count(self) -> pulumi.Output[int]:
        # NOTE(review): output-only value populated by the provider;
        # presumably the number of members on the team — confirm against docs.
        return pulumi.get(self, "members_count")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the team.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="nodeId")
    def node_id(self) -> pulumi.Output[str]:
        """
        The Node ID of the created team.
        """
        return pulumi.get(self, "node_id")

    @property
    @pulumi.getter(name="parentTeamId")
    def parent_team_id(self) -> pulumi.Output[Optional[int]]:
        """
        The ID of the parent team, if this is a nested team.
        """
        return pulumi.get(self, "parent_team_id")

    @property
    @pulumi.getter
    def privacy(self) -> pulumi.Output[Optional[str]]:
        """
        The level of privacy for the team. Must be one of `secret` or `closed`.
        Defaults to `secret`.
        """
        return pulumi.get(self, "privacy")

    @property
    @pulumi.getter
    def slug(self) -> pulumi.Output[str]:
        """
        The slug of the created team, which may or may not differ from `name`,
        depending on whether `name` contains "URL-unsafe" characters.
        Useful when referencing the team in [`BranchProtection`](https://www.terraform.io/docs/providers/github/r/branch_protection.html).
        """
        return pulumi.get(self, "slug")
| 41.024074 | 166 | 0.632871 |
0e438cb966123973ef19b8868438cc884b8abeac | 1,661 | py | Python | examples/examples.py | jixiangqd/krds_openapi_sdk | c8bce69f81490ccef80327c318ecf0f816439488 | [
"Apache-2.0"
] | null | null | null | examples/examples.py | jixiangqd/krds_openapi_sdk | c8bce69f81490ccef80327c318ecf0f816439488 | [
"Apache-2.0"
] | null | null | null | examples/examples.py | jixiangqd/krds_openapi_sdk | c8bce69f81490ccef80327c318ecf0f816439488 | [
"Apache-2.0"
] | 1 | 2018-03-21T07:39:44.000Z | 2018-03-21T07:39:44.000Z | from krds_client import *
krds_client = KRDSClient('your_ak', 'your_sk', 'your_service_region')
# Demo
#
# r = krds_client.CreateDBInstance(DBInstanceClass='db.ram.1|db.disk.10', DBInstanceName='test', Engine='mysql',
# EngineVersion='5.6', MasterUserName='test', MasterUserPassword='Test123456',
# DBInstanceType=DBInstanceType.HA, PubliclyAccessible=True,
# VpcId='b33a2276-64a8-4c04-b28e-da253c8add32',
# SubnetId='c2e0abd7-13df-461a-bd8d-3b92faebf111', BillType=BillType.DAY)
# r = krds_client.DescribeDBInstances(None, 'HA', None, 'ACTIVE')
# r = krds_client.CreateDBInstanceReadReplica("test-rr", "d4fe1bcf-a99a-4b1e-ba61-3c751b650249")
# r = krds_client.DescribeDBEngineVersions()
# r = krds_client.ModifyDBInstance("f3944998-ebab-4e4c-8bcc-959206fff870", "test-modify-name")
# r = krds_client.RebootDBInstance("f3944998-ebab-4e4c-8bcc-959206fff870")
# r = krds_client.DeleteDBInstance("f3944998-ebab-4e4c-8bcc-959206fff870")
# r = krds_client.DescribeDBLogFiles("bb2d111a-af44-41ee-b10d-754f23bc59e1", DBLogType.Binlog)
# r = krds_client.CreateDBSecurityGroupRule("bb2d111a-af44-41ee-b10d-754f23bc59e1", [{'Cidr': '0.0.0.0/0'}])
# r = krds_client.DescribeDBSecurityGroup("bb2d111a-af44-41ee-b10d-754f23bc59e1")
# r = krds_client.DeleteDBSecurityGroup([{'Id': '74863'}])
# r = krds_client.CreateDBBackup("bb2d111a-af44-41ee-b10d-754f23bc59e1", "test-backup")
# r = krds_client.DescribeDBBackups("bb2d111a-af44-41ee-b10d-754f23bc59e1")
# r = krds_client.DeleteDBBackup('a2d67d80-d011-4856-8841-d7362f8c5779')
#
# print r
| 61.518519 | 112 | 0.712222 |
6dc7f59bfb7cb6bccd03e8969aefe9266f25fa94 | 6,646 | py | Python | data_v1/rdf/py/gs_counties.py | FellowsFreiesWissen/- | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | [
"MIT"
] | 1 | 2021-02-08T20:33:41.000Z | 2021-02-08T20:33:41.000Z | data_v1/rdf/py/gs_counties.py | FellowsFreiesWissen/- | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | [
"MIT"
] | null | null | null | data_v1/rdf/py/gs_counties.py | FellowsFreiesWissen/- | 3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3 | [
"MIT"
] | 1 | 2021-01-01T17:23:40.000Z | 2021-01-01T17:23:40.000Z | __author__ = "Florian Thiery"
__copyright__ = "MIT Licence 2021, Florian Thiery"
__credits__ = ["Florian Thiery"]
__license__ = "MIT"
__version__ = "beta"
__maintainer__ = "Florian Thiery"
__email__ = "mail@fthiery.de"
__status__ = "beta"
__update__ = "2021-05-11"
# import dependencies
import uuid
import requests
import io
import pandas as pd
import os
import codecs
import datetime
import importlib
import sys
import hashlib
import _config
# set UTF8 as default
importlib.reload(sys)
print("*****************************************")
# set starttime
starttime = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
# set paths
file_name = "gs_counties"
dir_path = os.path.dirname(os.path.realpath(__file__))
file_in = dir_path.replace("\\rdf\\py", "\\csv\\geodata") + "\\" + file_name + ".csv"
# read csv file
data = pd.read_csv(
file_in,
encoding='utf-8',
sep=',',
usecols=['id', 'label', 'wikidata_id', 'wkt'],
na_values=['.', '??', 'NULL'] # take any '.' or '??' values as NA
)
print(data.info())
# create triples from dataframe
lineNo = 2
outStr = ""
lines = []
for index, row in data.iterrows():
# print(lineNo)
tmpno = lineNo - 2
if tmpno % 1000 == 0:
print(tmpno)
lineNo += 1
# info
lines.append("ogham:GSD" + str(row['id']) + " " + "rdf:type" + " oghamonto:County .")
lines.append("ogham:GSD" + str(row['id']) + " " + "rdf:type" + " <http://www.opengis.net/ont/geosparql#Feature> .")
lines.append("ogham:GSD" + str(row['id']) + " " + "rdf:type" + " <http://ontologies.geohive.ie/osi#County> .")
lines.append("ogham:GSD" + str(row['id']) + " " + "rdfs:label" + " " + "'" + str(row['label']) + "'@en" + ".")
lines.append("ogham:GSD" + str(row['id']) + " " + "oghamonto:exactMatch" + " wd:" + str(row['wikidata_id']) + " .")
# geom
lines.append("ogham:GSD" + str(row['id']) + " " + "geosparql:hasGeometry" + " ogham:GSD" + str(row['id']) + "_geom .")
lines.append("ogham:GSD" + str(row['id']) + "_geom " + "rdf:type" + " sf:MultiPolygon .")
geom = "\"" + str(row['wkt']) + "\"^^geosparql:wktLiteral"
lines.append("ogham:GSD" + str(row['id']) + "_geom " + "geosparql:asWKT " + geom + ".")
lines.append("ogham:GSD" + str(row['id']) + "_geom " + "oghamonto:hasEPSG " + "<http://www.opengis.net/def/crs/EPSG/0/4326>" + ".")
# license
lines.append("ogham:GSD" + str(row['id']) + " " + "dct:license" + " <" + "https://creativecommons.org/licenses/by/4.0/deed.de" + "> .")
lines.append("ogham:GSD" + str(row['id']) + " " + "dct:license" + " <" + "http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/" + "> .")
lines.append("ogham:GSD" + str(row['id']) + " " + "dct:creator" + " <" + "https://orcid.org/0000-0002-3246-3531" + "> .")
lines.append("ogham:GSD" + str(row['id']) + " " + "dct:rightsHolder" + " wd:Q3355441 .") # OSi
lines.append("ogham:GSD" + str(row['id']) + " " + "dct:rightsHolder" + " wd:Q7100893 .") # OSNI
# prov-o
lines.append("ogham:GSD" + str(row['id']) + " " + "prov:wasAttributedTo" + " ogham:PythonStonesCIIC .")
lines.append("ogham:GSD" + str(row['id']) + " " + "prov:wasDerivedFrom" + " <https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> .")
lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasDerivedFrom" + " <https://www.opendatani.gov.uk/dataset/osni-open-data-largescale-boundaries-county-boundaries> .")
lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasDerivedFrom" + " <https://data-osi.opendata.arcgis.com/datasets/administrative-areas-osi-national-statutory-boundaries-generalised-20m?geometry=-29.165%2C51.112%2C12.649%2C55.691> .")
lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasAttributedTo" + " wd:Q3355441 .") # OSNI
lines.append("<https://github.com/ogi-ogham/ogham-datav1/blob/main/csv/geodata/" + file_name + ".csv> " + "prov:wasAttributedTo" + " wd:Q3355441 .") # OSi
lines.append("ogham:GSD" + str(row['id']) + " " + "prov:wasGeneratedBy" + " ogham:GSD" + str(row['id']) + "_activity .")
lines.append("ogham:GSD" + str(row['id']) + "_activity " + "rdf:type" + " prov:Activity .")
lines.append("ogham:GSD" + str(row['id']) + "_activity " + "prov:startedAtTime '" + starttime + "'^^xsd:dateTime .")
lines.append("ogham:GSD" + str(row['id']) + "_activity " + "prov:endedAtTime '" + datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + "'^^xsd:dateTime .")
lines.append("ogham:GSD" + str(row['id']) + "_activity " + "prov:wasAssociatedWith" + " ogham:PythonStonesCIIC .")
lines.append("")
files = (len(lines) / 100000) + 1
print("triples", len(lines), "files", int(files))
thiscount = len(lines)
_config.count(thiscount)
# write output files
f = 0
step = 100000
fileprefix = file_name + "_"
prefixes = ""
prefixes += "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\r\n"
prefixes += "@prefix owl: <http://www.w3.org/2002/07/owl#> .\r\n"
prefixes += "@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\r\n"
prefixes += "@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\r\n"
prefixes += "@prefix geosparql: <http://www.opengis.net/ont/geosparql#> .\r\n"
prefixes += "@prefix dc: <http://purl.org/dc/elements/1.1/> .\r\n"
prefixes += "@prefix dct: <http://purl.org/dc/terms/> .\r\n"
prefixes += "@prefix sf: <http://www.opengis.net/ont/sf#> .\r\n"
prefixes += "@prefix prov: <http://www.w3.org/ns/prov#> .\r\n"
prefixes += "@prefix oghamonto: <http://ontology.ogham.link/> .\r\n"
prefixes += "@prefix ogham: <http://lod.ogham.link/data/> .\r\n"
prefixes += "@prefix skos: <http://www.w3.org/2004/02/skos/core#> .\r\n"
prefixes += "@prefix wd: <http://www.wikidata.org/entity/> .\r\n"
prefixes += "\r\n"
for x in range(1, int(files) + 1):
strX = str(x)
filename = dir_path.replace("\\py", "\\geodata") + "\\" + fileprefix + strX + ".ttl"
file = codecs.open(filename, "w", "utf-8")
file.write("# create triples from " + file_name + ".csv \r\n")
file.write("# on " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M") + "\r\n\r\n")
file.write(prefixes)
i = f
for i, line in enumerate(lines):
if (i > f - 1 and i < f + step):
file.write(line)
file.write("\r\n")
f = f + step
print(" > " + fileprefix + strX + ".ttl")
file.close()
print("*****************************************")
print("SUCCESS: closing script")
print("*****************************************")
| 50.348485 | 302 | 0.601113 |
a3e3fec684950a6ef15e2c29444b6402fbf9ea24 | 509 | py | Python | packages/galapagos/scripts/run_parking.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | 2 | 2020-10-26T05:01:35.000Z | 2022-02-14T10:37:17.000Z | packages/galapagos/scripts/run_parking.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | packages/galapagos/scripts/run_parking.py | 100kimch/ros_galapagos | 8f92cb93246c263b61199aef113e43cefc5f3939 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import rospy
from sensor_msgs.msg import CompressedImage, LaserScan
import processor
from constants import PATH_RASPICAM, PATH_USBCAM, PATH_LIDAR, SELECTED_STATE, IS_DEBUG_MODE
IS_DEBUG_MODE = True
SELECTED_STATE = 'parking'
rospy.Subscriber(PATH_RASPICAM, CompressedImage, processor.process_fishcam, queue_size=1)
rospy.Subscriber(PATH_USBCAM, CompressedImage, processor.process_frontcam, queue_size=1)
rospy.Subscriber(PATH_LIDAR, LaserScan, processor.process_lidar, queue_size=1)
| 36.357143 | 91 | 0.844794 |
557f31be472664702243843064c47eaa93177bf4 | 9,019 | py | Python | p4_hlir/hlir/p4_headers.py | TodoOrTODO/p4-hlir | 4dfdf93441cd226d526c10d99b9b7930676bcf58 | [
"Apache-2.0"
] | null | null | null | p4_hlir/hlir/p4_headers.py | TodoOrTODO/p4-hlir | 4dfdf93441cd226d526c10d99b9b7930676bcf58 | [
"Apache-2.0"
] | null | null | null | p4_hlir/hlir/p4_headers.py | TodoOrTODO/p4-hlir | 4dfdf93441cd226d526c10d99b9b7930676bcf58 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p4_core import *
from p4_expressions import p4_expression
from p4_sized_integer import p4_sized_integer
import os
import ast
import inspect
import logging
from collections import OrderedDict
import copy
p4_header_keywords = p4_create_enum("p4_header_keywords", [
"next",
"last",
"auto_width",
"payload",
"signed",
"saturating"
])
P4_NEXT = p4_header_keywords.next
P4_LAST = p4_header_keywords.last
P4_AUTO_WIDTH = p4_header_keywords.auto_width
P4_PAYLOAD = p4_header_keywords.payload
P4_SIGNED = p4_header_keywords.signed
P4_SATURATING = p4_header_keywords.saturating
class p4_field (object):
"""
TODO
"""
def __init__ (self, hlir, instance, name, width, attributes, offset, default):
self.instance = instance
self.name = name
self.width = width
self.attributes = attributes
self.offset = offset
self.default = default
self.calculation = []
self.ingress_read = False
self.ingress_write = False
self.egress_read = False
self.egress_write = False
hlir.p4_fields[str(self)] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_fields[name]
def build(self, hlir):
pass
def __str__ (self):
return str(self.instance)+"."+self.name
def validate_calculated_fields(hlir):
# TODO: generate warnings if these fields get referenced anywhere
field_calcs = {}
for binding in hlir.calculated_fields:
field_name, update_verify_list, _, _ = binding
field = hlir.p4_fields[field_name]
for op, calc_name, if_cond in update_verify_list:
calc = hlir.p4_field_list_calculations[calc_name]
if if_cond:
if_cond.resolve_names(hlir)
field.calculation.append( (op, calc, if_cond) )
class p4_field_list_calculation (p4_object):
"""
TODO
"""
required_attributes = ["name", "input", "algorithm", "output_width"]
allowed_attributes = required_attributes + ["doc"]
def __init__ (self, hlir, name, **kwargs):
p4_object.__init__(self, hlir, name, **kwargs)
if not self.valid_obj:
return
hlir.p4_field_list_calculations[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_field_list_calculations[name]
def build (self, hlir):
for idx, field_list_name in enumerate(self.input):
self.input[idx] = hlir.p4_field_lists[field_list_name]
class p4_field_list (p4_object):
"""
TODO
"""
required_attributes = ["name", "fields"]
allowed_attributes = required_attributes + ["doc"]
def __init__ (self, hlir, name, **kwargs):
p4_object.__init__(self, hlir, name, **kwargs)
if not self.valid_obj:
return
hlir.p4_field_lists[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_field_lists[name]
def build (self, hlir):
self._flat = True
new_fields = []
for field in self.fields:
if type(field) is int:
# new_fields.append(field)
new_fields.append(p4_sized_integer(field))
elif type(field) is p4_sized_integer:
new_fields.append(field)
elif field == "payload":
new_fields.append(P4_PAYLOAD)
elif "." in field:
new_fields.append(hlir.p4_fields[field])
elif field in hlir.p4_header_instances:
instance = hlir.p4_header_instances[field]
for header_field in instance.header_type.layout:
new_fields.append(hlir.p4_fields[instance.name+"."+header_field])
elif field in hlir.p4_field_lists:
new_fields.append(hlir.p4_field_lists[field])
self._flat = False
else: assert(False)
self.fields = new_fields
def flatten (self, hlir):
new_fields = []
for field in self.fields:
if type(field) is p4_field_list:
if not field._flat:
field.flatten(hlir)
new_fields += field.fields
else:
new_fields.append(field)
self.fields = new_fields
self._flat = True
class p4_header_instance (p4_object):
"""
TODO
"""
required_attributes = ["name", "header_type", "metadata"]
allowed_attributes = required_attributes + ["index", "max_index", "virtual", "initializer"]
def __init__(self, hlir, name, **kwargs):
self.base_name = name
if kwargs["index"] != None:
name += "[%s]" % str(kwargs["index"])
p4_object.__init__(self, hlir, name, **kwargs)
if not hasattr(self, "index"):
self.index = None
if not hasattr(self, "max_index"):
self.max_index = None
if not hasattr(self, "initializer"):
self.initializer = {}
if not hasattr(self, "virtual"):
self.virtual = False
self.fields = []
hlir.p4_header_instances[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_header_instances[name]
def build (self, hlir):
self.header_type = hlir.p4_headers[self.header_type]
field_offset = 0
for field in self.header_type.layout.items():
if not self.metadata:
init_value = None
else:
init_value = self.initializer.get(field[0], 0)
assert(type(init_value) is int)
attrs = self.header_type.attributes[field[0]]
self.fields.append(p4_field (hlir,
self, field[0], field[1], attrs,
field_offset, init_value))
if type(field[1]) is int or type(field[1]) is long:
field_offset += field[1]
else:
field_offset = P4_AUTO_WIDTH
break
if field_offset == P4_AUTO_WIDTH:
reverse_fields = self.header_type.layout.items()
reverse_fields.reverse()
field_offset = 0
for field in reverse_fields:
if not self.metadata:
init_value = None
else:
init_value = self.initializer.get(field[0], "0")
if type(field[1]) is int or type(field[1]) is long:
field_offset -= field[1]
attrs = self.header_type.attributes[field[0]]
self.fields.append(p4_field (hlir,
self, field[0], field[1],
attrs, field_offset, init_value))
else:
break
delattr(self, "initializer")
def __str__ (self):
return self.name
class p4_header (p4_object):
required_attributes = ["name", "layout", "attributes"]
allowed_attributes = required_attributes + ["length", "max_length", "doc"]
def __init__ (self, hlir, name, **kwargs):
p4_object.__init__(self, hlir, name, **kwargs)
if not self.valid_obj:
return
self.flex_width = False
for field, width in self.layout.items():
if width == P4_AUTO_WIDTH:
self.flex_width = True
hlir.p4_headers[self.name] = self
@staticmethod
def get_from_hlir(hlir, name):
return hlir.p4_headers[name]
def build(self, hlir):
pass
def p4_field_reference (hlir, str_ref):
# TODO: this function is made obsolete by p4_field.collection, try to
# remove it
tokens = str_ref.split(".")
if len(tokens) != 2:
raise p4_compiler_msg (
"Invalid field reference '"+str_ref+"' (must be of the form 'instance.field')"
)
if tokens[0] not in hlir.p4_header_instances:
raise p4_compiler_msg (
"Reference to undeclared header instance '"+tokens[0]+"'"
)
inst = hlir.p4_header_instances[tokens[0]]
for field in inst.fields:
if field.name == tokens[1]:
return field
raise p4_compiler_msg (
"Reference to invalid field '"+tokens[1]+"' in header instance '"+tokens[0]+"'"
)
| 31.1 | 95 | 0.596075 |
3017b54a7e03962af96a648d1f591c8052abd626 | 15,804 | py | Python | vddb_async/src/task_manager/cleaner.py | dlf412/thunderCopyright | c736c9eefc7c934cc830d9d6f27a00cf147e02aa | [
"MIT"
] | 1 | 2021-06-10T02:56:43.000Z | 2021-06-10T02:56:43.000Z | vddb_async/src/task_manager/cleaner.py | dlf412/thunderCopyright | c736c9eefc7c934cc830d9d6f27a00cf147e02aa | [
"MIT"
] | null | null | null | vddb_async/src/task_manager/cleaner.py | dlf412/thunderCopyright | c736c9eefc7c934cc830d9d6f27a00cf147e02aa | [
"MIT"
] | 1 | 2020-03-25T23:55:58.000Z | 2020-03-25T23:55:58.000Z | from server import server, catch_and_die, cluster
from tm_sqls import *
from task_stat import *
from db_txn import db_execute, db_query, db_result, db_txn
from collections import defaultdict
from functools import partial
import logging
from time import time
from datetime import datetime
from collections import defaultdict
try:
import samplejson as json
except:
import json
from logger import g_logger
from utils import trans2json
import requests
from requests import RequestException
from stats import stats, QUERY_FAILED, QUERY_SUCCESS
class SendMatchesError(Exception): pass
class cleaner_cluster(cluster):
    """Thread cluster that finalizes query tasks.

    Spawns ``cleaner_threads_num`` cleaner workers and holds the shared
    retry policy, match thresholds, and connection pools they consult.
    """
    def __init__(self, config, db_pool, hbase_pool, clean_cond, manage_cond):
        worker_count = int(config['cleaner_threads_num'])
        super(cleaner_cluster, self).__init__(cleaner, worker_count,
                                              clean_cond, manage_cond)
        # Exponential-backoff parameters used when re-queueing a task.
        self.backoff = int(config['retry_backoff'])
        self.powlim = int(config['max_backoff_power'])
        # backend codes to retry
        # CONFIRM: we're not retrying with other codes
        retry_codes = []
        for key in ('retry_forever_list', 'retry_sometime_list'):
            if config[key] != '':
                retry_codes.extend(int(c) for c in config[key].split(','))
        self.backend_retry = set(retry_codes)
        # Minimum score/duration a fingerprint match must exceed to be kept.
        self.thresholds = defaultdict(dict)
        self.thresholds['audio']['score'] = int(config["fp_audio_score"])
        self.thresholds['video']['score'] = int(config["fp_video_score"])
        self.thresholds['audio']['duration'] = int(config["fp_audio_duration"])
        self.thresholds['video']['duration'] = int(config["fp_video_duration"])
        self.matches_server = config['matches_server']
        self.pool = db_pool
        self.hbase_pool = hbase_pool
class cleaner(cluster.member):
    """Worker thread that finalizes query tasks handed over by the manager.

    For each finished request it either records the matches and notifies the
    matches server (``finish``) or schedules a retry / marks the task failed
    (``doom``), mirroring the final state into HBase.  Methods decorated with
    ``@staticmethod`` that ``yield db_execute/db_query`` are generator bodies
    consumed by ``db_txn`` inside a single DB transaction.
    """
    def __init__(self, owner):
        super(cleaner, self).__init__(owner)
    def do_doom(self, t, code, queue_at, deadline):
        """Transaction body: either re-queue task ``t`` for retry or fail it."""
        logger = logging.getLogger('mwtm_cleaner')
        if queue_at != None and (deadline == None or deadline == 0 or \
           deadline > queue_at):
            # A retry slot exists before the task's deadline: re-queue it.
            logger.debug('to retry task %s, queue at %s', t.uuid, queue_at)
            yield db_execute(RETRY_TASK, queue_at, code, t.id)
            #yield db_execute(RENEW_EVENT, t.uuid, 'retry')
            g_logger.info(trans2json(message="task_uuid:%s, "
                "site_asset_id:%s, deadline:%s, external_id:%s " % (t.uuid,
                t.site_asset_id, deadline, t.external_id),
                action="retry task"))
        else:
            # No retry possible: fail the task and report it unrecognized.
            logger.debug('to fail task %s', t.uuid)
            g_logger.info(trans2json(message="task_uuid:%s, "
                "site_asset_id:%s, external_id:%s" % (t.uuid,
                t.site_asset_id, t.external_id), action="to fail task"))
            rc, _ = yield db_query(CHECK_TASK, t.id)
            if rc <= 0:
                yield db_execute(FAIL_TASK, code, t.id)
            self.send_matches(t, unrecognized=True)
            task_status = db_txn(self.pool, partial(self.load_task_status, t.uuid))
            self.update_hbase_task(task_status)
            stats.incr(QUERY_FAILED, 1)
    @staticmethod
    def update_task(t, code, mc, tr):
        """Transaction body: mark task ``t`` finished with result ``tr``/``code``."""
        # NOTE: status is always query success here
        # Reverse-pipeline tasks with matches are recorded with a fixed
        # 'match'/1000 marker instead of the computed result.
        if t.from_reverse and mc > 0:
            yield db_execute(FINISH_TASK, 'match', 1000, t.id)
        else:
            yield db_execute(FINISH_TASK, tr, code, t.id)
    def update_hbase_task(self, task):
        """Mirror a task's DB status row into the HBase 'task' table."""
        # Timestamps that were never set fall back to "now" (or '0' for
        # queued_at) so HBase always receives a well-formed value.
        billing_time = datetime.utcnow().strftime('%Y%m%dT%H%M%S') \
                       if task.billing_time is None \
                       else task.billing_time.strftime('%Y%m%dT%H%M%S')
        queued_at = '0' \
                    if task.queued_at is None \
                    else task.queued_at.strftime('%Y%m%dT%H%M%S')
        end_query_time = datetime.utcnow().strftime('%Y%m%dT%H%M%S') \
                         if task.end_query_time is None \
                         else task.end_query_time.strftime('%Y%m%dT%H%M%S')
        # NOTE(review): "i:clip_format" is populated from task.clip_url —
        # looks like a copy/paste slip; confirm against the HBase schema.
        self.save_hbase("task", task.uuid,
                        {
                            "s:queued_at":queued_at,
                            "s:status":task.status,
                            "s:query_count":str(task.query_count),
                            "s:billing_time":billing_time,
                            "s:error_code":str(task.error_code),
                            "s:task_result":str(task.task_result),
                            "s:start_query_time":task.start_query_time.strftime('%Y%m%dT%H%M%S'),
                            "s:end_query_time":end_query_time,
                            "i:external_id":str(task.external_id),
                            "i:company_id":str(task.company_id),
                            "i:task_priority":str(task.task_priority),
                            "i:user_id":str(task.user_id),
                            "i:clip_duration":str(task.clip_duration),
                            "i:clip_format":str(task.clip_url),
                            "i:dna_type":str(task.dna_type),
                            "i:query_level":str(task.query_level),
                            "i:is_indexed":str(task.is_indexed),
                            "i:is_requery":str(task.is_requery),
                            "i:result_revision":str(task.result_revision),
                            "i:userClientInfo_id":str(task.userClientInfo_id),
                            "i:created_at":task.created_at.strftime('%Y%m%dT%H%M%S'),
                            "i:compressed_file_size":str(task.compressed_file_size),
                            "i:processed_file_size":str(task.processed_file_size),
                            "i:dna_url":str(task.dna_url)
                        }
                        )
    def save_hbase(self, table, key, value):
        """Put one row (``key`` -> column dict ``value``) into an HBase table."""
        with self.hbase_pool.connection() as conn:
            table = conn.table(table)
            table.put(key, value)
    def read_hbase(self, table, row, columns=None):
        """Read one row (optionally restricted to ``columns``) from HBase."""
        with self.hbase_pool.connection() as conn:
            table = conn.table(table)
            return table.row(row, columns)
    def filter_matches(self, matches):
        """Drop malformed matches and those below the configured thresholds."""
        ms = []
        for m in matches:
            keep = True
            if not 'meta_uuid' in m or m['meta_uuid'] == '':
                # bad match
                self.logger.debug('dropping match with no meta uuid')
                keep = False
            elif not 'meta_name' in m:
                m['meta_name'] = ''
            elif not 'match_type' in m or \
                 not m['match_type'] in ('both', 'video', 'audio'):
                # bad match
                # NOTE(review): extra arg without a %s placeholder — this
                # logging call raises a formatting error when hit; confirm.
                self.logger.debug('dropping match with invalid match type',
                                  m['match_type'])
                keep = False
            else:
                mt = m['match_type']
                # Fill in zero for fields of the *other* media type; a missing
                # field of the match's own type invalidates the match.
                for typ in ('video', 'audio'):
                    for f in ('duration', 'score', 'ref_offset', \
                              'sample_offset'):
                        if not (typ + '_' + f) in m:
                            if typ != mt and mt != 'both':
                                m[typ + '_' + f] = 0
                            else:
                                # bad match
                                self.logger.debug('dropping match with ' +
                                                  'missing field %s, type %s',
                                                  typ + '_' + f, mt)
                                keep = False
                if m['match_type'] in ('both', 'video'):
                    if m['video_score'] <= self.thresholds['video']['score'] or \
                       m['video_duration'] <= self.thresholds['video']['duration']:
                        keep = False
                if m['match_type'] in ('both', 'audio'):
                    if m['audio_score'] <= self.thresholds['audio']['score'] or \
                       m['audio_duration'] <= self.thresholds['audio']['duration']:
                        keep = False
            if keep:
                ms.append(m)
        return ms
    @staticmethod
    def store_crr(t, crr):
        """Transaction body: persist the notification payload (crr), if any."""
        # record crr
        if crr != None and crr != '':
            crr = crr.encode('utf8')
            yield db_execute(STORE_CRR, t.uuid, crr, crr)
    @staticmethod
    def store_matches(t, matches):
        """Transaction body: upsert each match row, registering new metas."""
        for m in matches:
            c, _ = yield db_execute(STORE_MATCH, t.account, m['meta_uuid'],
                                    t.uuid, t.created_at, t.site_asset_id,
                                    m['match_type'], m['video_duration'],
                                    m['video_score'], m['video_sample_offset'],
                                    m['video_ref_offset'], m['audio_duration'],
                                    m['audio_score'], m['audio_sample_offset'],
                                    m['audio_ref_offset'], 'auto_match',
                                    0, t.account, m['match_type'],
                                    m['video_duration'], m['video_score'],
                                    m['video_sample_offset'],
                                    m['video_ref_offset'], m['audio_duration'],
                                    m['audio_score'], m['audio_sample_offset'],
                                    m['audio_ref_offset'], 'auto_match',
                                    0)
            if c > 0:
                # match stored, add meta if not existing
                yield db_execute(ADD_CONTENT, m['meta_uuid'],
                                 m['meta_name'].encode('utf8'))
    @staticmethod
    def record_finish(t, row, code, matches):
        """Transaction body: record the query outcome for task ``t``."""
        yield db_execute(UPDATE_QUERY, t.uuid, code, matches, row)
    @staticmethod
    def check_matches(t):
        """Transaction body: yield True when previous matches exist for ``t``."""
        # check for previous matches
        rc, rs = yield db_query(CHECK_MATCHES, t.uuid)
        yield db_result(rc > 0 and rs[0][0] > 1)
    @staticmethod
    def load_task_status(uuid):
        """Transaction body: load the full status row for a task uuid."""
        rc, rs = yield db_query(LOAD_TASK_STATUS, uuid)
        yield db_result(rs[0])
    def doom(self, t, code, p, res):
        """Decide retry-vs-fail for task ``t`` and apply it via do_doom."""
        self.logger.error("to doom task, task_uuid: %s, site_asset_id: %s, "
            "code: %s, res: %s" % (t.uuid, t.site_asset_id, code, res))
        retry = True
        if code == 0:
            retry = res.code > 0 or (res.code == 0 and \
                    # backend code == 0 -> finish failed
                    # query itself can succeed, so retry
                    (res.backend_code == 0 or \
                    res.backend_code in self.backend_retry))
        elif code == OPS_KILLED:
            retry = False
        queue_at = None
        if retry and p.retry != 'false':
            # Exponential backoff, capped at 2**powlim * backoff seconds.
            power = t.retries if t.retries <= self.powlim else self.powlim
            # NOTE: time() only returns utc timestamp on linux/posix
            queue_at = time() + 2 ** power * self.backoff
        try:
            db_txn(self.pool, partial(self.do_doom, t, code, queue_at,
                                      t.deadline))
        except:
            self.logger.error('failed to doom task:%s, site_asset_id: %s' % (t.uuid,
                t.site_asset_id), exc_info=True)
    def finish(self, t, p, res):
        """Record a successful query: filter, send and persist its matches."""
        self.logger.info('to finish task, task_uuid:%s, site_asset_id:%s',
                         t.uuid, t.site_asset_id)
        self.logger.debug("res:%s " % str(res))
        assert res.matches != None
        code = WITHOUT_MATCH if len(res.matches) == 0 else WITH_MATCH
        if code == WITHOUT_MATCH:
            # A task with no fresh matches still counts as matched if an
            # earlier query stored matches for the same uuid.
            try:
                if db_txn(self.pool, partial(self.check_matches, t)):
                    code = WITH_MATCH
            except:
                pass
        tr = 'match' if code == WITH_MATCH else 'no_match'
        self.logger.debug('record finished task %s, site_asset_id: %s',
                          t.uuid, t.site_asset_id)
        try:
            ms = self.filter_matches(res.matches)
            for m in ms:
                g_logger.info(trans2json(message="company_id:%s, "
                    "meta_uuid:%s, instance_uuid:%s, vddb_company_id:%s" % (t.account, m['meta_uuid'],
                    m['instance_id'], m['company_id']),
                    action='matches info'))
            mc = len(ms)
            #m = match_saver(self.hbase_pool, self.redis_conn, task_status, ms, res.crr)
            #m.save()
            self.send_matches(t, ms, res.crr)
            task_status = db_txn(self.pool, partial(self.load_task_status, t.uuid))
            self.update_hbase_task(task_status)
            db_txn(self.pool, partial(self.update_task, t, code, mc, tr))
        except:
            self.logger.error('failed to finish task: %s, site_asset_id: %s' %
                (t.uuid, t.site_asset_id), exc_info=True)
            # dooming may succeed, as it touches fewer tables
            self.doom(t, INTERNAL, p, res)
            return
        g_logger.info(trans2json(message="site_asset_id:%s, "
            "task_uuid:%s, external_id:%s" % (t.site_asset_id, t.uuid,
            t.external_id), action="task query complete"))
        stats.incr(QUERY_SUCCESS, 1)
    def send_matches(self, task, matches=[], crr="", unrecognized=False):
        """POST the task's matches (or no_match/unrecognized) as JSON-RPC.

        Raises SendMatchesError on any transport or non-200 failure.
        """
        # NOTE(review): mutable default ``matches=[]`` — harmless here since
        # it is never mutated, but worth replacing with None.
        match_type = "no_match"
        if unrecognized:
            match_type = "unrecognized"
        elif len(matches):
            match_type = 'match'
        # NOTE(review): eval() on a DB-sourced string is unsafe; prefer
        # json.loads / ast.literal_eval for site_asset_id.
        data = dict(id="null", jsonrpc="2.0",
                    method="matches",
                    params=dict(matches=matches,
                    site_asset_id=eval(task.site_asset_id), notification=crr,
                    match_type=match_type))
        params = dict(source="auto_match")
        req = None
        try:
            req = requests.post(self.matches_server, params=params,
                                data=json.dumps(data))
            if req.status_code != 200:
                self.logger.error("send matches failed, code:%s", req.status_code)
                raise SendMatchesError("send matches faild, task_id:%s" % task.uuid)
        except RequestException:
            self.logger.error("send matches failed, %s", task.uuid, exc_info=True)
            raise SendMatchesError("send matches faild")
        self.logger.info("send matches success, task_uuid:%s, site_asset_id:%s,"
                         "external_id:%s", task.uuid, task.site_asset_id,
                         task.external_id)
        g_logger.info(trans2json(message="task_uuid:%s, "
            "site_asset_id:%s, external_id:%s " % \
            (task.uuid, task.site_asset_id,
            task.external_id), action="send matches success"))
    @catch_and_die('mwtm_cleaner')
    def run(self):
        """Thread entry point: process batches until killed."""
        while True:
            self.loop()
    def task_failed(self, res):
        """Return True when ``res`` does not represent a usable query result."""
        return (res == None or res.code == None or res.backend_code == None or
                res.code != 0 or res.backend_code != 0 or res.matches == None)
    def loop(self):
        """Fetch one batch of finished requests and finish/doom each task."""
        reqs = self.requests(True)
        accs = self.accounts()
        for (t, code, res) in reqs:
            # Skip tasks whose account is no longer active.
            if not t.account in accs:
                continue
            self.logger.info('receive task from manager, task_uuid:%s, site_asset_id:%s',
                             t.uuid, t.site_asset_id)
            if code != 0 or self.task_failed(res):
                if code == 0:
                    code = INTERNAL
                self.doom(t, code, accs[t.account], res)
            else:
                self.finish(t, accs[t.account], res)
| 45.413793 | 98 | 0.514047 |
9b64c50342b0471ae939e6fdbe8f3a0250c47f00 | 36,498 | py | Python | TrainingExtensions/common/src/python/aimet_common/winnow/mask_propagator.py | Rohan-Chaudhury/aimet | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/common/src/python/aimet_common/winnow/mask_propagator.py | Rohan-Chaudhury/aimet | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | [
"BSD-3-Clause"
] | null | null | null | TrainingExtensions/common/src/python/aimet_common/winnow/mask_propagator.py | Rohan-Chaudhury/aimet | 1c38cac8cc0fd32dca40ce5e39940805d29f7a4a | [
"BSD-3-Clause"
] | 1 | 2021-03-06T18:40:33.000Z | 2021-03-06T18:40:33.000Z | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
#
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
#
# =============================================================================
""" Contains functionality related to all aspects of propagating the masks. """
from typing import List, Union, Dict
from aimet_common.connected_graph.operation import Op, determine_preceding_op_input_product_index_in_multi_input_op, \
determine_succeeding_op_output_product_index_in_multi_output_op
from aimet_common.connected_graph.connectedgraph import ConnectedGraph
from aimet_common.connected_graph.product import Product
from aimet_common.winnow.mask import Mask, NullInternalConnectivity, DirectInternalConnectivity, \
SplitInternalConnectivity, SkipInternalConnectivity, AddInternalConnectivity, StopInternalConnectivity, \
ConcatInternalConnectivity
from aimet_common.winnow.winnow_utils import get_zero_positions_in_binary_mask, get_conv_ops_for_api
from aimet_common.utils import AimetLogger, ModelApi, api_channel_index_dict
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Winnow)
class MaskPropagator:
""" The MaskPropagator class encapsulates the mask propagation functionality.
It is responsible for Forward and Backward mask propagation within a module
as well as inter module mask propagation."""
def __init__(self, conn_graph: ConnectedGraph, model_api: ModelApi):
"""
Initialize the MaskPropagator
:param conn_graph: The computing graph that represents the model.
"""
self._graph = conn_graph
self._ops = self._graph.get_all_ops()
self._products = self._graph.get_all_products()
self._mask_changed = False
self._model_api = model_api
self._op_to_mask_dict = {}
self._create_masks()
@property
def op_to_mask_dict(self) -> Dict[Op, Mask]:
""" Return the op_to_mask_dict """
return self._op_to_mask_dict
def _create_masks(self):
""" Create masks for each op in the connected graph that leads to a conv op """
for op in self._ops.values():
# TODO: Only creating masks for ops that lead to conv ops was only tested for TF. See if the same can be
# done for pytorch, where we traditionally created masks for all ops.
if self._model_api == ModelApi.tensorflow:
if op.type in get_conv_ops_for_api(self._model_api):
self._create_masks_for_op_and_all_ancestors(op)
else:
self._create_masks_for_op_and_all_ancestors(op)
def _create_masks_for_op_and_all_ancestors(self, op: Op):
"""
Create mask for the current op, as well as all ancestors of the op
:param op: Op to create mask for as well as all ancestor ops
"""
dfs_queue = [op]
while dfs_queue:
current_op = dfs_queue.pop()
# If current_op already has a mask, it means we have already created masks for it and all ancestors from a
# conv descendant.
if current_op in self._op_to_mask_dict.keys():
continue
if current_op.inputs:
input_shape = current_op.inputs[0].shape
if input_shape:
current_op.num_in_channels = input_shape[api_channel_index_dict[self._model_api]]
if current_op.output:
output_shape = current_op.output.shape
if output_shape:
current_op.num_out_channels = output_shape[api_channel_index_dict[self._model_api]]
self._op_to_mask_dict[current_op] = Mask(current_op, self._model_api)
input_products = current_op.get_input_products()
if input_products:
for inp in input_products:
# Pytorch input products don't have Ops attached to them, while TF ones do
# Check that the input product does indeed have a producer
if inp.producer:
dfs_queue.append(inp.producer)
def propagate_masks(self):
""" Propagate the masks within the module and between the modules. """
# Print the masks before mask propagation starts.
self._print_all_ip_op_masks_zero_indices()
# Increasing the iterations to make sure masks are propagated.
for n in range(20):
self._propagate_intra_module_masks()
logger.debug("After Intra: %s", n)
self._print_all_ip_op_masks_zero_indices()
self._propagate_inter_module_masks()
logger.debug("After Inter: %s", n)
self._print_all_ip_op_masks_zero_indices()
# Mask propagation has been completed.
# Validate and adjust the multi-input and multi-output Ops.
self._validate_and_adjust_masks_for_multi_input_multi_output_ops()
logger.debug("After Validating and adjusting masks.")
self._print_all_ip_op_masks_zero_indices()
def _propagate_intra_module_masks(self):
""" Propagate the output channel masks to input channel masks, followed by
propagating the input channel masks to output channel masks. """
for op, _ in self._op_to_mask_dict.items():
self._op_to_mask_dict[op].propagate_internal_connectivity_out_channels_to_in_channels()
self._op_to_mask_dict[op].propagate_internal_connectivity_in_channels_to_out_channels()
    def _propagate_inter_module_masks(self):
        """ Propagate masks between Ops. In the case of Ops with multiple inputs and/or outputs, masks must be
        propagated through all the branches. """
        for a_product in self._products.values():
            # The Product class represents the following entities in a model.
            # 1) a Tensor between two modules (Ops)
            # 2) an input Tensor
            # 3) a constant
            # 4) a parameter
            # For inter module mask propagation, only Products between two Ops are considered.
            inter_module = a_product.is_inter_module()
            if inter_module and a_product.producer in self._op_to_mask_dict:
                # This Product is between two Ops
                producer = a_product.producer
                # If parent op is stop connectivity, do not propagate mask up
                if isinstance(self._op_to_mask_dict[producer].internal_connectivity, StopInternalConnectivity):
                    continue
                # Look at the Producer Op and the consumer Op of the product and propagate the masks between them.
                consumers = a_product.consumers
                for consumer in consumers:
                    # Only consumers that were given a mask participate.
                    if consumer in self._op_to_mask_dict.keys():
                        consumer_connectivity = self._op_to_mask_dict[consumer].internal_connectivity
                        # If consumer op is stop connectivity, do not propagate mask up
                        if isinstance(consumer_connectivity, StopInternalConnectivity):
                            continue
                        # Dispatch on the consumer's connectivity type: Concat,
                        # Add and Skip each need specialized upward propagation.
                        if isinstance(consumer_connectivity, ConcatInternalConnectivity):
                            self._propagate_up_concat_inter_module_masks(consumer, a_product)
                        elif isinstance(consumer_connectivity, AddInternalConnectivity):
                            self._propagate_up_add_masks(consumer, a_product)
                        elif isinstance(consumer_connectivity, SkipInternalConnectivity):
                            # Get the Op's output product's consumer and propagate up that consumer's mask.
                            self._propagate_up_skip_masks(consumer, a_product)
                        else:
                            # Consumers that are not Add or Concat
                            assert isinstance(consumer_connectivity, (DirectInternalConnectivity,
                                                                      NullInternalConnectivity,
                                                                      SplitInternalConnectivity))
                            self._set_inter_module_producer_output_and_consumer_input_mask(consumer, a_product)
def _validate_and_adjust_masks_for_multi_input_multi_output_ops(self):
""" For Split, Add and Concat Ops, validate the integrity of the input and output masks.
Some of the masks might have to be adjusted. """
for op in (op for op, _ in self._op_to_mask_dict.items()):
internal_connectivity = self._op_to_mask_dict[op].internal_connectivity
if isinstance(internal_connectivity, SplitInternalConnectivity):
self._validate_and_adjust_split_op_masks(op)
elif isinstance(internal_connectivity, AddInternalConnectivity):
self._validate_and_adjust_add_op_masks(op, self._model_api)
elif isinstance(internal_connectivity, ConcatInternalConnectivity):
self._validate_and_adjust_concat_op_masks(op)
def _adjust_masks_for_upsample_ops(self):
""" For tensorflow mask propagation, if any upsample mask has changed, reset it to default mask.
This will cause downsample/upsample ops to again be inserted before or after the current upsample op.
Naive inefficient method for dealing with the situation, can look into alternative methods for actually
winnowing upsample op """
for op in (op for op, _ in self._op_to_mask_dict.items()):
if op.type == 'Upsample':
op_mask = self._op_to_mask_dict[op]
in_masks, out_masks = op_mask.input_channel_masks, op_mask.output_channel_masks
# Adjust all input masks
in_mask = in_masks[0]
in_mask_length = len(in_mask)
modified_mask = [1 for _ in range(in_mask_length)]
op_mask.set_input_channel_mask(0, modified_mask)
# Adjust the single output mask
output_mask = out_masks[0]
out_mask_length = len(output_mask)
out_modified_mask = [1 for _ in range(out_mask_length)]
op_mask.set_output_channel_mask(0, out_modified_mask)
    def _print_all_ip_op_masks_zero_indices(self):
        """ Print the input and output channel masks of the Ops.
        Only mask indices for masked channels are printed.
        If a module has a mask with default value (all 1s), it is printed as []
        indicating no channels are masked. """
        for op, _ in self._op_to_mask_dict.items():
            ip_mask_zero_positions_list = []
            op_mask_zero_positions_list = []
            ip_masks = self._op_to_mask_dict[op].input_channel_masks
            if ip_masks:
                for num in range(len(ip_masks)):
                    ip_mask_zero_positions = [i for i in range(len(ip_masks[num])) if ip_masks[num][i] == 0]
                    # TODO: remove 'Add', 'Concat' when old CG is gone
                    # Multi-branch ops always report every mask (even all-ones)
                    # so branch positions stay aligned in the log output.
                    if (op.type in ('Add', 'Concat', 'Split', 'add', 'cat') and
                            self._model_api == ModelApi.pytorch) or \
                            (op.type in ('Add', 'ConcatV2', 'branch') and self._model_api == ModelApi.tensorflow):
                        ip_mask_zero_positions_list.append(ip_mask_zero_positions)
                    else:
                        if ip_mask_zero_positions:
                            ip_mask_zero_positions_list.append(ip_mask_zero_positions)
            op_masks = self._op_to_mask_dict[op].output_channel_masks
            if op_masks:
                for num in range(len(op_masks)):
                    op_mask_zero_positions = [i for i in range(len(op_masks[num])) if op_masks[num][i] == 0]
                    # TODO: remove 'Add', 'Concat' when old CG is gone
                    if (op.type in ('Add', 'Concat', 'Split', 'add', 'cat') and
                            self._model_api == ModelApi.pytorch) or \
                            (op.type in ('Add', 'ConcatV2', 'branch') and self._model_api == ModelApi.tensorflow):
                        op_mask_zero_positions_list.append(op_mask_zero_positions)
                    else:
                        if op_mask_zero_positions:
                            op_mask_zero_positions_list.append(op_mask_zero_positions)
            # Log only if either input_masks or output masks are non-empty
            if ip_mask_zero_positions_list or op_mask_zero_positions_list:
                logger.debug("Op: %s ip mask zero indices: %s, op mask zero indices: %s",
                             op.dotted_name, ip_mask_zero_positions_list, op_mask_zero_positions_list)
def get_ops_with_non_default_ip_op_masks(self) -> List[Op]:
""" Returns a list of Ops whose input and/or output channel default masks have been modified. """
list_of_mask_modified_ops = []
for op, _ in self._op_to_mask_dict.items():
check_op = False
if self._model_api == ModelApi.pytorch and op.type in ('Dropout', 'Relu', 'ReLU', 'MaxPool', 'MaxPool2d',
'AveragePool', 'Neg', 'BatchNorm2d',
'Conv', 'Conv2d', 'Conv2D', 'convolution',
'batch_norm'):
check_op = True
elif self._model_api == ModelApi.tensorflow:
# marking any changed op as a modified op for tensorflow
check_op = True
if check_op:
op_mask = self._op_to_mask_dict[op]
ip_masks, op_masks = op_mask.input_channel_masks, op_mask.output_channel_masks
modified = False
for ip_mask in ip_masks:
in_zero_channels = get_zero_positions_in_binary_mask(ip_mask)
if in_zero_channels:
modified = True
continue
# None of the input masks have been modified. Check the output masks.
if op_masks:
for op_mask in op_masks:
out_zero_channels = get_zero_positions_in_binary_mask(op_mask)
if out_zero_channels:
modified = True
continue
if modified:
list_of_mask_modified_ops.append(op)
return list_of_mask_modified_ops
    def _is_module_reshape_needed(self, op: Op):
        """
        Tells whether the module requires reshaping during winnowing.
        :param op: Determine if this Op is in reshape scenario
        :return: True, if the module requires reshaping.
                 False, if the module doesn't require reshaping.
        """
        # Look at the Op's input Op. If any of them have multiple inputs and/or outputs,
        # then module requires reshaping. If the previous Op is a single input, single output Op,
        # check if it is a Conv Op. If Conv Op, then module doesn't require reshaping. If it is
        # not a Conv, Op keep looking up the next input Op, until a Conv module or a multi-input/output Op
        # is reached.
        # Conv module has one input Op.
        input_product = op.inputs[0]
        input_op = input_product.producer
        # TODO: remove 'Add', 'Concat' when old CG is gone
        if (input_op.type in ('Add', 'Concat', 'Split', 'add', 'cat') and self._model_api == ModelApi.pytorch) or \
                (input_op.type in ('Add', 'ConcatV2', 'branch', 'Upsample', 'Downsample') and self._model_api ==
                 ModelApi.tensorflow) or isinstance(self._op_to_mask_dict[input_op].internal_connectivity,
                                                    StopInternalConnectivity):
            logger.debug("Op: %s, below: %s", op.dotted_name, input_op.dotted_name)
            return True
        # TODO: remove 'Conv2d' when old CG is gone
        # NOTE(review): "input_op.type in 'Conv2D'" is a substring test (it also
        # matches e.g. 'Conv'), not an equality test — confirm this is intended.
        if (input_op.type in ['Conv2d', 'convolution'] and self._model_api == ModelApi.pytorch) or \
                (input_op.type in 'Conv2D' and self._model_api == ModelApi.tensorflow):
            logger.debug("Op: %s, below: %s", op.dotted_name, input_op.dotted_name)
            return False
        # Keep walking up through single-input/single-output ops.
        return self._is_module_reshape_needed(input_op)
def _set_inter_module_producer_output_and_consumer_input_mask(self, consumer_op: Op, input_product: Product):
"""
Set the product's producer op's output mask and the product's consumer op's output mask.
:param consumer_op: Consumer op whose input mask will be set
:param input_product: Product with consumer op as output
"""
producer = input_product.producer
producer_mask = self._op_to_mask_dict[producer]
producer_out_masks = producer_mask.output_channel_masks
consumer_op_mask = self._op_to_mask_dict[consumer_op]
consumer_in_masks = consumer_op_mask.input_channel_masks
consumer_mask_index = None
producer_mask_index = None
# Determine the consumer mask index
num_consumer_in_masks = len(consumer_in_masks)
if num_consumer_in_masks == 1:
consumer_mask_index = 0
elif num_consumer_in_masks > 1:
consumer_mask_index = determine_preceding_op_input_product_index_in_multi_input_op(producer, consumer_op)
else:
logger.error("Number of input masks for Op: %s is None", consumer_op.dotted_name)
# Determine the producer mask index
num_producer_out_masks = len(producer_out_masks)
if num_producer_out_masks == 1:
producer_mask_index = 0
elif num_producer_out_masks > 1:
producer_mask_index = determine_succeeding_op_output_product_index_in_multi_output_op(consumer_op, producer)
else:
logger.error("Number of output masks for Product: %s is None", input_product.name)
# Create the connection mask and set the Producer Op's output mask and the Consumer Op's input mask.
connection_mask = producer_out_masks[producer_mask_index] and consumer_in_masks[consumer_mask_index]
producer_mask.set_output_channel_mask(producer_mask_index, connection_mask)
logger.debug("Connection propagation: Op: %s, Product: %s, number of producer masks: %s, "
"number of consumer masks: %s, Connection mask: %s",
consumer_op.dotted_name,
input_product.name,
len(producer_out_masks),
len(consumer_in_masks),
get_zero_positions_in_binary_mask(connection_mask))
def _propagate_up_concat_inter_module_masks(self, concat_op: Op, input_product: Product):
"""
Concat Op has multiple inputs. The input index is maintained in the same order in which the inputs were
mentioned in the torch.cat operation in the forward() function of the model. Look at the number of input
channels associated with each input and propagate up only the masks for those channels.
:param concat_op: The Concat Op for which the mask associated with the input_product must be propagated up.
:param input_product: One of the products for which the Concat Op is the consumer. The corresponding mask must
be propagated through this product to it's Producer Op.
"""
logger.debug("Propagate up concat: For Concat Op: %s, all input product names: %s", concat_op.dotted_name,
[input_product.name for input_product in concat_op.inputs])
concat_op_mask = self._op_to_mask_dict[concat_op]
# Need only input masks for propagating up to the previous op. Ignore the output mask.
concat_in_masks = concat_op_mask.input_channel_masks
logger.debug("Propagate up concat: Processing input product: %s. Concat's input mask lengths: %s",
input_product.name, [len(concat_mask) for concat_mask in concat_in_masks])
# For the Concat Op, look at all the input Ops and find the input Op that matches with this specific input
# product.
for concat_input_op_index, input_op in enumerate(concat_op.input_ops):
logger.debug("Propagate up concat: input Op: %s, Concat Op's index for this input op: %s, mask length: %s",
input_op.dotted_name, concat_input_op_index, len(concat_in_masks[concat_input_op_index]))
if input_product.producer.dotted_name == input_op.dotted_name:
logger.debug("Propagate up concat: Matching Product: %s with input_op: %s", input_product.name,
input_op.dotted_name)
for product_consumer_index in range(len(input_product.consumers)):
if input_product.consumers[product_consumer_index].dotted_name == concat_op.dotted_name:
logger.debug("Propagate up concat: Input op's index for the Concat Op: %s",
product_consumer_index)
# For the input Op, look at only the Output mask. That is the one going to be over written
# during this inter module mask propagation.
input_op_mask = self._op_to_mask_dict[input_product.producer]
input_producer_out_masks = input_op_mask.output_channel_masks
connection_mask = input_producer_out_masks[product_consumer_index] and \
concat_in_masks[concat_input_op_index]
if input_product.producer.type in 'Split':
logger.debug("Not propagating masks from Concat: %s to Split: %s", concat_op.dotted_name,
input_product.producer.dotted_name)
mask_length = len(concat_in_masks[concat_input_op_index])
modified_mask = [1 for _ in range(mask_length)]
# concat_op_mask.set_input_channel_mask(product_consumer_index, modified_mask)
concat_op_mask.set_input_channel_mask(concat_input_op_index, modified_mask)
else:
input_op_mask.set_output_channel_mask(product_consumer_index, connection_mask)
concat_op_mask.set_input_channel_mask(concat_input_op_index, connection_mask)
# No need to check other consumers.
break
logger.debug("Propagate up concat: Completed processing input product: %s, input mask lengths: %s",
input_product.name, [len(concat_mask) for concat_mask in concat_in_masks])
def _propagate_up_add_masks(self, add_op: Op, product: Product):
"""
Add has multiple inputs (i.e., input Products). If an input Product is originating from a Split Op,
do not propagate the mask up through that Product. This function is being called once for each one
of the Add's input Products.
:param add_op: The Add op for which masks are getting propagated.
:param product: The product through which masks are considered to be propagated.
"""
logger.debug("propagate_up_add_masks: Add's inputs: %s", [product.name for product in add_op.inputs])
for index in range(len(add_op.inputs)):
# get the product.
# look at the product shape[1]
# Propagate only those channel masks up.
a_product = add_op.inputs[index]
if a_product.producer.dotted_name == product.producer.dotted_name:
if isinstance(self._op_to_mask_dict[a_product.producer].internal_connectivity,
SplitInternalConnectivity):
add_op_mask = self._op_to_mask_dict[add_op]
logger.debug("Not propagating to Split. Restoring mask to default value.")
input_masks = add_op_mask.input_channel_masks
mask_length = len(input_masks[index])
modified_mask = [1 for _ in range(mask_length)]
add_op_mask.set_input_channel_mask(index, modified_mask)
else:
self._set_inter_module_producer_output_and_consumer_input_mask(add_op, product)
def _propagate_up_skip_masks(self, skip_op: Op, product: Product):
    """
    Propagate up mask from skip op's child op to skip op's parent op.

    :param skip_op: The Skip op for which masks are getting propagated.
    :param product: The product through which masks are considered to be propagated.
    """
    if skip_op.output:
        skip_consumer_op = skip_op.output.consumers[0]
        producer = product.producer
        producer_mask = self._op_to_mask_dict[producer]
        producer_out_masks = producer_mask.output_channel_masks
        consumer_op_mask = self._op_to_mask_dict[skip_consumer_op]
        consumer_in_masks = consumer_op_mask.input_channel_masks
        # The producer could be a multi-output producer. Determine which output mask (index) should be used.
        producer_mask_index = determine_succeeding_op_output_product_index_in_multi_output_op(skip_op, producer)
        # In the case of Skip Op, there is only one consumer.
        consumer_mask_index = 0
        # Create the connection mask and set the Producer Op's output mask and the Consumer Op's input mask.
        # NOTE(review): `and` between two lists is NOT an element-wise AND — it evaluates to the
        # consumer's mask whenever the producer's mask is a non-empty list. Confirm this shortcut
        # (adopting the consumer's input mask wholesale) is the intended semantics here.
        connection_mask = producer_out_masks[producer_mask_index] and consumer_in_masks[consumer_mask_index]
        producer_mask.set_output_channel_mask(producer_mask_index, connection_mask)
def _validate_and_adjust_concat_op_masks(self, op: Op):
    """
    Check if the concatenation of the multiple input masks yields the single
    output mask; conservatively reset every input mask and the single output
    mask of the Concat op to the all-ones default (no winnowing).

    :param op: The Concat Op for which masks are validated and adjusted.
    """
    op_mask = self._op_to_mask_dict[op]

    # Reset each input mask to its default value (keep every channel).
    for position, current_mask in enumerate(op_mask.input_channel_masks):
        op_mask.set_input_channel_mask(position, [1] * len(current_mask))

    # Concat produces exactly one output; reset its mask as well.
    current_out_mask = op_mask.output_channel_masks[0]
    op_mask.set_output_channel_mask(0, [1] * len(current_out_mask))
def _validate_and_adjust_split_op_masks(self, op: Op):
    """
    This function is called as a final step during mask propagation.
    Make sure Split Op's input mask and output masks are the same.
    Propagate the masks downstream so that Ops downstream have the updated mask.

    :param op: the Split Op for which masks are validated and adjusted.
    """
    op_mask = self._op_to_mask_dict[op]
    in_masks, out_masks = op_mask.input_channel_masks, op_mask.output_channel_masks
    # Split Op has one input and multiple output masks
    input_mask = in_masks[0]
    # set all the output masks to the same value as the input mask.
    for index in range(len(out_masks)):
        op_mask.set_output_channel_mask(index, input_mask)
    # The output masks of the split have been adjusted. Now this new mask must be propagated down
    # to Ops further down. This is done so that while reducing Conv modules a local decision
    # could be made based the module above the Conv Op. For this reason, we shouldn't adjust
    # the Conv Op's masks. From Add and Concat Ops, the masks are not propagated to Split Op
    # as this considered as a special-Op to special-Op.
    for consumer in op.output.consumers:
        # Walk a straight chain of directly-connected ops, overwriting their
        # input/output masks with the Split's input mask, until the chain ends
        # (no output) or a non-direct-connectivity op is reached.
        while consumer in self._op_to_mask_dict.keys() and \
                isinstance(self._op_to_mask_dict[consumer].internal_connectivity, DirectInternalConnectivity):
            self._op_to_mask_dict[consumer].set_input_channel_mask(0, input_mask)
            if not consumer.output:
                # Chain ends here: input mask set, but there is no output to mask.
                break
            self._op_to_mask_dict[consumer].set_output_channel_mask(0, input_mask)
            logger.debug("Masks adjusted for: %s, %s", consumer.dotted_name, consumer.type)
            # NOTE(review): only the first consumer of each chain link is followed —
            # presumably direct-connectivity ops have a single consumer; confirm.
            consumer = consumer.output.consumers[0]
def _validate_and_adjust_add_op_masks(self, op: Op, model_api: ModelApi):
    """
    Check that all input masks of the Add op agree; if they do not, reset every
    input mask and the single output mask to the all-ones default and push the
    default mask to the ops downstream of the Add.

    An Add op has multiple inputs and a single output. During intra-module
    back-propagation the output mask is copied to all input masks, so in the
    ideal case the input masks are identical. When the Add is fed by a Split,
    inter-module propagation leaves that input at the all-ones default, which
    can make the input masks disagree.

    :param op: the Add Op for which masks are validated and adjusted.
    :param model_api: either tensorflow or pytorch
    """
    op_mask = self._op_to_mask_dict[op]
    in_masks = op_mask.input_channel_masks
    out_masks = op_mask.output_channel_masks

    if all(mask == in_masks[0] for mask in in_masks):
        logger.debug("Valid masks for Add Op: %s", op.dotted_name)
        return

    # Input masks disagree: fall back to the no-winnow default,
    # sized like the Add's single output mask.
    default_mask = [1] * len(out_masks[0])
    for position in range(len(in_masks)):
        op_mask.set_input_channel_mask(position, default_mask)
    # For Add, there is only one output mask.
    op_mask.set_output_channel_mask(0, default_mask)
    logger.debug("Invalid masks for Add Op: %s", op.dotted_name)

    # Update downstream Ops' masks as long as the Op is not a Conv, so that a
    # Conv only needs to inspect its immediate predecessor's output mask to
    # decide whether a DownSampleLayer must be prepended.
    self._adjust_downstream_op_masks(op.output.consumers[0], default_mask, model_api)
def _adjust_downstream_op_masks(self, downstream_op: Op, modified_mask: List[int], model_api: ModelApi):
    """
    Starting with the downstream_op, adjust the input and output masks for the Ops until a Conv Op is reached.

    Recursion stops at Conv ops (so a local reshape decision can still be made
    there) and at ops with stop connectivity.

    :param downstream_op: the starting downstream op
    :param modified_mask: the mask to be set for the downstream Ops
    :param model_api: either tensorflow or pytorch
    """
    if downstream_op.type not in get_conv_ops_for_api(model_api):
        downstream_op_mask = self._op_to_mask_dict[downstream_op]
        if isinstance(self._op_to_mask_dict[downstream_op].internal_connectivity, SplitInternalConnectivity):
            # Downstream Op has single input and multiple outputs.
            downstream_op_mask.set_input_channel_mask(0, modified_mask)
            downstream_out_masks = downstream_op_mask.output_channel_masks
            num_out_masks = len(downstream_out_masks)
            # Fan out: recurse into the consumer behind each Split output.
            for index in range(num_out_masks):
                downstream_op_mask.set_output_channel_mask(index, modified_mask)
                self._adjust_downstream_op_masks(downstream_op.output.consumers[index], modified_mask, model_api)
        elif not isinstance(self._op_to_mask_dict[downstream_op].internal_connectivity,
                            StopInternalConnectivity):
            # Downstream Op has single input and single output.
            downstream_op_mask.set_input_channel_mask(0, modified_mask)
            downstream_op_mask.set_output_channel_mask(0, modified_mask)
            logger.debug("Masks adjusted for: %s", downstream_op.dotted_name)
            if downstream_op.output:
                # Continue down the first (only) consumer of this op's output.
                self._adjust_downstream_op_masks(downstream_op.output.consumers[0], modified_mask, model_api)
        else:
            # Stop propagating downstream if we hit a stop connectivity op
            return
def update_channels_to_winnow(self, name: str, reshape: bool, input_channels_to_winnow: Union[None, List[int]],
                              output_channels_to_winnow: Union[None, List[int]]):
    """ For the Given Op, update the channels to be winnowed.

    :param name: Name of module to winnow to search in ConnectedGraph
    :param reshape: If set to False, UpSampleLayers and DownSampleLayers will not be used in the winnowed model.
                    If set to True, UpSampleLayers and DownSampleLayers will be used in the winnowed model.
    :param input_channels_to_winnow: List of input channels to winnow
    :param output_channels_to_winnow: List of output channels to winnow (currently not supported)
    :raises RuntimeError: if no Op is found for ``name`` in the ConnectedGraph
    """
    module_op = self._graph.get_op_from_module_name(name)
    if not module_op:
        logger.error(" Update channels to winnow: module_op is None for: %s", name)
        # Bug fix: RuntimeError was previously constructed with TWO positional
        # arguments ("...", name), so str(exc) rendered as a tuple. Build a
        # single formatted message instead (exception type is unchanged).
        raise RuntimeError(
            "For the module, an Op was not found in the ConnectedGraph: {}".format(name))

    if reshape:
        # DownSampleLayers and UpSampleLayers can be added as needed.
        self._op_to_mask_dict[module_op].update_channels_to_winnow(input_channels_to_winnow,
                                                                   output_channels_to_winnow)
        return

    # Determine if the Op is right below a Split, Add or Concat.
    # If yes, do not update the channels to winnow.
    if self._is_module_reshape_needed(module_op):
        logger.debug("Reshape flag set to False. Module :%s will not be winnowed.",
                     module_op.dotted_name)
    else:
        self._op_to_mask_dict[module_op].update_channels_to_winnow(input_channels_to_winnow,
                                                                   output_channels_to_winnow)
| 54.151335 | 120 | 0.644008 |
af407c0f206a6c47d9c00c1bba3d8ac33e55686a | 2,217 | py | Python | backend/old_truth_28517/urls.py | crowdbotics-apps/old-truth-28517 | d8f8f6be1c59e8d416711c2053b5a83283957153 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/old_truth_28517/urls.py | crowdbotics-apps/old-truth-28517 | d8f8f6be1c59e8d416711c2053b5a83283957153 | [
"FTL",
"AML",
"RSA-MD"
] | 20 | 2021-07-07T00:10:21.000Z | 2021-07-07T00:10:25.000Z | backend/old_truth_28517/urls.py | crowdbotics-apps/old-truth-28517 | d8f8f6be1c59e8d416711c2053b5a83283957153 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """old_truth_28517 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi

# Primary URL routes: app routers, auth, and the Django admin.
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
]

# Branding for the Django admin site.
admin.site.site_header = "Old Truth"
admin.site.site_title = "Old Truth Admin Portal"
admin.site.index_title = "Old Truth Admin"

# swagger
# drf-yasg schema view; the docs endpoint requires authentication.
api_info = openapi.Info(
    title="Old Truth API",
    default_version="v1",
    description="API documentation for Old Truth App",
)

schema_view = get_schema_view(
    api_info,
    public=True,
    permission_classes=(permissions.IsAuthenticated,),
)

urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# Single-page-app fallbacks: serve index.html for the root and for any path
# not matched above (client-side routing takes over). Must stay last.
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
                        TemplateView.as_view(template_name='index.html'))]
| 35.190476 | 87 | 0.710871 |
2d59e901a187a97cf95400ddad216fa3cedbafbd | 2,173 | py | Python | airflow/cli/commands/plugins_command.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | airflow/cli/commands/plugins_command.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | airflow/cli/commands/plugins_command.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
from typing import Any, Dict, List, Union
from airflow import plugins_manager
from airflow.cli.simple_table import AirflowConsole
from airflow.plugins_manager import PluginsDirectorySource, get_plugin_info
from airflow.utils.cli import suppress_logs_and_warning
def _get_name(class_like_object) -> str:
    """Return a human-readable name for a plugin component.

    Strings and plugins-directory sources are stringified as-is; classes
    yield their class name; any other object yields its type's name.
    """
    if isinstance(class_like_object, (str, PluginsDirectorySource)):
        return str(class_like_object)
    if inspect.isclass(class_like_object):
        return class_like_object.__name__
    return type(class_like_object).__name__
def _join_plugins_names(value: Union[List[Any], Any]) -> str:
    """Comma-join readable names for one plugin component or a list of them."""
    items = value if isinstance(value, list) else [value]
    return ",".join(map(_get_name, items))
@suppress_logs_and_warning
def dump_plugins(args):
    """Dump plugins information"""
    plugins_info: List[Dict[str, str]] = get_plugin_info()
    if not plugins_manager.plugins:
        print("No plugins loaded")
        return

    # For table output, drop columns that are empty for every plugin.
    if args.output == "table":
        # plugins_info[0] is safe to inspect here: at least one plugin is loaded.
        empty_columns = [
            column for column in plugins_info[0]
            if all(not bool(record[column]) for record in plugins_info)
        ]
        for column in empty_columns:
            for record in plugins_info:
                del record[column]

    AirflowConsole().print_as(plugins_info, output=args.output)
364aadb9523f0299c16eac248553e7acbd5db777 | 24,106 | py | Python | pandas/core/indexes/range.py | tomahawk123/pandas | 3fe697f3c9dcb0c0a7a9b91c0098d31b39f51fef | [
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/range.py | tomahawk123/pandas | 3fe697f3c9dcb0c0a7a9b91c0098d31b39f51fef | [
"BSD-3-Clause"
] | 1 | 2016-08-15T12:35:16.000Z | 2016-08-15T12:35:16.000Z | pandas/core/indexes/range.py | tomahawk123/pandas | 3fe697f3c9dcb0c0a7a9b91c0098d31b39f51fef | [
"BSD-3-Clause"
] | 2 | 2017-05-27T03:25:12.000Z | 2021-09-21T21:51:12.000Z | from datetime import timedelta
import operator
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas.compat as compat
from pandas.compat import get_range_parameters, lrange, range
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.common import (
is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCSeries, ABCTimedeltaIndex)
from pandas.core import ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index
class RangeIndex(Int64Index):
    """
    Immutable Index implementing a monotonic integer range.

    RangeIndex is a memory-saving special case of Int64Index limited to
    representing monotonic ranges. Using RangeIndex may in some instances
    improve computing speed.

    This is the default index type used
    by DataFrame and Series when no explicit index is provided by the user.

    Parameters
    ----------
    start : int (default: 0), or other RangeIndex instance
        If int and "stop" is not given, interpreted as "stop" instead.
    stop : int (default: 0)
    step : int (default: 1)
    name : object, optional
        Name to be stored in the index
    copy : bool, default False
        Unused, accepted for homogeneity with other index types.

    See Also
    --------
    Index : The base pandas Index type.
    Int64Index : Index of int64 data.

    Attributes
    ----------
    None

    Methods
    -------
    from_range
    """

    _typ = 'rangeindex'
    _engine_type = libindex.Int64Engine

    # --------------------------------------------------------------------
    # Constructors

    def __new__(cls, start=None, stop=None, step=None,
                dtype=None, copy=False, name=None, fastpath=None):

        if fastpath is not None:
            warnings.warn("The 'fastpath' keyword is deprecated, and will be "
                          "removed in a future version.",
                          FutureWarning, stacklevel=2)
            if fastpath:
                return cls._simple_new(start, stop, step, name=name)

        cls._validate_dtype(dtype)

        # RangeIndex
        if isinstance(start, RangeIndex):
            if name is None:
                name = start.name
            return cls._simple_new(name=name,
                                   **dict(start._get_data_as_items()))

        # validate the arguments
        def ensure_int(value, field):
            # Reject non-scalars and non-integral values with a uniform message.
            msg = ("RangeIndex(...) must be called with integers,"
                   " {value} was passed for {field}")
            if not is_scalar(value):
                raise TypeError(msg.format(value=type(value).__name__,
                                           field=field))
            try:
                new_value = int(value)
                assert(new_value == value)
            except (TypeError, ValueError, AssertionError):
                raise TypeError(msg.format(value=type(value).__name__,
                                           field=field))

            return new_value

        if com._all_none(start, stop, step):
            msg = "RangeIndex(...) must be called with integers"
            raise TypeError(msg)
        elif start is None:
            start = 0
        else:
            start = ensure_int(start, 'start')

        if stop is None:
            # range(n)-style call: the single argument is the stop.
            stop = start
            start = 0
        else:
            stop = ensure_int(stop, 'stop')

        if step is None:
            step = 1
        elif step == 0:
            raise ValueError("Step must not be zero")
        else:
            step = ensure_int(step, 'step')

        return cls._simple_new(start, stop, step, name)

    @classmethod
    def from_range(cls, data, name=None, dtype=None, **kwargs):
        """ Create RangeIndex from a range (py3), or xrange (py2) object. """
        if not isinstance(data, range):
            raise TypeError(
                '{0}(...) must be called with object coercible to a '
                'range, {1} was passed'.format(cls.__name__, repr(data)))

        start, stop, step = get_range_parameters(data)
        return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)

    @classmethod
    def _simple_new(cls, start, stop=None, step=None, name=None,
                    dtype=None, **kwargs):
        result = object.__new__(cls)

        # handle passed None, non-integers
        if start is None and stop is None:
            # empty
            start, stop, step = 0, 0, 1

        if start is None or not is_integer(start):
            # Fall back to full validation (or a generic Index) for odd inputs.
            try:
                return RangeIndex(start, stop, step, name=name, **kwargs)
            except TypeError:
                return Index(start, stop, step, name=name, **kwargs)

        result._start = start
        result._stop = stop or 0
        result._step = step or 1
        result.name = name
        for k, v in compat.iteritems(kwargs):
            setattr(result, k, v)

        result._reset_identity()
        return result

    # --------------------------------------------------------------------

    @staticmethod
    def _validate_dtype(dtype):
        """ require dtype to be None or int64 """
        if not (dtype is None or is_int64_dtype(dtype)):
            raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')

    @cache_readonly
    def _constructor(self):
        """ return the class to use for construction """
        return Int64Index

    @cache_readonly
    def _data(self):
        # Materialized int64 values; computed lazily and cached.
        return np.arange(self._start, self._stop, self._step, dtype=np.int64)

    @cache_readonly
    def _int64index(self):
        # Equivalent materialized Int64Index; used as a fallback for operations
        # whose result cannot be represented as a RangeIndex.
        return Int64Index._simple_new(self._data, name=self.name)

    def _get_data_as_items(self):
        """ return a list of tuples of start, stop, step """
        return [('start', self._start),
                ('stop', self._stop),
                ('step', self._step)]

    def __reduce__(self):
        d = self._get_attributes_dict()
        d.update(dict(self._get_data_as_items()))
        return ibase._new_Index, (self.__class__, d), None

    # --------------------------------------------------------------------
    # Rendering Methods

    def _format_attrs(self):
        """
        Return a list of tuples of the (attr, formatted_value)
        """
        attrs = self._get_data_as_items()
        if self.name is not None:
            attrs.append(('name', ibase.default_pprint(self.name)))
        return attrs

    def _format_data(self, name=None):
        # we are formatting thru the attributes
        return None

    # --------------------------------------------------------------------

    @cache_readonly
    def nbytes(self):
        """
        Return the number of bytes in the underlying data
        On implementations where this is undetermined (PyPy)
        assume 24 bytes for each value
        """
        return sum(getsizeof(getattr(self, v), 24) for v in
                   ['_start', '_stop', '_step'])

    def memory_usage(self, deep=False):
        """
        Memory usage of my values

        Parameters
        ----------
        deep : bool
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption

        Returns
        -------
        bytes used

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False

        See Also
        --------
        numpy.ndarray.nbytes
        """
        return self.nbytes

    @property
    def dtype(self):
        return np.dtype(np.int64)

    @property
    def is_unique(self):
        """ return if the index has unique values """
        return True

    @cache_readonly
    def is_monotonic_increasing(self):
        # Length <= 1 is trivially monotonic in both directions.
        return self._step > 0 or len(self) <= 1

    @cache_readonly
    def is_monotonic_decreasing(self):
        return self._step < 0 or len(self) <= 1

    @property
    def has_duplicates(self):
        return False

    def tolist(self):
        return lrange(self._start, self._stop, self._step)

    @Appender(_index_shared_docs['_shallow_copy'])
    def _shallow_copy(self, values=None, **kwargs):
        if values is None:
            name = kwargs.get("name", self.name)
            return RangeIndex._simple_new(
                name=name, **dict(self._get_data_as_items()))
        else:
            kwargs.setdefault('name', self.name)
            return self._int64index._shallow_copy(values, **kwargs)

    @Appender(ibase._index_shared_docs['copy'])
    def copy(self, name=None, deep=False, dtype=None, **kwargs):
        self._validate_dtype(dtype)
        if name is None:
            name = self.name
        return RangeIndex._simple_new(
            name=name, **dict(self._get_data_as_items()))

    def _minmax(self, meth):
        # Closed-form min/max: no materialization of the values needed.
        no_steps = len(self) - 1
        if no_steps == -1:
            return np.nan
        elif ((meth == 'min' and self._step > 0) or
                (meth == 'max' and self._step < 0)):
            return self._start

        return self._start + self._step * no_steps

    def min(self):
        """The minimum value of the RangeIndex"""
        return self._minmax('min')

    def max(self):
        """The maximum value of the RangeIndex"""
        return self._minmax('max')

    def argsort(self, *args, **kwargs):
        """
        Returns the indices that would sort the index and its
        underlying data.

        Returns
        -------
        argsorted : numpy array

        See Also
        --------
        numpy.ndarray.argsort
        """
        nv.validate_argsort(args, kwargs)

        # A range is already sorted (ascending or descending), so the answer
        # is just the positions in forward or reverse order.
        if self._step > 0:
            return np.arange(len(self))
        else:
            return np.arange(len(self) - 1, -1, -1)

    def equals(self, other):
        """
        Determines if two Index objects contain the same elements.
        """
        if isinstance(other, RangeIndex):
            # Fast structural comparison; step is irrelevant for len <= 1.
            ls = len(self)
            lo = len(other)
            return (ls == lo == 0 or
                    ls == lo == 1 and
                    self._start == other._start or
                    ls == lo and
                    self._start == other._start and
                    self._step == other._step)

        return super(RangeIndex, self).equals(other)

    def intersection(self, other):
        """
        Form the intersection of two Index objects. Sortedness of the result is
        not guaranteed

        Parameters
        ----------
        other : Index or array-like

        Returns
        -------
        intersection : Index
        """

        if self.equals(other):
            return self._get_reconciled_name_object(other)

        if not isinstance(other, RangeIndex):
            return super(RangeIndex, self).intersection(other)

        if not len(self) or not len(other):
            return RangeIndex._simple_new(None)

        # Normalize both operands to ascending ranges.
        first = self[::-1] if self._step < 0 else self
        second = other[::-1] if other._step < 0 else other

        # check whether intervals intersect
        # deals with in- and decreasing ranges
        int_low = max(first._start, second._start)
        int_high = min(first._stop, second._stop)
        if int_high <= int_low:
            return RangeIndex._simple_new(None)

        # Method hint: linear Diophantine equation
        # solve intersection problem
        # performance hint: for identical step sizes, could use
        # cheaper alternative
        gcd, s, t = first._extended_gcd(first._step, second._step)

        # check whether element sets intersect
        if (first._start - second._start) % gcd:
            return RangeIndex._simple_new(None)

        # calculate parameters for the RangeIndex describing the
        # intersection disregarding the lower bounds
        tmp_start = first._start + (second._start - first._start) * \
            first._step // gcd * s
        new_step = first._step * second._step // gcd
        new_index = RangeIndex._simple_new(tmp_start, int_high, new_step)

        # adjust index to limiting interval
        new_index._start = new_index._min_fitting_element(int_low)

        # Restore descending order only when both inputs were descending.
        if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
            new_index = new_index[::-1]
        return new_index

    def _min_fitting_element(self, lower_limit):
        """Returns the smallest element greater than or equal to the limit"""
        no_steps = -(-(lower_limit - self._start) // abs(self._step))
        return self._start + abs(self._step) * no_steps

    def _max_fitting_element(self, upper_limit):
        """Returns the largest element smaller than or equal to the limit"""
        no_steps = (upper_limit - self._start) // abs(self._step)
        return self._start + abs(self._step) * no_steps

    def _extended_gcd(self, a, b):
        """
        Extended Euclidean algorithms to solve Bezout's identity:
           a*x + b*y = gcd(x, y)
        Finds one particular solution for x, y: s, t
        Returns: gcd, s, t
        """
        s, old_s = 0, 1
        t, old_t = 1, 0
        r, old_r = b, a
        while r:
            quotient = old_r // r
            old_r, r = r, old_r - quotient * r
            old_s, s = s, old_s - quotient * s
            old_t, t = t, old_t - quotient * t
        return old_r, old_s, old_t

    def union(self, other):
        """
        Form the union of two Index objects and sorts if possible

        Parameters
        ----------
        other : Index or array-like

        Returns
        -------
        union : Index
        """
        self._assert_can_do_setop(other)

        if len(other) == 0 or self.equals(other) or len(self) == 0:
            return super(RangeIndex, self).union(other)

        if isinstance(other, RangeIndex):
            # Normalize both ranges to ascending (start, step, end) triples.
            start_s, step_s = self._start, self._step
            end_s = self._start + self._step * (len(self) - 1)
            start_o, step_o = other._start, other._step
            end_o = other._start + other._step * (len(other) - 1)
            if self._step < 0:
                start_s, step_s, end_s = end_s, -step_s, start_s
            if other._step < 0:
                start_o, step_o, end_o = end_o, -step_o, start_o
            # Singleton ranges have an arbitrary step; pick a compatible one.
            if len(self) == 1 and len(other) == 1:
                step_s = step_o = abs(self._start - other._start)
            elif len(self) == 1:
                step_s = step_o
            elif len(other) == 1:
                step_o = step_s
            start_r = min(start_s, start_o)
            end_r = max(end_s, end_o)
            # The union is itself a range only in a few alignment cases below;
            # otherwise fall through to the materialized Int64Index union.
            if step_o == step_s:
                if ((start_s - start_o) % step_s == 0 and
                        (start_s - end_o) <= step_s and
                        (start_o - end_s) <= step_s):
                    return RangeIndex(start_r, end_r + step_s, step_s)
                if ((step_s % 2 == 0) and
                        (abs(start_s - start_o) <= step_s / 2) and
                        (abs(end_s - end_o) <= step_s / 2)):
                    return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
            elif step_o % step_s == 0:
                if ((start_o - start_s) % step_s == 0 and
                        (start_o + step_s >= start_s) and
                        (end_o - step_s <= end_s)):
                    return RangeIndex(start_r, end_r + step_s, step_s)
            elif step_s % step_o == 0:
                if ((start_s - start_o) % step_o == 0 and
                        (start_s + step_o >= start_o) and
                        (end_s - step_o <= end_o)):
                    return RangeIndex(start_r, end_r + step_o, step_o)

        return self._int64index.union(other)

    @Appender(_index_shared_docs['join'])
    def join(self, other, how='left', level=None, return_indexers=False,
             sort=False):
        if how == 'outer' and self is not other:
            # note: could return RangeIndex in more circumstances
            return self._int64index.join(other, how, level, return_indexers,
                                         sort)

        return super(RangeIndex, self).join(other, how, level, return_indexers,
                                            sort)

    def _concat_same_dtype(self, indexes, name):
        return _concat._concat_rangeindex_same_dtype(indexes).rename(name)

    def __len__(self):
        """
        return the length of the RangeIndex
        """
        # Ceiling division of the span by the step, clamped at zero.
        return max(0, -(-(self._stop - self._start) // self._step))

    @property
    def size(self):
        return len(self)

    def __getitem__(self, key):
        """
        Conserve RangeIndex type for scalar and slice keys.
        """
        super_getitem = super(RangeIndex, self).__getitem__

        if is_scalar(key):
            if not lib.is_integer(key):
                raise IndexError("only integers, slices (`:`), "
                                 "ellipsis (`...`), numpy.newaxis (`None`) "
                                 "and integer or boolean "
                                 "arrays are valid indices")
            n = com.cast_scalar_indexer(key)
            if n != key:
                return super_getitem(key)
            if n < 0:
                n = len(self) + key
            if n < 0 or n > len(self) - 1:
                raise IndexError("index {key} is out of bounds for axis 0 "
                                 "with size {size}".format(key=key,
                                                           size=len(self)))
            return self._start + n * self._step

        if isinstance(key, slice):

            # This is basically PySlice_GetIndicesEx, but delegation to our
            # super routines if we don't have integers

            length = len(self)

            # complete missing slice information
            step = 1 if key.step is None else key.step
            if key.start is None:
                start = length - 1 if step < 0 else 0
            else:
                start = key.start

                if start < 0:
                    start += length
                if start < 0:
                    start = -1 if step < 0 else 0
                if start >= length:
                    start = length - 1 if step < 0 else length

            if key.stop is None:
                stop = -1 if step < 0 else length
            else:
                stop = key.stop

                if stop < 0:
                    stop += length
                if stop < 0:
                    stop = -1
                if stop > length:
                    stop = length

            # delegate non-integer slices
            if (start != int(start) or
                    stop != int(stop) or
                    step != int(step)):
                return super_getitem(key)

            # convert indexes to values
            start = self._start + self._step * start
            stop = self._start + self._step * stop
            step = self._step * step

            return RangeIndex._simple_new(start, stop, step, name=self.name)

        # fall back to Int64Index
        return super_getitem(key)

    def __floordiv__(self, other):
        if isinstance(other, (ABCSeries, ABCDataFrame)):
            return NotImplemented

        if is_integer(other) and other != 0:
            # Divides exactly: the quotient is still an evenly stepped range.
            if (len(self) == 0 or
                    self._start % other == 0 and
                    self._step % other == 0):
                start = self._start // other
                step = self._step // other
                stop = start + len(self) * step
                return RangeIndex._simple_new(
                    start, stop, step, name=self.name)
            if len(self) == 1:
                start = self._start // other
                return RangeIndex._simple_new(
                    start, start + 1, 1, name=self.name)
        return self._int64index // other

    @classmethod
    def _add_numeric_methods_binary(cls):
        """ add in numeric methods, specialized to RangeIndex """

        def _make_evaluate_binop(op, step=False):
            """
            Parameters
            ----------
            op : callable that accepts 2 parms
                perform the binary op
            step : callable, optional, default to False
                op to apply to the step parm if not None
                if False, use the existing step
            """

            def _evaluate_numeric_binop(self, other):
                if isinstance(other, (ABCSeries, ABCDataFrame)):
                    return NotImplemented
                elif isinstance(other, ABCTimedeltaIndex):
                    # Defer to TimedeltaIndex implementation
                    return NotImplemented
                elif isinstance(other, (timedelta, np.timedelta64)):
                    # GH#19333 is_integer evaluated True on timedelta64,
                    # so we need to catch these explicitly
                    return op(self._int64index, other)
                elif is_timedelta64_dtype(other):
                    # Must be an np.ndarray; GH#22390
                    return op(self._int64index, other)

                other = self._validate_for_numeric_binop(other, op)
                attrs = self._get_attributes_dict()
                attrs = self._maybe_update_attributes(attrs)

                left, right = self, other

                try:
                    # apply if we have an override
                    if step:
                        with np.errstate(all='ignore'):
                            rstep = step(left._step, right)

                        # we don't have a representable op
                        # so return a base index
                        if not is_integer(rstep) or not rstep:
                            raise ValueError

                    else:
                        rstep = left._step

                    with np.errstate(all='ignore'):
                        rstart = op(left._start, right)
                        rstop = op(left._stop, right)

                    result = RangeIndex(rstart,
                                        rstop,
                                        rstep,
                                        **attrs)

                    # for compat with numpy / Int64Index
                    # even if we can represent as a RangeIndex, return
                    # as a Float64Index if we have float-like descriptors
                    if not all(is_integer(x) for x in
                               [rstart, rstop, rstep]):
                        result = result.astype('float64')

                    return result

                except (ValueError, TypeError, ZeroDivisionError):
                    # Defer to Int64Index implementation
                    return op(self._int64index, other)
                    # TODO: Do attrs get handled reliably?

            name = '__{name}__'.format(name=op.__name__)
            return compat.set_function_name(_evaluate_numeric_binop, name, cls)

        cls.__add__ = _make_evaluate_binop(operator.add)
        cls.__radd__ = _make_evaluate_binop(ops.radd)
        cls.__sub__ = _make_evaluate_binop(operator.sub)
        cls.__rsub__ = _make_evaluate_binop(ops.rsub)
        cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
        cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
        cls.__truediv__ = _make_evaluate_binop(operator.truediv,
                                               step=operator.truediv)
        cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv,
                                                step=ops.rtruediv)

        if not compat.PY3:
            cls.__div__ = _make_evaluate_binop(operator.div, step=operator.div)
            cls.__rdiv__ = _make_evaluate_binop(ops.rdiv, step=ops.rdiv)
# Patch the specialized arithmetic and logical dunder methods onto RangeIndex.
RangeIndex._add_numeric_methods()
RangeIndex._add_logical_methods()
| 34.936232 | 79 | 0.538165 |
4968258b2266caf38f6c23b89d8b4d51bea20676 | 1,740 | py | Python | solutions/Fall20/L5/dominanceFrontier.py | mhomidi/bril | a62e1beef1b8193bbdf9bd1f3931ffc7236b714e | [
"MIT"
] | 2 | 2022-02-21T00:23:20.000Z | 2022-02-21T20:03:52.000Z | solutions/Fall20/L5/dominanceFrontier.py | mhomidi/bril | a62e1beef1b8193bbdf9bd1f3931ffc7236b714e | [
"MIT"
] | null | null | null | solutions/Fall20/L5/dominanceFrontier.py | mhomidi/bril | a62e1beef1b8193bbdf9bd1f3931ffc7236b714e | [
"MIT"
] | null | null | null | from dominatorTree import *
from dominators import find_dominators
from solutions.utils.form_blocks import form_blocks
from solutions.utils import cfg
import json
import sys
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, dir_path + '/../../../')
def get_dominance_frontiers(function) -> dict:
dominantors = find_dominators(function)
blocks = cfg.block_map(form_blocks(function['instrs']))
cfg.add_terminators(blocks)
__, succs = cfg.edges(blocks)
dominance_frontiers = dict()
cols = dict()
col = 0
for key in dominantors.keys():
cols[key] = col
col += 1
for node in dominantors.keys():
viewed_nodes = [0 for i in range(col)]
dominance_frontiers[node] = find_dominance_frontier(
node, dominantors, succs, viewed_nodes, cols, node, set()
)
return dominance_frontiers
def find_dominance_frontier(root_node, dominators, succs, viewed_nodes, cols, cur_node, dominance_frontier) -> set:
if viewed_nodes[cols[cur_node]]:
return dominance_frontier
viewed_nodes[cols[cur_node]] = 1
if root_node not in dominators[cur_node]:
dominance_frontier.add(cur_node)
if len(succs[cur_node]) == 0:
return dominance_frontier
for succ in succs[cur_node]:
dominance_frontier.union(find_dominance_frontier(root_node, dominators, succs,
viewed_nodes, cols, succ, dominance_frontier))
return dominance_frontier
if __name__ == '__main__':
code = json.load(sys.stdin)
for function in code['functions']:
dominance_frontiers = get_dominance_frontiers(function)
print(dominance_frontiers)
| 29 | 115 | 0.682759 |
0efae5be55d86436809c1618ec3d9de59e63a9ec | 2,807 | py | Python | BOReL/environments/mujoco/rand_param_envs/gym/envs/safety/semisuper.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 40 | 2021-10-15T14:53:00.000Z | 2022-03-31T02:27:20.000Z | BOReL/environments/mujoco/rand_param_envs/gym/envs/safety/semisuper.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 1 | 2022-03-13T04:02:30.000Z | 2022-03-13T04:02:30.000Z | BOReL/environments/mujoco/rand_param_envs/gym/envs/safety/semisuper.py | hai-h-nguyen/pomdp-baselines | 629180d56641810d99653a116cca41ede65172eb | [
"MIT"
] | 5 | 2021-11-28T04:08:13.000Z | 2022-03-17T02:33:51.000Z | """
Superclass for all semi-supervised envs
These are toy problems but the principle is useful -- RL agents in the real world
will likely be learning from an inconsistent signal. For example, a human might
use a clicker to reward an RL agent but likely wouldn't do so with perfect consistency.
Note: In all semisupervised environmenvts, we judge the RL agent based on their total
true_reward, not their percieved_reward. This means that even if the true_reward happens to
not be shown to the agent for an entire episode, the agent is still being judged
and should still perform as well as possible.
"""
from environments.mujoco.rand_param_envs import gym
class SemisuperEnv(gym.Env):
    """Base class for environments whose reward signal is distorted.

    Subclasses implement ``_distort_reward`` to turn the true reward into the
    (noisy / intermittent) reward the agent actually perceives.
    """

    def step(self, action):
        assert self.action_space.contains(action)
        obs, true_reward, done, info = self._step(action)
        # The monitor evaluates performance on the undistorted reward.
        info["true_reward"] = true_reward
        assert self.observation_space.contains(obs)
        return obs, self._distort_reward(true_reward), done, info
"""
true_reward is only shown to the agent 1/10th of the time.
"""
class SemisuperRandomEnv(SemisuperEnv):
    """Shows the true reward only PROB_GET_REWARD of the time; 0 otherwise."""

    PROB_GET_REWARD = 0.1

    def _distort_reward(self, true_reward):
        # Bernoulli gate: pass the reward through with fixed probability.
        shown = self.np_random.uniform() < SemisuperRandomEnv.PROB_GET_REWARD
        return true_reward if shown else 0
"""
semisuper_pendulum_noise is the pendulum task but where reward function is noisy.
"""
class SemisuperNoiseEnv(SemisuperEnv):
    """Adds zero-mean Gaussian noise to every reward."""

    NOISE_STANDARD_DEVIATION = 3.0

    def _distort_reward(self, true_reward):
        noise = self.np_random.normal(scale=SemisuperNoiseEnv.NOISE_STANDARD_DEVIATION)
        return true_reward + noise
"""
semisuper_pendulum_decay is the pendulum task but where the reward function
is given to the agent less and less often over time.
"""
class SemisuperDecayEnv(SemisuperEnv):
    """Shows the true reward with a probability that decays geometrically."""

    DECAY_RATE = 0.999

    def __init__(self):
        super(SemisuperDecayEnv, self).__init__()
        # Persists across episodes; only reset by constructing a new env.
        self.prob_get_reward = 1.0

    def _distort_reward(self, true_reward):
        # Decay first, then sample whether the reward is revealed this step.
        self.prob_get_reward *= SemisuperDecayEnv.DECAY_RATE
        revealed = self.np_random.uniform() < self.prob_get_reward
        return true_reward if revealed else 0
"""
Now let's make some envs!
"""
from environments.mujoco.rand_param_envs.gym.envs.classic_control.pendulum import (
PendulumEnv,
)
class SemisuperPendulumNoiseEnv(SemisuperNoiseEnv, PendulumEnv):
    # Pendulum with Gaussian-noised rewards (see SemisuperNoiseEnv).
    pass


class SemisuperPendulumRandomEnv(SemisuperRandomEnv, PendulumEnv):
    # Pendulum whose reward is revealed only 1/10th of the time.
    pass


class SemisuperPendulumDecayEnv(SemisuperDecayEnv, PendulumEnv):
    # Pendulum whose reward is revealed with geometrically decaying probability.
    pass
| 27.792079 | 91 | 0.733523 |
02ff668ce52305381839a432f072ea9d065c9bd8 | 1,189 | py | Python | awswrangler/__init__.py | mumblepins/aws-data-wrangler | 4572fb53858ed204bb706d80e9f44fb8dd2e8679 | [
"Apache-2.0"
] | null | null | null | awswrangler/__init__.py | mumblepins/aws-data-wrangler | 4572fb53858ed204bb706d80e9f44fb8dd2e8679 | [
"Apache-2.0"
] | null | null | null | awswrangler/__init__.py | mumblepins/aws-data-wrangler | 4572fb53858ed204bb706d80e9f44fb8dd2e8679 | [
"Apache-2.0"
] | null | null | null | """Initial Module.
Source repository: https://github.com/awslabs/aws-data-wrangler
Documentation: https://aws-data-wrangler.readthedocs.io/
"""
import logging as _logging
from awswrangler import ( # noqa
athena,
catalog,
chime,
cloudwatch,
data_api,
dynamodb,
emr,
exceptions,
lakeformation,
mysql,
opensearch,
postgresql,
quicksight,
s3,
secretsmanager,
sqlserver,
sts,
timestream,
)
from awswrangler.__metadata__ import __description__, __license__, __title__, __version__ # noqa
from awswrangler._config import config # noqa
# Public submodules and metadata re-exported by `import awswrangler`.
__all__ = [
    "athena",
    "catalog",
    "chime",
    "cloudwatch",
    "emr",
    "data_api",
    "dynamodb",
    "exceptions",
    "opensearch",
    "quicksight",
    "s3",
    "sts",
    "lakeformation",
    "mysql",
    "postgresql",
    "secretsmanager",
    "sqlserver",
    "config",
    "timestream",
    "__description__",
    "__license__",
    "__title__",
    "__version__",
]

# `redshift` depends on an optional driver; expose it only when importable.
try:
    from awswrangler import redshift  # noqa

    __all__.append("redshift")
except ModuleNotFoundError:
    pass

# Library convention: emit no log output unless the application configures logging.
_logging.getLogger("awswrangler").addHandler(_logging.NullHandler())
| 18.292308 | 97 | 0.645921 |
9e81e27629dab37504407ca3d2140575273639a2 | 2,827 | py | Python | tests/conftest.py | bollwyvl/pathy | 36c8de95572047862557f4009103db1037816f78 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | bollwyvl/pathy | 36c8de95572047862557f4009103db1037816f78 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | bollwyvl/pathy | 36c8de95572047862557f4009103db1037816f78 | [
"Apache-2.0"
] | null | null | null | import json
import os
import shutil
import tempfile
from pathlib import Path
import pytest
from pathy import Pathy, use_fs, use_fs_cache
from pathy.clients import set_client_params
from pathy.gcs import has_gcs
has_credentials = "GCS_CREDENTIALS" in os.environ
# Which adapters to use
TEST_ADAPTERS = ["gcs", "fs"] if has_credentials and has_gcs else ["fs"]
@pytest.fixture()
def bucket() -> str:
    # Primary bucket name used by the test-suite.
    return "pathy-tests-bucket"


@pytest.fixture()
def other_bucket() -> str:
    # Secondary bucket for cross-bucket operations (copy/rename).
    return "pathy-tests-bucket-other"


@pytest.fixture()
def temp_folder():
    # Yields a fresh temporary directory and removes it after the test.
    tmp_dir = tempfile.mkdtemp()
    yield Path(tmp_dir)
    shutil.rmtree(tmp_dir)


@pytest.fixture()
def with_fs(temp_folder):
    # Runs the test against the local-FS adapter rooted at `temp_folder`.
    yield temp_folder
    # Turn off FS adapter
    use_fs(False)
def credentials_from_env():
    """Extract a credentials instance from the GCS_CREDENTIALS env variable.

    The variable may hold either the JSON *contents* of a service-account
    credentials file or a filesystem *path* to such a file.

    Returns:
        A ``google.oauth2.service_account.Credentials`` instance, or ``None``
        when GCS support is unavailable or the variable is unset.

    Raises:
        Whatever ``google.oauth2`` raises for an invalid file path or
        malformed credentials content.
    """
    if not has_gcs:
        return None
    creds = os.environ.get("GCS_CREDENTIALS", None)
    if creds is None:
        return None

    from google.oauth2 import service_account

    try:
        json_creds = json.loads(creds)
    except json.decoder.JSONDecodeError:
        # Not valid JSON: treat the value as a path to a credentials file.
        json_creds = None

    if json_creds is None:
        return service_account.Credentials.from_service_account_file(creds)
    # JSON content: build credentials directly from the parsed mapping instead
    # of the previous round-trip through a temporary file on disk.
    return service_account.Credentials.from_service_account_info(json_creds)
@pytest.fixture()
def with_adapter(adapter: str, bucket: str, other_bucket: str):
    """Configure the requested storage adapter ("gcs" or "fs"), yield the URI
    scheme to use in tests, and restore global adapter state afterwards."""
    tmp_dir = None
    scheme = "gs"
    if adapter == "gcs":
        # Use GCS
        use_fs(False)
        credentials = credentials_from_env()
        if credentials is not None:
            set_client_params("gs", credentials=credentials)
    elif adapter == "fs":
        # Use local file-system in a temp folder
        tmp_dir = tempfile.mkdtemp()
        use_fs(tmp_dir)
        # Pre-create both test buckets so tests can assume they exist.
        bucket_one = Pathy.from_bucket(bucket)
        if not bucket_one.exists():
            bucket_one.mkdir()
        bucket_two = Pathy.from_bucket(other_bucket)
        if not bucket_two.exists():
            bucket_two.mkdir()
    else:
        raise ValueError("invalid adapter, nothing is configured")

    # execute the test
    yield scheme

    if adapter == "fs" and tmp_dir is not None:
        # Cleanup fs temp folder
        shutil.rmtree(tmp_dir)
    # Always reset global adapter/cache state so tests stay independent.
    use_fs(False)
    use_fs_cache(False)
d1c259c2e06613f74d133d7055d2ffa694714fa7 | 168 | py | Python | todos/admin.py | yuliiabuchko/todo | 2a553ef6177f978126bbd746113eb4a50b9e3dea | [
"MIT"
] | null | null | null | todos/admin.py | yuliiabuchko/todo | 2a553ef6177f978126bbd746113eb4a50b9e3dea | [
"MIT"
] | 1 | 2021-04-08T21:17:38.000Z | 2021-04-08T21:17:38.000Z | todos/admin.py | yuliiabuchko/todo | 2a553ef6177f978126bbd746113eb4a50b9e3dea | [
"MIT"
] | null | null | null | from django.contrib import admin
from todos.models import *
@admin.register(Event, Statistic, Entry, Task, Status, Week)
class TodoAdmin(admin.ModelAdmin):
pass
| 18.666667 | 60 | 0.761905 |
7c02b486b89981f9110dd32d63b4a286719557fa | 21,612 | py | Python | detectron2/data/detection_utils.py | IliasMAOUDJ/detectron2 | 2764af5120d29e63422884d4c9aebcacf74fea44 | [
"Apache-2.0"
] | null | null | null | detectron2/data/detection_utils.py | IliasMAOUDJ/detectron2 | 2764af5120d29e63422884d4c9aebcacf74fea44 | [
"Apache-2.0"
] | null | null | null | detectron2/data/detection_utils.py | IliasMAOUDJ/detectron2 | 2764af5120d29e63422884d4c9aebcacf74fea44 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import logging
import numpy as np
from typing import List, Union
import pycocotools.mask as mask_util
import torch
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.utils.file_io import PathManager
from . import transforms as T
from .catalog import MetadataCatalog
__all__ = [
"SizeMismatchError",
"convert_image_to_rgb",
"check_image_size",
"transform_proposals",
"transform_instance_annotations",
"annotations_to_instances",
"annotations_to_instances_rotated",
"build_augmentation",
"build_transform_gen",
"create_keypoint_hflip_indices",
"filter_empty_instances",
"read_image",
]
class SizeMismatchError(ValueError):
    """
    Raised when a loaded image's width/height differs from the annotation's.
    """
# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
# RGB <-> YUV conversion matrices using the BT.601 (SDTV) coefficients.
_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]

# https://www.exiv2.org/tags.html
_EXIF_ORIENT = 274  # exif 'Orientation' tag
def convert_PIL_to_numpy(image, format):
    """
    Convert PIL image to numpy array of target format.

    Args:
        image (PIL.Image): a PIL image
        format (str): the format of output image

    Returns:
        (np.ndarray): also see `read_image`
    """
    if format is not None:
        # PIL only supports RGB, so convert to RGB and flip channels over below
        conversion_format = format
        if format in ["BGR", "YUV-BT.601"]:
            conversion_format = "RGB"
        image = image.convert(conversion_format)
    image = np.asarray(image)
    # PIL squeezes out the channel dimension for "L", so make it HWC
    if format == "L":
        image = np.expand_dims(image, -1)

    # handle formats not supported by PIL
    elif format == "BGR":
        # flip channels if needed
        image = image[:, :, ::-1]
    elif format == "YUV-BT.601":
        # Scale to [0, 1] before applying the BT.601 RGB->YUV matrix.
        image = image / 255.0
        image = np.dot(image, np.array(_M_RGB2YUV).T)

    return image
def convert_image_to_rgb(image, format):
    """
    Convert an image from the given format to RGB.

    Args:
        image (np.ndarray or Tensor): an HWC image
        format (str): the format of input image, also see `read_image`

    Returns:
        (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
    """
    if isinstance(image, torch.Tensor):
        image = image.cpu().numpy()
    if format == "BGR":
        # Reverse the channel axis: BGR -> RGB.
        return image[:, :, [2, 1, 0]]
    if format == "YUV-BT.601":
        rgb = np.dot(image, np.array(_M_YUV2RGB).T)
        return rgb * 255.0
    # Remaining formats are PIL modes; let PIL perform the conversion.
    if format == "L":
        image = image[:, :, 0]
    pil_image = Image.fromarray(image.astype(np.uint8), mode=format)
    return np.asarray(pil_image.convert("RGB"))
def _apply_exif_orientation(image):
    """
    Applies the exif orientation correctly.

    This code exists per the bug:
      https://github.com/python-pillow/Pillow/issues/3973
    with the function `ImageOps.exif_transpose`. The Pillow source raises errors with
    various methods, especially `tobytes`

    Function based on:
      https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
      https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527

    Args:
        image (PIL.Image): a PIL image

    Returns:
        (PIL.Image): the PIL image with exif orientation applied, if applicable
    """
    # Older Pillow versions lack `getexif`; nothing to do then.
    if not hasattr(image, "getexif"):
        return image

    try:
        exif = image.getexif()
    except Exception:  # https://github.com/facebookresearch/detectron2/issues/1885
        exif = None

    if exif is None:
        return image

    orientation = exif.get(_EXIF_ORIENT)

    # Map each EXIF orientation value (2-8) to the transpose that undoes it;
    # value 1 (normal) and unknown values fall through to None.
    method = {
        2: Image.FLIP_LEFT_RIGHT,
        3: Image.ROTATE_180,
        4: Image.FLIP_TOP_BOTTOM,
        5: Image.TRANSPOSE,
        6: Image.ROTATE_270,
        7: Image.TRANSVERSE,
        8: Image.ROTATE_90,
    }.get(orientation)

    if method is not None:
        return image.transpose(method)
    return image
def read_image(file_name, format=None):
    """
    Read an image file into an ndarray of the requested format.

    EXIF rotation/flip metadata, when present, is applied to the pixels.

    Args:
        file_name (str): image file path
        format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".

    Returns:
        image (np.ndarray):
            an HWC image in the given format, which is 0-255, uint8 for
            supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
    """
    with PathManager.open(file_name, "rb") as fp:
        pil_image = Image.open(fp)
        # Normalize orientation before decoding; works around
        # https://github.com/python-pillow/Pillow/issues/3973
        pil_image = _apply_exif_orientation(pil_image)
        # Decode while the file handle is still open (PIL reads lazily).
        return convert_PIL_to_numpy(pil_image, format)
def check_image_size(dataset_dict, image):
    """
    Raise SizeMismatchError if `image` disagrees with the width/height
    recorded in `dataset_dict`; fill in any missing width/height otherwise.
    """
    height, width = image.shape[0], image.shape[1]
    if "width" in dataset_dict or "height" in dataset_dict:
        actual = (width, height)
        expected = (dataset_dict["width"], dataset_dict["height"])
        if actual != expected:
            file_part = (
                " for image " + dataset_dict["file_name"]
                if "file_name" in dataset_dict
                else ""
            )
            message = "Mismatched image shape{}, got {}, expect {}.".format(
                file_part, actual, expected
            )
            raise SizeMismatchError(
                message + " Please check the width/height in your annotation."
            )

    # Record the true size so boxes always remap to the original image size.
    if "width" not in dataset_dict:
        dataset_dict["width"] = width

    if "height" not in dataset_dict:
        dataset_dict["height"] = height
def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
    """
    Apply transformations to the proposals in dataset_dict, if any.

    Args:
        dataset_dict (dict): a dict read from the dataset, possibly
            contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
        image_shape (tuple): height, width
        transforms (TransformList):
        proposal_topk (int): only keep top-K scoring proposals
        min_box_size (int): proposals with either side smaller than this
            threshold are removed

    The input dict is modified in-place, with abovementioned keys removed. A new
    key "proposals" will be added. Its value is an `Instances`
    object which contains the transformed proposals in its field
    "proposal_boxes" and "objectness_logits".
    """
    if "proposal_boxes" in dataset_dict:
        # Transform proposal boxes
        boxes = transforms.apply_box(
            BoxMode.convert(
                dataset_dict.pop("proposal_boxes"),
                dataset_dict.pop("proposal_bbox_mode"),
                BoxMode.XYXY_ABS,
            )
        )
        boxes = Boxes(boxes)
        objectness_logits = torch.as_tensor(
            dataset_dict.pop("proposal_objectness_logits").astype("float32")
        )

        # Clip to the image and drop degenerate boxes before truncating to top-K.
        boxes.clip(image_shape)
        keep = boxes.nonempty(threshold=min_box_size)
        boxes = boxes[keep]
        objectness_logits = objectness_logits[keep]

        proposals = Instances(image_shape)
        # NOTE(review): taking the first `proposal_topk` entries assumes the
        # proposals arrive sorted by score — confirm with the data loader.
        proposals.proposal_boxes = boxes[:proposal_topk]
        proposals.objectness_logits = objectness_logits[:proposal_topk]
        dataset_dict["proposals"] = proposals
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to box, segmentation and keypoints annotations of a single instance.

    It will use `transforms.apply_box` for the box, and
    `transforms.apply_coords` for segmentation polygons & keypoints.
    If you need anything more specially designed for each data structure,
    you'll need to implement your own version of this function or the transforms.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList or list[Transform]):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.

    Returns:
        dict:
            the same input dict with fields "bbox", "segmentation", "keypoints"
            transformed according to `transforms`.
            The "bbox_mode" field will be set to XYXY_ABS.
    """
    if isinstance(transforms, (tuple, list)):
        transforms = T.TransformList(transforms)
    # bbox is 1d (per-instance bounding box)
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # clip transformed bbox to image size
    bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
    # Upper-bound the box by (w, h, w, h) of the transformed image.
    annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
    annotation["bbox_mode"] = BoxMode.XYXY_ABS

    if "segmentation" in annotation:
        # each instance contains 1 or more polygons
        segm = annotation["segmentation"]
        if isinstance(segm, list):
            # polygons: reshape flat [x0, y0, x1, y1, ...] lists to (N, 2) points.
            polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
            annotation["segmentation"] = [
                p.reshape(-1) for p in transforms.apply_polygons(polygons)
            ]
        elif isinstance(segm, dict):
            # RLE: decode to a binary mask, transform as an image.
            mask = mask_util.decode(segm)
            mask = transforms.apply_segmentation(mask)
            assert tuple(mask.shape[:2]) == image_size
            annotation["segmentation"] = mask
        else:
            raise ValueError(
                "Cannot transform segmentation of type '{}'!"
                "Supported types are: polygons as list[list[float] or ndarray],"
                " COCO-style RLE as a dict.".format(type(segm))
            )

    if "keypoints" in annotation:
        keypoints = transform_keypoint_annotations(
            annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
        )
        annotation["keypoints"] = keypoints

    return annotation
def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
    """
    Transform keypoint annotations of an image.
    If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)

    Args:
        keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
            Each point is represented by (x, y, visibility).
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
            When `transforms` includes horizontal flip, will use the index
            mapping to flip keypoints.
    """
    # (N*3,) -> (N, 3)
    keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
    keypoints_xy = transforms.apply_coords(keypoints[:, :2])

    # Set all out-of-boundary points to "unlabeled"
    inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
    inside = inside.all(axis=1)
    keypoints[:, :2] = keypoints_xy
    keypoints[:, 2][~inside] = 0

    # This assumes that HorizFlipTransform is the only one that does flip
    # (an odd number of hflips means the image is mirrored overall).
    do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1

    # Alternative way: check if probe points was horizontally flipped.
    # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
    # probe_aug = transforms.apply_coords(probe.copy())
    # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0])  # noqa

    # If flipped, swap each keypoint with its opposite-handed equivalent
    if do_hflip:
        assert keypoint_hflip_indices is not None
        keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]

    # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
    keypoints[keypoints[:, 2] == 0] = 0
    return keypoints
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            It will contain fields "gt_boxes", "gt_classes",
            "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
    target = Instances(image_size)
    target.gt_boxes = Boxes(boxes)

    classes = [int(obj["category_id"]) for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            try:
                masks = PolygonMasks(segms)
            except ValueError as e:
                raise ValueError(
                    "Failed to use mask_format=='polygon' from the given annotations!"
                ) from e
        else:
            assert mask_format == "bitmask", mask_format
            # Normalize every supported segmentation representation to a
            # binary HxW mask before stacking into BitMasks.
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a binary segmentation mask "
                        " in a 2D numpy array of shape HxW.".format(type(segm))
                    )
            # torch.from_numpy does not support array with negative stride.
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks

    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)

    return target
def annotations_to_instances_rotated(annos, image_size):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.
    Compared to `annotations_to_instances`, this function is for rotated boxes only

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width

    Returns:
        Instances:
            Containing fields "gt_boxes", "gt_classes",
            if they can be obtained from `annos`.
            This is the format that builtin models expect.
    """
    # Rotated boxes are used as-is (no BoxMode conversion, unlike the
    # axis-aligned variant above).
    boxes = [obj["bbox"] for obj in annos]
    target = Instances(image_size)
    boxes = target.gt_boxes = RotatedBoxes(boxes)
    boxes.clip(image_size)

    classes = [obj["category_id"] for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes

    return target
def filter_empty_instances(
    instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
):
    """
    Filter out empty instances in an `Instances` object.

    Args:
        instances (Instances):
        by_box (bool): whether to filter out instances with empty boxes
        by_mask (bool): whether to filter out instances with empty masks
        box_threshold (float): minimum width and height to be considered non-empty
        return_mask (bool): whether to return boolean mask of filtered instances

    Returns:
        Instances: the filtered instances.
        tensor[bool], optional: boolean mask of filtered instances
    """
    assert by_box or by_mask
    r = []
    if by_box:
        r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
    if instances.has("gt_masks") and by_mask:
        r.append(instances.gt_masks.nonempty())

    # TODO: can also filter visible keypoints

    if not r:
        return instances
    # AND the per-criterion keep-masks together; an instance survives only if
    # it is non-empty under every enabled criterion.
    m = r[0]
    for x in r[1:]:
        m = m & x
    if return_mask:
        return instances[m], m
    return instances[m]
def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
    """
    Args:
        dataset_names: list of dataset names

    Returns:
        list[int]: a list of size=#keypoints, storing the
        horizontally-flipped keypoint indices.
    """
    if isinstance(dataset_names, str):
        dataset_names = [dataset_names]

    check_metadata_consistency("keypoint_names", dataset_names)
    check_metadata_consistency("keypoint_flip_map", dataset_names)

    meta = MetadataCatalog.get(dataset_names[0])
    names = meta.keypoint_names
    # TODO flip -> hflip
    # Make the flip map symmetric so both left->right and right->left resolve.
    flip_map = dict(meta.keypoint_flip_map)
    flip_map.update({v: k for k, v in flip_map.items()})
    # Keypoints with no flip partner (e.g. "nose") map to themselves.
    flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
    flip_indices = [names.index(i) for i in flipped_names]
    return flip_indices
def gen_crop_transform_with_instance(crop_size, image_size, instance):
    """
    Generate a CropTransform so that the cropping region contains
    the center of the given instance.

    Args:
        crop_size (tuple): h, w in pixels
        image_size (tuple): h, w
        instance (dict): an annotation dict of one instance, in Detectron2's
            dataset format.
    """
    crop_size = np.asarray(crop_size, dtype=np.int32)
    bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
    center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
    assert (
        image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
    ), "The annotation bounding box is outside of the image!"
    assert (
        image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
    ), "Crop size is larger than image size!"

    # Valid crop origins: the window must both fit inside the image
    # (origin <= image - crop) and contain the instance center
    # (origin in [center - crop, center]).
    min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
    max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
    max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))

    # Sample a crop origin uniformly from the valid range (inclusive).
    y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
    x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
    return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
def check_metadata_consistency(key, dataset_names):
    """
    Check that the datasets have consistent metadata.

    Args:
        key (str): a metadata key
        dataset_names (list[str]): a list of dataset names

    Raises:
        AttributeError: if the key does not exist in the metadata
        ValueError: if the given datasets do not have the same metadata values defined by key
    """
    if len(dataset_names) == 0:
        return
    logger = logging.getLogger(__name__)
    entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
    # Compare every dataset's value against the first one; log both sides
    # before raising so the mismatch is easy to diagnose.
    for idx, entry in enumerate(entries_per_dataset):
        if entry != entries_per_dataset[0]:
            logger.error(
                "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
            )
            logger.error(
                "Metadata '{}' for dataset '{}' is '{}'".format(
                    key, dataset_names[0], str(entries_per_dataset[0])
                )
            )
            raise ValueError("Datasets have different metadata '{}'!".format(key))
def build_augmentation(cfg, is_train):
    """
    Create the default list of :class:`Augmentation` from the config:
    shortest-edge resizing, plus random flipping during training.

    Returns:
        list[Augmentation]
    """
    input_cfg = cfg.INPUT
    if is_train:
        min_size, max_size = input_cfg.MIN_SIZE_TRAIN, input_cfg.MAX_SIZE_TRAIN
        sample_style = input_cfg.MIN_SIZE_TRAIN_SAMPLING
    else:
        min_size, max_size = input_cfg.MIN_SIZE_TEST, input_cfg.MAX_SIZE_TEST
        # Inference always uses a single deterministic size.
        sample_style = "choice"
    augmentations = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
    flip_mode = input_cfg.RANDOM_FLIP
    if is_train and flip_mode != "none":
        flip = T.RandomFlip(
            horizontal=flip_mode == "horizontal",
            vertical=flip_mode == "vertical",
        )
        augmentations.append(flip)
    return augmentations
# Deprecated name kept so older code importing `build_transform_gen` works.
build_transform_gen = build_augmentation
"""
Alias for backward-compatibility.
"""
| 35.256117 | 105 | 0.634 |
96bb1c844aed51159fdeb8c2b04d0b95ba79ca1e | 1,057 | py | Python | src/downloader_4anime/events.py | fabriciopashaj/downloader-4anime | d313059a38d53d6019279da212e9196135e24ea5 | [
"MIT"
] | null | null | null | src/downloader_4anime/events.py | fabriciopashaj/downloader-4anime | d313059a38d53d6019279da212e9196135e24ea5 | [
"MIT"
] | 1 | 2021-11-01T21:45:30.000Z | 2021-11-11T13:15:24.000Z | src/downloader_4anime/events.py | fabriciopashaj/downloader-4anime | d313059a38d53d6019279da212e9196135e24ea5 | [
"MIT"
] | null | null | null | from typing import Callable, Union, Optional
# An event is addressed either by name, or by a (name, handler_index) pair.
EventDescriptor = Union[str, tuple[str, int]]
# Either a single callback or the list of callbacks registered for an event.
EventHandler = Union[Callable, list[Callable]]
class EventEmitter(object):
    """A minimal synchronous event emitter.

    Handlers are registered per event name with :meth:`on` and invoked in
    registration order by :meth:`emit`.
    """

    def __init__(self, events: Optional[list[str]] = None):
        # `None` sentinel avoids a mutable default argument; pre-registers
        # an empty handler list for each given event name.
        self.__events = {e: [] for e in (events or [])}

    def on(self, event, handler: Callable):
        """Register `handler` to be called whenever `event` is emitted."""
        if event not in self:
            self.__events[event] = [handler]
        else:
            self.__events[event].append(handler)

    def emit(self, event: str, *argv, **kwargv) -> bool:
        """Call every handler of `event` with the given arguments.

        Returns:
            True if the event name is known (even with zero handlers),
            False otherwise.
        """
        if event not in self:
            return False
        for handler in self.__events[event]:
            handler(*argv, **kwargv)
        return True

    def __contains__(self, event: str) -> bool:
        return event in self.__events

    def __getitem__(self, event: "EventDescriptor") -> "Optional[EventHandler]":
        """Look up handlers by name, or a single handler by (name, index).

        Returns [] for an unknown name and None for an unknown (name, index).
        """
        if isinstance(event, str):
            return self.__events[event] if event in self else []
        if isinstance(event, tuple):
            name, index = event
            if name not in self:
                return None
            handlers = self.__events[name]
            # Bug fix: the index must be bounds-checked against this event's
            # handler list, not against len(self) (the number of event names).
            return handlers[index] if index < len(handlers) else None
        return None

    def __len__(self) -> int:
        # Number of distinct registered event names.
        return len(self.__events)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.