id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3066 | """
tweet stuff in intervals
"""
import time
import datetime
import twitter
from markov_chains import german_text
from config import config_no, config_yes
MAX_TWEET_LENGTH = 280  # Twitter's per-tweet character limit
greeting = ' Sehr geehrte/r Anstragssteller/in.'  # prepended to the first tweet of a reply chain
ending = ' MfG'  # sign-off appended to the last tweet of a reply chain
num_tweets = 3  # number of tweets per reply chain
class FoiaBot:
    """Twitter bot that answers mentions with Markov-chain generated text.

    Behaviour (driven by the ``config`` dict):
    * ``run()`` replies to every unhandled mention with ``num_tweets``
      chained tweets and then favourites the mention to mark it as handled.
    * ``tweet_once_a_day()`` posts one standalone tweet per day at
      ``hour_to_tweet``.
    """

    def __init__(self, config):
        # sleep_on_rate_limit=True makes python-twitter block instead of
        # raising when the API rate limit is reached.
        self.api = twitter.Api(consumer_key=config["consumer_key"],
                               consumer_secret=config["consumer_secret"],
                               access_token_key=config["access_token"],
                               access_token_secret=config["access_token_secret"],
                               sleep_on_rate_limit=True)
        self.screen_name = config["screen_name"]
        # Markov-chain model used to generate tweet text.
        self.model = german_text.setup_model(config["model_path"])
        self.hour_to_tweet = config["hour_to_tweet"]

    def get_favorites(self):
        """Return the set of status ids the bot has favourited.

        Favouriting marks a mention as 'already answered', so this set is
        used to skip handled mentions.
        """
        favorites = self.api.GetFavorites(
            screen_name=self.screen_name, count=200)
        print(favorites)
        return {f.id for f in favorites}

    def get_status_to_work_on(self):
        """Reply to every recent, direct, not-yet-handled mention."""
        favorites = self.get_favorites()
        status_list = self.api.GetMentions(count=200, trim_user=True,
                                           contributor_details=False,
                                           include_entities=False)
        for status in status_list:
            print(status)
            if status.id in favorites:  # already answered earlier
                continue
            if status.in_reply_to_status_id is not None:  # ignore replies
                continue
            # Only react to tweets that address the bot directly.
            if not status.text.startswith('@' + self.screen_name):
                continue
            self.post_replies(status)

    def post_replies(self, status):
        """Post the generated tweets as a reply chain under ``status``.

        The mention is favourited only when the whole chain was posted, so
        a partial failure is retried on the next run.
        """
        tweets = self.create_tweets()
        print(tweets)
        success = True
        reply_to_status_id = status.id
        for tweet in tweets:
            response = self.api.PostUpdate(tweet,
                                           in_reply_to_status_id=reply_to_status_id,
                                           auto_populate_reply_metadata=True,
                                           exclude_reply_user_ids=False,
                                           trim_user=True,
                                           verify_status_length=False)
            if response is None:
                success = False
                break
            # Chain the next tweet under the one just posted.
            reply_to_status_id = response.id
        if success:
            self.api.CreateFavorite(status=status)

    def generate_sentence(self, tweet_text, chars_left, set_limit=False):
        """Try to append one generated sentence to ``tweet_text``.

        Returns ``tweet_text`` unchanged when the model fails to produce a
        sentence that fits into ``chars_left``.
        """
        max_length = chars_left if set_limit else 150
        new_sent = self.model.make_short_sentence(max_length, tries=100)
        if new_sent is not None and len(new_sent) < chars_left:
            tweet_text += ' ' + new_sent
        return tweet_text

    # https://stackoverflow.com/questions/7703865/going-from-twitter-date-to-python-datetime-date
    def get_date_from_twitter_string(self, created_at):
        """Parse Twitter's created_at format into a naive local datetime."""
        parsed = time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y')
        return datetime.datetime.fromtimestamp(time.mktime(parsed))

    def tweet_once_a_day(self):
        """Post the daily tweet when it is the configured hour and the last
        own tweet is old enough (guards against posting twice per day)."""
        now = datetime.datetime.now()
        print(now.hour)
        if now.hour != self.hour_to_tweet:
            return
        last_status_list = self.api.GetUserTimeline(screen_name=self.screen_name,
                                                    count=1, include_rts=False,
                                                    trim_user=True,
                                                    exclude_replies=True)
        print(last_status_list)
        if last_status_list is None:
            return
        if len(last_status_list) == 0:
            # Account has never tweeted before.
            self.post_single_tweet()
        if len(last_status_list) == 1:
            last_status = last_status_list[0]
            created_at_date = self.get_date_from_twitter_string(
                last_status.created_at)
            time_diff = now - created_at_date
            print('time_diff', time_diff)
            time_diff_hours = time_diff.seconds / 3600 + time_diff.days * 24
            print(time_diff_hours)
            # NOTE(review): created_at is UTC while `now` is local time, so
            # the difference is off by the UTC offset ("something is broken
            # with the date" in the original); the 20h threshold absorbs it.
            if time_diff_hours > 20:
                self.post_single_tweet()

    def post_single_tweet(self):
        """Generate and post one standalone tweet."""
        tweet_text = self.generate_single_tweet_text()
        self.api.PostUpdate(tweet_text, verify_status_length=False)

    def generate_single_tweet_text(self):
        """Fill one tweet (up to MAX_TWEET_LENGTH) with generated sentences.

        Bug fix: the original looped forever when the model repeatedly
        failed to produce a fitting sentence (``chars_left`` never shrank);
        we now give up after 10 consecutive failed attempts.
        """
        tweet_text = ""
        failed_attempts = 0
        while failed_attempts < 10:
            chars_left = MAX_TWEET_LENGTH - len(tweet_text) - 1  # -1 for the joining space
            if chars_left < 20:
                break
            previous_length = len(tweet_text)
            # Near the limit, cap the sentence length to what still fits.
            tweet_text = self.generate_sentence(tweet_text, chars_left,
                                                set_limit=chars_left < 70)
            if len(tweet_text) == previous_length:
                failed_attempts += 1
            else:
                failed_attempts = 0
        return tweet_text

    def create_tweets(self):
        """Create ``num_tweets`` numbered tweets forming one reply chain.

        The first tweet starts with the greeting, the last one ends with
        the sign-off. Uses the same stall guard as
        ``generate_single_tweet_text`` to avoid an infinite loop.
        """
        tweets = []
        for i in range(num_tweets):
            tweet_text = f'{i + 1}/{num_tweets}'
            is_last = i + 1 == num_tweets
            if i == 0:
                tweet_text += greeting
            failed_attempts = 0
            while True:
                chars_left = MAX_TWEET_LENGTH - len(tweet_text) - 1  # because of space
                if is_last:
                    # ensure space for the ending
                    chars_left -= len(ending)
                if chars_left < 20 or failed_attempts >= 10:
                    if is_last:
                        tweet_text += ending
                    break
                previous_length = len(tweet_text)
                tweet_text = self.generate_sentence(tweet_text, chars_left,
                                                    set_limit=chars_left < 70)
                if len(tweet_text) == previous_length:
                    failed_attempts += 1
                else:
                    failed_attempts = 0
            tweets.append(tweet_text)
        return tweets

    def run(self):
        """Entry point: answer all new mentions."""
        self.get_status_to_work_on()
def main():
    """Build both bots (one per account), reply to new mentions, then
    post each bot's once-a-day tweet if it is due."""
    print('main called')
    no_bot = FoiaBot(config_no)
    print('after setting up no bot')
    yes_bot = FoiaBot(config_yes)
    print('after setting up yes bot')
    no_bot.run()
    print('after running no bot')
    yes_bot.run()
    print('after running yes bot')
    no_bot.tweet_once_a_day()
    yes_bot.tweet_once_a_day()
    print('after tweet once a day')
def lambda_handler(event, context):
    """AWS Lambda entry point; ``event`` and ``context`` are unused."""
    print('handler called')
    main()
    print('handler about to finish')
# if __name__ == '__main__':
# main()
| StarcoderdataPython |
9767860 | from .templates import ContextControl, ContextValue, Data, QuickReply
from .buttons import BlockButton, MessageButton, PhoneButton, WeblinkButton, ShareButton
from .simples import SimpleText, SimpleImage
from .cards import BasicCard, CommerceCard, ListCard, Carousel, ListItem
from .commons import CarouselHeader, Link, ListItem, Thumbnail, Profile, Social
from .builder import SkillResponseBuilder | StarcoderdataPython |
import os  # access to environment variables
from api import create_app  # application factory from the local api package

# Configuration name comes from the APP_SETTINGS environment variable
# (typically set in a .env file); may be None if unset.
config_name = os.getenv("APP_SETTINGS")
app = create_app(config_name)  # build the app with the selected configuration
if __name__ == "__main__":  # start the development server when run directly
    app.run()
| StarcoderdataPython |
6445920 | <filename>tests/test_objects.py
import unittest
@unittest.skip("showing class skipping")
class MySkippedTestCase(unittest.TestCase):
    """Demonstrates skipping an entire TestCase via the class decorator."""

    def test_not_run(self):
        # Never executed: the class-level skip marks every test as skipped.
        pass
122694 | <gh_stars>1-10
import numpy as np
import os
import csv
import librosa
from scipy.signal import lfilter
import matplotlib.pyplot as plt
import pandas as pd
# CSV with one row per audio clip. Column layout (as used by the loop below):
# 0: file path, 1: class label, 2: seen flag (1 seen / 0 unseen / -1 skip),
# 3: split flag (0 test-unseen / 1 train-ish / 2 val-ish), 4+: attributes.
attribute_file = 'attribute/train_zsl_linear.csv'
dataroot = 'train_zsl_linear'  # output directory for all .npy files
if os.path.isdir(dataroot) == False:
    os.makedirs(dataroot)
data = pd.read_csv(attribute_file, delimiter=',')
data = np.array(data)
print(data.shape)
# Accumulators filled while scanning the CSV once.
seen_features = []
unseen_features = []
seen_target = []
unseen_target = []
seen_att = []
unseen_att = []
test_seen_loc = []
test_unseen_loc = []
train_loc = []
trainval_loc = []
val_loc = []
seen_loc = []
unseen_loc = []
label = []
att = []
features = []
count = 0  # 1-based running index over rows that are not skipped
image_files = []
test_seen_label = []
trainval_label = []
# NOTE(review): the original indentation was lost in this dump; the nesting
# below is reconstructed from the logic — verify against the source file.
for line in range(data.shape[0]):
    if data[line, 2] == -1:
        pass  # flagged row: excluded from every split
    else:
        count += 1
        file_name = data[line, 0]
        # Mel feature extraction is disabled (kept as a reference string);
        # the script currently only rebuilds labels/attributes/split indices.
        '''x, sr = librosa.load(file_name, sr=None, mono=True)
# parameters
unit_frequency = 20
slicing_num = 1
n_fft = int(sr / slicing_num / unit_frequency)
hop_length = int(n_fft / 5)
# zero mean centering, slicing
x = x - sum(x) / len(x)
# pre emphasising with digital filter
x_filter = lfilter([1, -0.95], 1, x)
mel = librosa.feature.melspectrogram(y=x_filter, sr=sr, n_fft=n_fft, hop_length=hop_length,
n_mels=120, norm=np.inf, fmax=10000, fmin=0)
mel = mel / np.max(np.max(mel))'''
        print(line)
        #features.append(mel)
        if data[line, 2] == 1:  # seen class
            #seen_features.append(mel)
            seen_target.append(data[line, 1])
            seen_loc.append(line)
            #seen_att.append(data[line, 4:].astype(float))
        elif data[line, 2] == 0:  # unseen class
            #unseen_features.append(mel)
            unseen_target.append(data[line, 1])
            unseen_loc.append(line)
        #print(len(seen_features), len(unseen_features))
        label.append(data[line, 1])
        # Sample the attribute vector once every 50 rows — presumably the
        # per-class attributes repeat row-by-row; TODO confirm.
        if line % 50 == 1:
            if data[line, 2] == 1:
                seen_att.append(data[line, 4:].astype(float))
                #seen_target.append(data[line, 1])
            elif data[line, 2] == 0:
                unseen_att.append(data[line, 4:].astype(float))
                #unseen_target.append(data[line, 1])
            att.append(data[line, 4:].astype(float))
        # Split assignment from column 3; every 5th kept row of the seen
        # splits is diverted into the seen-test partition.
        if data[line, 3] == 0:
            test_unseen_loc.append(count)
        elif data[line, 3] == 1:
            if count % 5 == 1:
                test_seen_loc.append(count)
                test_seen_label.append(data[line, 1])
            else:
                train_loc.append(count)
                trainval_loc.append(count)
                trainval_label.append(data[line, 1])
        elif data[line, 3] == 2:
            if count % 5 == 1:
                test_seen_loc.append(count)
                test_seen_label.append(data[line, 1])
            else:
                val_loc.append(count)
                trainval_loc.append(count)
                trainval_label.append(data[line, 1])
        image_files.append(data[line, 0])
###### Feature #######
# Feature extraction above is commented out, so both arrays are empty here;
# the files are still written to keep the output layout complete.
seen_features = np.array(seen_features)
unseen_features = np.array(unseen_features)
print(seen_features.shape, unseen_features.shape)
print(seen_features.shape)
SNU36_mel_seen_dir = dataroot + '/SNU36_mel_10K_120_0.95_seen.npy'
SNU36_mel_unseen_dir = dataroot + '/SNU36_mel_10K_120_0.95_unseen.npy'
np.save(SNU36_mel_seen_dir, seen_features)
np.save(SNU36_mel_unseen_dir, unseen_features)
##### for pre-training #####
seen_target = np.array(seen_target).astype(float)
unseen_target = np.array(unseen_target).astype(float)
# Map raw class ids onto contiguous 0..k-1 indices. Note: seen samples are
# encoded against the seen-class list only, while unseen samples are
# encoded against the combined class list.
num = np.unique(np.hstack((seen_target, unseen_target)), axis=0)
seen_num = np.unique(seen_target, axis=0)
unseen_num = np.unique(unseen_target, axis=0)
seen_encoding = np.zeros((seen_target.shape[0]))
unseen_encoding = np.zeros((unseen_target.shape[0]))
for i in range(seen_target.shape[0]):
    for j in range(seen_num.shape[0]):
        if seen_target[i] == seen_num[j]:
            seen_encoding[i] = j
            break
for i in range(unseen_target.shape[0]):
    for j in range(num.shape[0]):
        if unseen_target[i] == num[j]:
            unseen_encoding[i] = j
            break
seen_target_dir = dataroot + '/seen_target.npy'
unseen_target_dir = dataroot + '/unseen_target.npy'
seen_class_dir = dataroot + '/seen_class.npy'
unseen_class_dir = dataroot + '/unseen_class.npy'
np.save(seen_target_dir, seen_target)
np.save(unseen_target_dir, unseen_target)
np.save(seen_class_dir, seen_encoding)
np.save(unseen_class_dir, unseen_encoding)
###### location ######
# Persist the 1-based split indices and the per-row file names.
test_seen_loc = np.array(test_seen_loc).astype(float)
test_unseen_loc = np.array(test_unseen_loc).astype(float)
train_loc = np.array(train_loc).astype(float)
trainval_loc = np.array(trainval_loc).astype(float)
val_loc = np.array(val_loc).astype(float)
image_files = np.array(image_files)
test_seen_loc_dir = dataroot + '/test_seen_loc.npy'
test_unseen_loc_dir = dataroot + '/test_unseen_loc.npy'
train_loc_dir = dataroot + '/train_loc.npy'
trainval_loc_dir = dataroot + '/trainval_loc.npy'
val_loc_dir = dataroot + '/val_loc.npy'
image_files_dir = dataroot + '/image_files.npy'
np.save(test_seen_loc_dir, test_seen_loc)
np.save(test_unseen_loc_dir, test_unseen_loc)
np.save(train_loc_dir, train_loc)
np.save(trainval_loc_dir, trainval_loc)
np.save(val_loc_dir, val_loc)
np.save(image_files_dir, image_files)
###### features #######
# The blocks below are dead code deliberately kept as string literals
# (expression statements with no runtime effect); retained for reference.
'''features = np.array(features).astype(float)
features = features.T
print(features.shape)
np.save('data_for_fg/features.npy', features)'''
'''random_seed = 462
val_split = 0.2
shuffle_dataset = True
##### train, test locations #####
dataset_size = len(seen_loc)
split = int(np.floor(val_split * dataset_size))
if shuffle_dataset:
np.random.seed(random_seed)
np.random.shuffle(seen_loc)
train_indices, val_indices = seen_loc[split:], seen_loc[:split]
train_indices = np.array(train_indices).astype(float) + 1
val_indices = np.array(val_indices).astype(float) + 1
unseen_loc = np.array(unseen_loc).astype(float) + 1
np.save('train_loc.npy', train_indices)
np.save('test_seen_loc.npy', val_indices)
np.save('test_unseen_loc.npy', unseen_loc)'''
#f.close()
##### For pre-training #####
# Encode train/val and seen-test labels against the same (train/val) class
# list so their indices agree during classifier pre-training.
trainval_label = np.array(trainval_label)
test_seen_label = np.array(test_seen_label)
trainval_uni = np.unique(trainval_label)
test_seen_uni = np.unique(test_seen_label)
print(trainval_label.shape[0], test_seen_label.shape[0])
trainval_encoding = np.zeros((trainval_label.shape[0]))
test_seen_encoding = np.zeros((test_seen_label.shape[0]))
# NOTE(review): nesting reconstructed from the mangled dump — verify.
for j in range(trainval_uni.shape[0]):
    for i in range(trainval_label.shape[0]):
        if trainval_label[i] == trainval_uni[j]:
            trainval_encoding[i] = j
    for ii in range(test_seen_label.shape[0]):
        if test_seen_label[ii] == trainval_uni[j]:
            test_seen_encoding[ii] = j
print(trainval_encoding.shape, test_seen_encoding.shape)
np.save('data_for_fg/trainval_class.npy', trainval_encoding)
np.save('data_for_fg/test_seen_class.npy', test_seen_encoding)
##### category number, target and One-hot #####
# 59 appears to be the total class count and labels 1-based — TODO confirm.
seen_one_hot = np.zeros((seen_target.shape[0], 59))
unseen_one_hot = np.zeros((unseen_target.shape[0], 59))
for i in range(seen_target.shape[0]):
    for j in range(59):
        if seen_target[i] == j + 1:
            seen_one_hot[i, j] = 1
            break
for i in range(unseen_target.shape[0]):
    for j in range(59):
        if unseen_target[i] == j + 1:
            unseen_one_hot[i, j] = 1
            break
print(seen_one_hot.shape, unseen_one_hot.shape)
# NOTE(review): these files go to the current directory, not `dataroot`
# like the rest — confirm that is intended.
np.save('seen_att_one_hot_unique.npy', seen_one_hot)
np.save('unseen_att_one_hot_unique.npy', unseen_one_hot)
seen_att = np.array(seen_att)
unseen_att = np.array(unseen_att)
print(seen_att.shape, unseen_att.shape)
np.save('seen_att_spherical_unique.npy', seen_att)
np.save('unseen_att_spherical_unique.npy', unseen_att)
label = np.array(label).astype(float)
print(label.shape)
np.save('labels', label)
##### attribute #####
att_dir = dataroot + '/att.npy'
data_name = dataroot.split('_')
# Attribute vector length depends on the encoding named in `dataroot`:
# one-hot ('oh') vectors have 21 entries, otherwise 7.
if data_name[-1] == 'oh':
    vec_size = 21
else:
    vec_size = 7
in_att1 = np.zeros([vec_size, 13])
in_att2 = np.zeros([vec_size, 13])
# NOTE(review): nesting reconstructed from the mangled dump. The loop copies
# 13 existing attribute vectors with entries 0 and 4 forced to (0, 1); the
# meaning of the magic offsets 156 and 26 is not evident from this file.
for i in range(13):
    in_att1[:, i] = att[i]
    in_att1[0, i], in_att1[4, i] = 0, 1
    att.insert(i + 156, in_att1[:, i])
    in_att2[:, i] = att[i + 26]
    in_att2[0, i], in_att2[4, i] = 0, 1
    att.append(in_att2[:, i])
att = np.array(att).astype(float)
att = att.T
print(att.shape)
np.save(att_dir, att)
##### all feature ######
mel_dir = dataroot + '/mel.npy'
features = np.array(features).astype(float)  # empty: extraction is disabled above
np.save(mel_dir, features)
##### label #####
label_dir = dataroot + '/labels.npy'
label = np.array(label).astype(float)
# Dead re-encoding variant kept as a string literal for reference.
'''label_uni = np.unique(label)
labels = []
for i in range(label.shape[0]):
for j in range(label_uni.shape[0]):
if label[i] == label_uni[j]:
labels.append(j + 1)
labels = np.array(labels).astype(float)'''
labels = np.array(label).astype(float)
np.save(label_dir, labels)
print(labels)
| StarcoderdataPython |
4935365 | <filename>ivf/scene/layer.py
# -*- coding: utf-8 -*-
## @package ivf.scene.layer
#
# ivf.scene.layer utility package.
# @author tody
# @date 2016/01/27
import numpy as np
from ivf.scene.data import Data
class Layer(Data):
    """A named, coloured mask layer; serialises name/color via Data's JSON I/O."""

    ## Constructor
    #  @param name   display name of the layer.
    #  @param color  RGBA tuple used to render the layer.
    #  @param mask   optional mask data; note it is NOT serialised to JSON.
    def __init__(self, name="", color=(1.0, 0.0, 0.0, 0.4), mask=None):
        super(Layer, self).__init__()
        self._mask = mask
        self._name = name
        self._color = color

    def name(self):
        return self._name

    def color(self):
        return self._color

    def mask(self):
        return self._mask

    ## dictionary data for writeJson method.
    def _dataDict(self):
        data = {}
        data["name"] = self._name
        data["color"] = self._color
        return data

    ## set dictionary data for loadJson method.
    def _setDataDict(self, data):
        self._name = data["name"]
        self._color = data["color"]
class LayerSet(Data):
    """An ordered collection of Layer objects."""

    ## Constructor
    def __init__(self):
        super(LayerSet, self).__init__()
        self._layers = []

    ## Return the internal list of layers (not a copy).
    def layers(self):
        return self._layers

    ## Remove all layers.
    def clear(self):
        self._layers = []

    ## Append a layer to the end of the set.
    def addLayer(self, layer):
        self._layers.append(layer)
3418270 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Implements a simple service using cx_Freeze.
See below for more information on what methods must be implemented and how they
are called.
"""
import threading
class Handler(object):
    """Lifecycle hooks for a simple cx_Freeze Windows service.

    The constructor takes no parameters; all configuration belongs in the
    configuration file and is processed in Initialize().
    """

    def __init__(self):
        # Set once Run() has finished shutting down.
        self.stopEvent = threading.Event()
        # Set when the service manager asks the service to stop.
        self.stopRequestedEvent = threading.Event()

    def Initialize(self, configFileName):
        """Called when the service is starting; reads configuration."""
        pass

    def Run(self):
        """Called right after Initialize(); performs the service's work.

        Blocks until a stop is requested, then signals ``stopEvent`` so
        that Stop() can return.
        """
        self.stopRequestedEvent.wait()
        self.stopEvent.set()

    def Stop(self):
        """Called by the service manager GUI; requests and awaits shutdown."""
        self.stopRequestedEvent.set()
        self.stopEvent.wait()
| StarcoderdataPython |
6705673 | <filename>{{cookiecutter.project_type}}/__cc_fastAPI/{{cookiecutter.directory_name}}/app/src/api/endpoints/user.py
import datetime as dt
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
# from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.src.db.manager import get_session
from app.src import models
from app.src.api import crud
from app.src.common import security, utils
router = APIRouter()
# Base CRUD helper bound to the User model (shared by the endpoints below).
crud_base = crud.base(models.User)
# Endpoints in this module: create user, list users, get a single user
# (by id, email or username) and update the authenticated user.
@router.get("/", response_model=List[models.UserDataModel])
async def read_users(
    *,
    db: Session = Depends(get_session),
    skip: int = 0,
    limit: int = 100,
    current_user: models.UserBase = Depends(security.get_current_admin_user),
) -> Any:
    """
    Retrieve all users (admin only).

    Bug fix: the "No users found" HTTPException used to be raised inside
    the try block and was immediately caught by the generic handler, which
    re-wrapped it into the "Impossible to get..." message. The empty-result
    check now happens after the try/except so clients receive the intended
    404 detail.
    """
    start_time = dt.datetime.now()
    try:
        users = crud.user.get_multi(db, skip=skip, limit=limit)
        utils.profiling_api("user:get:all", start_time, "info")
    except Exception as e:
        utils.profiling_api("user:get:all", start_time, "error")
        raise HTTPException(
            status_code=404, detail=f"Impossible to get the list of all users: {e}"
        )
    if not users:
        raise HTTPException(status_code=404, detail="No users found")
    return users
@router.get("/{user_id}", response_model=models.UserDataModel)
async def read_single_user(
    *,
    db: Session = Depends(get_session),
    user_id: int,
    current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
    """Return one user by primary key; raises 404 if it does not exist."""
    start_date = dt.datetime.now()
    user = crud_base.get(db, id=user_id)
    if not user:
        utils.profiling_api("user:get:single:id", start_date, "error")
        raise HTTPException(status_code=404, detail="User not found")
    utils.profiling_api("user:get:single:id", start_date, "info")
    return user
@router.get("/info/{email}", response_model=models.UserDataModel)
async def read_single_user_by_mail(
    *,
    db: Session = Depends(get_session),
    email: str,
    current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
    """Return one user by e-mail address; raises 404 if it does not exist.

    NOTE(review): any route registered later with the identical path
    pattern "/info/{...}" is unreachable — routes match in registration
    order, and this one always wins.
    """
    start_date = dt.datetime.now()
    user = crud.user.get_by_email(db, email=email)
    if not user:
        utils.profiling_api("user:get:single:email", start_date, "error")
        raise HTTPException(status_code=404, detail="User not found")
    utils.profiling_api("user:get:single:email", start_date, "info")
    return user
# Bug fix: this route was registered as "/info/{username}", the exact same
# path pattern as "/info/{email}" above, so it could never be matched (the
# first registered route always wins). It now has a distinct prefix; no
# caller breaks because the old path always resolved to the email route.
@router.get("/info/username/{username}", response_model=models.UserDataModel)
async def read_single_user_by_username(
    *,
    db: Session = Depends(get_session),
    username: str,
    current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
    """Return one user by username; raises 404 if it does not exist."""
    start_date = dt.datetime.now()
    user = crud.user.get_by_username(db, username=username)
    if not user:
        utils.profiling_api("user:get:single:username", start_date, "error")
        raise HTTPException(status_code=404, detail="User not found")
    utils.profiling_api("user:get:single:username", start_date, "info")
    return user
@router.post("/", response_model=models.UserDataModel)
async def create_user(
    *,
    db: Session = Depends(get_session),
    user_in: models.UserCreate,
    current_user: models.UserBase = Depends(security.get_current_admin_user),
) -> Any:
    """Create a new user (admin only); raises 404 with details on failure."""
    start_date = dt.datetime.now()
    try:
        user = crud.user.create(db, obj_in=user_in)
        utils.profiling_api("user:create", start_date, "info")
        return user
    except Exception as message:
        utils.profiling_api("user:create", start_date, "error")
        raise HTTPException(
            status_code=404, detail=f"Impossible to add a new user: {message}"
        )
@router.put("/update/me", response_model=models.UserDataModel)
async def update_user_me(
    *,
    db: Session = Depends(get_session),
    user_in: models.UserUpdate,
    current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
    """Update the authenticated user's own record.

    NOTE(review): two suspected defects here — (1) the 404 raised for a
    falsy result is itself caught by the generic except below and
    re-wrapped as a 400; (2) the function never returns the updated user
    on success, so the response body will not satisfy the declared
    response_model. Likely a missing ``return user``.
    """
    start_date = dt.datetime.now()
    try:
        user = crud_base.update(db, db_obj=current_user, obj_in=user_in)
        utils.profiling_api("user:update", start_date, "info")
        if not user:
            utils.profiling_api("user:update", start_date, "error")
            raise HTTPException(
                status_code=404,
                detail="Impossible to update the user",
            )
    except Exception as message:
        utils.profiling_api("user:update", start_date, "error")
        raise HTTPException(
            status_code=400,
            detail=f"Impossible to update the user: {message}",
        )
| StarcoderdataPython |
8101851 | <reponame>l3p-cv/lost_ds<gh_stars>1-10
from shapely.geometry import Point as Pt, MultiPoint
import numpy as np
import cv2
from lost_ds.geometry.api import Geometry
from lost_ds.vis.geometries import draw_points
class Point(Geometry):
    """Point annotation: shapely conversion, rasterisation, cropping, drawing."""

    def __init__(self):
        super().__init__()

    def to_shapely(self, data):
        # data: an (x, y) coordinate pair.
        return Pt(data)

    def segmentation(self, segmentation, color, anno_data, anno_format,
                     anno_style, radius, **kwargs):
        """Rasterise the point as a filled circle into `segmentation`.

        A `radius` of None leaves the mask untouched.
        """
        anno_data = self.to_abs(anno_data, anno_format, segmentation.shape)
        if radius is None:
            return segmentation
        cv2.circle(segmentation, tuple(anno_data.astype(np.int32)), radius,
                   color, cv2.FILLED)
        return segmentation

    def crop(self, crop_pos, data, **kwargs):
        """Return point coordinates relative to the `crop_pos` box.

        Returns [np.nan] when the point lies outside the crop region.
        """
        xmin, ymin, xmax, ymax = crop_pos.bounds
        point = self.to_shapely(data)
        intersection = point.intersection(crop_pos)
        if intersection.is_empty:
            return [np.nan]
        new_points = []
        if isinstance(intersection, MultiPoint):
            new_points = list(intersection.geoms)
        else:
            new_points = [intersection]
        # Shift every surviving point into the crop's coordinate frame.
        for i, point in enumerate(new_points):
            new_point = np.array(point.coords) - [xmin, ymin]
            new_points[i] = new_point.squeeze()
        return new_points

    def validate(self, data):
        # A valid point is a flat array of exactly two values (x, y).
        return len(data.shape)==1 and len(data)==2

    def _draw(self, img, data, style, text, color, line_thickness, fontscale,
              radius):
        """Draw the point; cv2 convention: negative thickness means filled."""
        if line_thickness is None:
            line_thickness = -1
        return draw_points(img, data, text, color, radius, line_thickness,
                           fontscale)
3472037 | <filename>test/mocks.py
import json
import shutil
import os
import cdsapi
class CDSClientMock:
    """A simple mock of the cdsapi.Client class

    This mock class uses predefined requests from on-disk JSON files. When
    the retrieve method is called, it checks if the request matches one of
    these known requests, and if so returns the associated result (also read
    from disk). If the request cannot be matched, an exception is raised.
    This class implements a minimal subset of the functionality of the
    actual cdsapi.Client class, just sufficient to allow the unit tests to run.
    The url and key arguments are not perfectly mocked: in the actual CDS API
    client, the environment variables are read as part of the default
    argument definitions. This is hard to test, since default arguments are
    only evaluated once, when the method is defined. In the mock,
    the parameters have None as a default, which is replaced (if possible) in
    the method body with an environment variable read at runtime.
    """

    def __init__(self, url=None, key=None):
        # Minimal stand-in for the real client's HTTP session object.
        class Session:
            def close(self):
                pass
        self.session = Session()
        # Mirror the real client's config resolution: explicit args, then
        # environment variables, then the ~/.cdsapirc config file.
        if url is None:
            url = os.environ.get('CDSAPI_URL')
        if key is None:
            key = os.environ.get('CDSAPI_KEY')
        dotrc = os.environ.get('CDSAPI_RC', os.path.expanduser('~/.cdsapirc'))
        # NOTE(review): nesting reconstructed from a whitespace-mangled
        # dump — verify against the original file.
        if url is None or key is None:
            if os.path.exists(dotrc):
                config = cdsapi.api.read_config(dotrc)
                if key is None:
                    key = config.get('key')
                if url is None:
                    url = config.get('url')
            if url is None or key is None:
                raise Exception(f'Missing/incomplete configuration file: {dotrc}')
        self.url = url
        self.key = key
        # Load every canned (request, result-path) pair from mock_results/.
        resource_path = os.path.join(os.path.dirname(__file__),
                                     'mock_results')
        # request_map is a list because dicts can't be hashed in Python, and
        # it's not worth introducing a dependency on frozendict just for this.
        self.request_map = []
        for d in os.listdir(resource_path):
            dir_path = os.path.join(resource_path, d)
            with open(os.path.join(dir_path, 'request.json'), 'r') as fh:
                request = json.load(fh)
            self.request_map.append((request, os.path.join(dir_path, 'result')))

    def _get_result(self, request):
        """Return the result path for a canned request; KeyError if unknown."""
        for canned_request, canned_result in self.request_map:
            if request == canned_request:
                return canned_result
        raise KeyError('Request not recognized')

    def retrieve(self, dataset_name, params, file_path):
        """Copy the canned result for (dataset_name, params) to file_path."""
        params_with_name = {**dict(_dataset_name=dataset_name),
                            **params}
        shutil.copy2(self._get_result(params_with_name), file_path)
| StarcoderdataPython |
3419788 | <reponame>team-oss/dspg20oss
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 19:47:13 2020
@author: dnb3k
"""
# NOTE(review): exploratory fuzzy-matching snippet; `multiCoWorkerTable`,
# `df`, `dfF` and `df2` are defined elsewhere — this file does not run on
# its own.
lesserCompanies=multiCoWorkerTable.iloc[15000:-1]
lesserCompanies['guesses']=""
import difflib
# Example difflib.get_close_matches usages kept for reference.
df['Name_r'] = df.Name_x.map(lambda x: (difflib.get_close_matches(x, dfF.Name)[:1] or [None])[0])
df2.index = df2.index.map(lambda x: difflib.get_close_matches(x, multiCoWorkerTable.index)[0])
# Fuzzy-match each lesser company against a window of the main company
# list that grows by one entry per iteration.
for iAttempts in range(len(lesserCompanies.index)):
    currentNameRange=range(0,1000+iAttempts)
    lesserCompanies['guesses'].iloc[iAttempts]=difflib.get_close_matches(lesserCompanies['company'].iloc[iAttempts],multiCoWorkerTable['company'].iloc[currentNameRange],cutoff=0.8)
    lesserCompanies['guesses'].iloc[iAttempts]  # no-op expression (REPL inspection leftover)
203838 | <filename>sentry/commands/manage.py<gh_stars>0
"""
sentry.commands.manage
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from sentry.commands.utils import consume_args
@consume_args
def manage(args):
    """Forward `args` to Django's management utility (manage.py passthrough)."""
    from django.core.management import ManagementUtility
    utility = ManagementUtility(args)
    utility.execute()
def update_migrations():
    """
    Creates schemamigrations for sentry.
    """
    from django.core.management import ManagementUtility
    # Equivalent of running: python manage.py schemamigration sentry --auto
    # (South migration tool, pre Django 1.7).
    args = 'manage.py schemamigration sentry --auto'.split(' ')
    utility = ManagementUtility(args)
    utility.execute()
| StarcoderdataPython |
12801463 | <gh_stars>0
class Person():  # define a demo class
    """Minimal class demonstrating dynamic attributes versus __slots__."""
    def __init__(self, name):  # store the given name on the instance
        self.name = name
    __slots__ = ('name', 'age')  # restrict instances to these attributes (no per-instance __dict__)
li = Person('李')  # instantiate Person
li.age = 20  # dynamic attribute assignment; allowed because 'age' is listed in __slots__
# NOTE: assigning to Person.age replaces the slot descriptor that __slots__
# created, which hides the value already stored on existing instances.
Person.age = None
# Bug fix: 'ss' is not in __slots__, so this assignment raises
# AttributeError and crashed the script; demonstrate the restriction
# instead of dying on it.
try:
    li.ss = 222
except AttributeError as error:
    print('__slots__ forbids new attributes:', error)
# Echo stdin to stdout line by line until end of input.
while True:
    try:
        print(input())
    except EOFError:  # was a bare except, which also swallowed KeyboardInterrupt
        break
5026813 | <filename>usaspending_api/common/retrieve_file_from_uri.py
import boto3
import io
import requests
import tempfile
import urllib
from shutil import copyfile
from django.conf import settings
# URL schemes understood by RetrieveFileFromUri; "" means a bare file path.
VALID_SCHEMES = ("http", "https", "s3", "file", "")
# Human-readable help text listing the supported non-empty schemes.
SCHEMA_HELP_TEXT = (
    "Internet RFC on Relative Uniform Resource Locators "
    + "Format: scheme://netloc/path;parameters?query#fragment "
    + "List of supported schemes: "
    + ", ".join(["{}://".format(s) for s in VALID_SCHEMES if s])
)
class SpooledTempFileIOBase(tempfile.SpooledTemporaryFile, io.IOBase):
    """Improving the current implementation of standard library's
    SpooledTemporaryFile class so that it mimics IOBase abstract. This is a
    documented issue (https://bugs.python.org/issue26175) and has an open PR
    (https://github.com/python/cpython/pull/3249) to fix the issue.
    Inheriting the two classes and adding a few functions gets this class
    _close_ to what is needs to be. If future issues appear it might be better
    to bite the bullet and only use tempfile.NamedTemporaryFile()
    """

    # Each override delegates to the underlying file object that
    # SpooledTemporaryFile keeps in self._file.
    def readable(self):
        return self._file.readable()

    def readinto(self, b):
        return self._file.readinto(b)

    def writable(self):
        return self._file.writable()

    def seekable(self):
        return self._file.seekable()

    def seek(self, *args):
        return self._file.seek(*args)

    def truncate(self, size=None):
        if size is None:
            return self._file.truncate()
        # Growing past the in-memory limit forces the spool onto disk first.
        if size > self._max_size:
            self.rollover()
        return self._file.truncate(size)
class RetrieveFileFromUri:
    """Fetch a file referenced by URI from s3, http(s) or the local disk.

    Bug fixes versus the original:
    * ``copy_to_temporary_file`` copied into a NamedTemporaryFile inside
      its ``with`` block, so the file was deleted again before the path
      was returned.
    * ``_handle_file`` compared the whole ParseResult object to the string
      ``"file"`` (always False), breaking ``file://relative`` URIs.
    """

    def __init__(self, ruri):
        self.ruri = ruri  # Relative Uniform Resource Locator
        self.parsed_url_obj = urllib.parse.urlparse(ruri)
        if self.parsed_url_obj.scheme not in VALID_SCHEMES:
            msg = "Scheme '{}' isn't supported. Try one of these: {}"
            raise NotImplementedError(msg.format(self.parsed_url_obj.scheme, VALID_SCHEMES))

    def get_file_object(self, text=False):
        """
        return a file object (aka file handler) to either:
            the local file,
            a temporary file that was loaded from the pulled external file
        Recommendation is to use this method as a context manager
        """
        if self.parsed_url_obj.scheme == "s3":
            return self._handle_s3(text)
        elif self.parsed_url_obj.scheme.startswith("http"):
            return self._handle_http(text)
        elif self.parsed_url_obj.scheme in ("file", ""):
            return self._handle_file(text)
        else:
            raise NotImplementedError("No handler for scheme: {}!".format(self.parsed_url_obj.scheme))

    def copy(self, dest_file_path):
        """
        create a copy of the file and place at "dest_file_path" which
        currently must be a file system path (not s3 or http).
        """
        if self.parsed_url_obj.scheme == "s3":
            file_path = self.parsed_url_obj.path[1:]  # remove leading '/' character
            boto3_s3 = boto3.resource("s3", region_name=settings.USASPENDING_AWS_REGION)
            s3_bucket = boto3_s3.Bucket(self.parsed_url_obj.netloc)
            s3_bucket.download_file(file_path, dest_file_path)
        elif self.parsed_url_obj.scheme.startswith("http"):
            urllib.request.urlretrieve(self.ruri, dest_file_path)
        elif self.parsed_url_obj.scheme in ("file", ""):
            # NOTE(review): for "file://" URIs this passes the full URI to
            # copyfile; works only for bare paths — confirm callers.
            copyfile(self.ruri, dest_file_path)
        else:
            raise NotImplementedError("No handler for scheme: {}!".format(self.parsed_url_obj.scheme))

    def copy_to_temporary_file(self):
        """Copy the file to a fresh temporary file and return its path.

        The caller owns the file and is responsible for deleting it.
        Bug fix: ``delete=False`` keeps the file alive after the ``with``
        block; the original returned a path to an already-deleted file.
        """
        with tempfile.NamedTemporaryFile(delete=False) as tf:
            path = tf.name
        self.copy(path)
        return path

    def _handle_s3(self, text):
        file_path = self.parsed_url_obj.path[1:]  # remove leading '/' character
        boto3_s3 = boto3.resource("s3", region_name=settings.USASPENDING_AWS_REGION)
        s3_bucket = boto3_s3.Bucket(self.parsed_url_obj.netloc)
        f = SpooledTempFileIOBase()  # Must be in binary mode (default)
        s3_bucket.download_fileobj(file_path, f)
        if text:
            # Re-spool the downloaded bytes into a text-mode file.
            byte_str = f._file.getvalue()
            f = SpooledTempFileIOBase(mode="r")
            f.write(byte_str.decode())
        f.seek(0)  # go to beginning of file for reading
        return f

    def _handle_http(self, text):
        r = requests.get(self.ruri, allow_redirects=True)
        f = SpooledTempFileIOBase(mode="w" if text else "w+b")
        f.write(r.text if text else r.content)
        f.seek(0)  # go to beginning of file for reading
        return f

    def _handle_file(self, text):
        # Bug fix: compare the scheme; the original compared the whole
        # ParseResult object to "file", which is never True.
        if self.parsed_url_obj.scheme == "file":
            # "file://relative" puts the path in netloc, "file:///abs" in
            # path; concatenating covers both forms.
            file_path = self.parsed_url_obj.netloc + self.parsed_url_obj.path
        else:  # no scheme provided: treat the whole URI as a file path
            file_path = self.parsed_url_obj.path
        return open(file_path, "r" if text else "rb")
| StarcoderdataPython |
1948481 | import socket
import sys
import time
# Minimal HTTP server: answers every request with the current unix time.
port = int(sys.argv[1])
conn = socket.socket()
# Allow quick restarts without waiting for TIME_WAIT to expire.
conn.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
conn.bind(('localhost', port))
conn.listen(500)
while 1:
    client = conn.accept()[0]
    client.recv(32 * 1024)  # drain the request; its contents are ignored
    body = str(time.time())
    # Bug fix: the header was misspelled ('Conent-length') and hard-coded a
    # wrong length (8); send the real Content-Length and bytes, not str.
    client.send(
        ('HTTP/1.0 200 OK\r\n'
         'Content-type: text/plain\r\n'
         'Content-length: %d\r\n\r\n' % len(body) + body).encode())
    client.close()
| StarcoderdataPython |
6554158 | <filename>ros/src/twist_controller/test_twist_controller.py
from unittest import TestCase
from twist_controller import Controller
import numpy as np
PKG = 'twist_controller'
class TestTwistController(TestCase):
    """Unit-test scaffold for twist_controller.Controller (currently a stub)."""

    def setUp(self):
        # Vehicle parameters (unused until the controller construction below
        # is re-enabled).
        v_mass = 1736.35
        decel_limit = -5.0
        wheel_radius = 0.2413
        wheel_base = 2.8498
        steer_ratio = 14.8
        max_lat_accel = 3.0
        max_steer_angle = 8.0
        # self.controller = Controller(v_mass, decel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle)
        self.current_velocity = []
        self.linear_velocity = []

    def test_twist(self):
        # TODO: exercise the controller once it is instantiated in setUp().
        pass
# Run the suite through rosunit when executed directly.
if __name__ == '__main__':
    import rosunit
    rosunit.unitrun(PKG, 'test_twist_controller', TestTwistController)
| StarcoderdataPython |
1618546 | <gh_stars>1-10
class Loader:
    """Loads an audio file with librosa at a fixed sample rate and duration."""

    def __init__(self, sample_rate, duration, mono):
        self.sample_rate = sample_rate  # target sample rate passed to librosa
        self.duration = duration  # seconds of audio to load
        self.mono = mono  # True to downmix to a single channel

    def load(self, file_path):
        # librosa.load returns (signal, sample_rate); only the signal is kept.
        signal = librosa.load(file_path,
                              sr=self.sample_rate,
                              duration=self.duration,
                              mono=self.mono)[0]
        return signal
class Padder:
    """Pads 1-D arrays on either side using numpy.pad."""

    def __init__(self, mode="constant"):
        # np.pad mode, e.g. "constant" (zeros), "edge", "reflect".
        self.mode = mode

    def left_pad(self, array, num_missing_items):
        """Return `array` with `num_missing_items` padded entries prepended."""
        return np.pad(array, (num_missing_items, 0), mode=self.mode)

    def right_pad(self, array, num_missing_items):
        """Return `array` with `num_missing_items` padded entries appended."""
        return np.pad(array, (0, num_missing_items), mode=self.mode)
class LogSpectrogramExtractor:
    """Extracts a log-amplitude (dB) spectrogram from a waveform."""

    def __init__(self, frame_size, hop_length):
        self.frame_size = frame_size
        self.hop_length = hop_length

    def extract(self, signal):
        # NOTE(review): [:-1] drops the last STFT frequency row, leaving
        # frame_size / 2 bins — presumably intentional; confirm.
        stft = librosa.stft(signal,
                            n_fft=self.frame_size,
                            hop_length=self.hop_length)[:-1]
        magnitude = np.abs(stft)
        return librosa.amplitude_to_db(magnitude)
class MinMaxNormaliser:
    """Linearly maps arrays into [min_val, max_val] and back again."""

    def __init__(self, min_val, max_val):
        self.min = min_val
        self.max = max_val

    def normalise(self, array):
        # Scale to [0, 1] using the array's own range, then to [min, max].
        zero_one = (array - array.min()) / (array.max() - array.min())
        return zero_one * (self.max - self.min) + self.min

    def denormalise(self, norm_array, original_min, original_max):
        # Invert normalise() given the original extremes.
        zero_one = (norm_array - self.min) / (self.max - self.min)
        return zero_one * (original_max - original_min) + original_min
class Saver:
    """Persists extracted features (.npy) and their min/max metadata (.pkl)."""

    def __init__(self, feature_save_dir, min_max_values_save_dir):
        self.feature_save_dir = feature_save_dir
        self.min_max_values_save_dir = min_max_values_save_dir

    def save_feature(self, feature, file_path):
        """Save ``feature`` next to its source name; return the .npy path."""
        save_path = self._generate_save_path(file_path)
        np.save(save_path, feature)
        return save_path

    def save_min_max_values(self, min_max_values):
        """Pickle the min/max dictionary into the metadata directory."""
        target = os.path.join(self.min_max_values_save_dir,
                              "min_max_values.pkl")
        self._save(min_max_values, target)

    @staticmethod
    def _save(data, save_path):
        with open(save_path, "wb") as handle:
            pickle.dump(data, handle)

    def _generate_save_path(self, file_path):
        # <feature_save_dir>/<original basename>.npy
        base_name = os.path.split(file_path)[1]
        return os.path.join(self.feature_save_dir, base_name + ".npy")
class PreprocessingPipeline:
    """Walks an audio directory: load -> pad -> extract -> normalise -> save.

    Collaborators (padder, extractor, normaliser, saver, loader) are injected
    after construction; assigning ``loader`` also fixes the expected number of
    samples per file (sample_rate * duration).
    """

    def __init__(self):
        self.padder = None
        self.extractor = None
        self.normaliser = None
        self.saver = None
        self.min_max_values = {}
        self._loader = None
        self._num_expected_samples = None

    @property
    def loader(self):
        return self._loader

    @loader.setter
    def loader(self, loader):
        self._loader = loader
        self._num_expected_samples = int(loader.sample_rate * loader.duration)

    def process(self, audio_files_dir):
        """Process every file under ``audio_files_dir`` and save the min/max map."""
        for root, _, files in os.walk(audio_files_dir):
            for file_name in tqdm(files):
                self._process_file(os.path.join(root, file_name))
        self.saver.save_min_max_values(self.min_max_values)

    def _process_file(self, file_path):
        signal = self.loader.load(file_path)
        if self._is_padding_necessary(signal):
            signal = self._apply_padding(signal)
        feature = self.extractor.extract(signal)
        norm_feature = self.normaliser.normalise(feature)
        save_path = self.saver.save_feature(norm_feature, file_path)
        # Track the pre-normalisation range so the feature can be denormalised.
        self._store_min_max_value(save_path, feature.min(), feature.max())

    def _is_padding_necessary(self, signal):
        return len(signal) < self._num_expected_samples

    def _apply_padding(self, signal):
        missing = self._num_expected_samples - len(signal)
        return self.padder.right_pad(signal, missing)

    def _store_min_max_value(self, save_path, min_val, max_val):
        self.min_max_values[save_path] = {"min": min_val, "max": max_val}
6509926 | #!/usr/bin/env runaiida
# -*- coding: utf-8 -*-
from __future__ import print_function
from aiida.orm.data.bool import Bool
from aiida.orm.data.float import Float
from aiida.orm.data.int import Int
from aiida.work import run
from complex_parent import ComplexParentWorkChain
if __name__ == '__main__':
    # Run the AiiDA workchain synchronously; inputs for the two child
    # namespaces are passed as plain dicts of AiiDA Data nodes.
    result = run(
        ComplexParentWorkChain,
        a=Int(1),
        child_1=dict(b=Float(1.2), c=Bool(True)),
        child_2=dict(b=Float(2.3), c=Bool(False))
    )
    # Expected result shape (namespaced keys from the child workchains):
    print(result)
    # {
    #     u'e': 1.2,
    #     u'child_1.d': 1, u'child_1.f': True,
    #     u'child_2.d': 1, u'child_2.f': False
    # }
| StarcoderdataPython |
3324115 | <gh_stars>100-1000
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _
from .fields import TreeNodeForeignKey
from .query import TreeQuerySet
class TreeNode(models.Model):
    """Abstract Django model base for adjacency-list trees.

    Provides a self-referential ``parent`` foreign key and a ``TreeQuerySet``
    manager exposing ``ancestors``/``descendants`` helpers.
    """
    # Self-referential FK; deleting a parent cascades to its whole subtree.
    parent = TreeNodeForeignKey(
        "self",
        blank=True,
        null=True,
        on_delete=models.CASCADE,
        verbose_name=_("parent"),
        related_name="children",
    )
    objects = TreeQuerySet.as_manager()
    class Meta:
        abstract = True
    def ancestors(self, **kwargs):
        """
        Returns all ancestors of the current node
        See ``TreeQuerySet.ancestors`` for details and optional arguments.
        """
        return self.__class__._default_manager.ancestors(self, **kwargs)
    def descendants(self, **kwargs):
        """
        Returns all descendants of the current node
        See ``TreeQuerySet.descendants`` for details and optional arguments.
        """
        return self.__class__._default_manager.descendants(self, **kwargs)
    def clean(self):
        """
        Raises a validation error if saving this instance would result in loops
        in the tree structure
        """
        super(TreeNode, self).clean()
        # Reject a parent that is the node itself or one of its descendants;
        # only checked for already-saved instances (self.pk set).
        if (
            self.parent_id
            and self.pk
            and (
                self.__class__._default_manager.ancestors(
                    self.parent_id, include_self=True
                )
                .filter(pk=self.pk)
                .exists()
            )
        ):
            raise ValidationError(_("A node cannot be made a descendant of itself."))
| StarcoderdataPython |
4940805 | <filename>tensorbackends/extensions/rsvd.py
from ..utils.svd_absorb_s import svd_absorb_s
def rsvd(backend, a, rank, niter, oversamp, absorb_s):
    """Randomized truncated SVD of ``a`` via subspace power iteration.

    ``backend`` is expected to expose ``random.uniform``, ``qr`` and ``svd``
    (tensorbackends interface); ``a`` must provide ``.H`` and ``@``.
    Returns (u, s, vh) truncated to ``rank``, with s absorbed per ``absorb_s``.
    """
    dtype = a.dtype
    m, n = a.shape
    subspace_dim = min(rank + oversamp, m, n)
    # Random starting subspace, refined by niter power iterations.
    q = backend.random.uniform(low=-1.0, high=1.0,
                               size=(n, subspace_dim)).astype(dtype)
    a_H = a.H
    for _ in range(niter):
        q = a_H @ (a @ q)
    q, _ = backend.qr(q)
    q, _ = backend.qr(a @ q)
    # Exact SVD inside the (much smaller) subspace.
    projected = q.H @ a
    u_sub, s, vh = backend.svd(projected)
    u = q @ u_sub
    # Drop the oversampling columns.
    if rank < subspace_dim:
        u, s, vh = u[:, :rank], s[:rank], vh[:rank, :]
    return svd_absorb_s(u, s, vh, absorb_s)
| StarcoderdataPython |
5104631 | <filename>themessage_server/themessage_server_test.py
import os
import themessage_server
def test_themessage_server_has_current_version_of_module():
    """The package __version__ must match the bundled version.txt file."""
    version_file_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'version.txt')
    with open(version_file_path) as version_file:
        expected = version_file.read().strip()
    assert themessage_server.__version__ == expected
| StarcoderdataPython |
4966358 | <reponame>agustingianni/CoverageReport<filename>report.py<gh_stars>1-10
#!/usr/bin/env python
import re
import os
import sys
import logging
import argparse
import subprocess
import collections
class CoverageReport(object):
    """Drives DynamoRio's drcov2lcov/genhtml to turn coverage traces into HTML.

    Expects ``output`` to already contain a ``traces`` directory with drcov
    trace files; produces ``coverage.info`` and a ``report`` directory there.
    """

    def __init__(self, dr_path, output, path_maps, src_filter, debug_level=0):
        """
        :param dr_path: DynamoRio installation root (contains tools/bin64).
        :param output: working directory; must contain a ``traces`` folder.
        :param path_maps: (old, new) path pairs substituted in coverage.info.
        :param src_filter: optional pattern forwarded to drcov2lcov -src_filter.
        :param debug_level: warning/verbose level forwarded to drcov2lcov.
        :raises Exception: if the output directory layout is not valid.
        """
        # Tools path.
        self.dr_cov2lcov = os.path.join(dr_path, "tools", "bin64", "drcov2lcov")
        self.dr_genhtml = os.path.join(dr_path, "tools", "bin64", "genhtml")
        self.path_maps = path_maps
        self.src_filter = src_filter
        self.debug_level = debug_level
        # Create the working directory structure.
        self.output_dir = os.path.abspath(output)
        self.traces_dir = os.path.join(self.output_dir, "traces")
        self.report_dir = os.path.join(self.output_dir, "report")
        self.coverage_info_file = os.path.join(self.output_dir, "coverage.info")
        # Check that the output directory has the right layout.
        if not os.path.exists(self.output_dir) or not os.path.exists(self.traces_dir):
            raise Exception("Output directory is not valid.")
        if not os.path.exists(self.report_dir):
            os.makedirs(self.report_dir)
        logging.info("Working directory `%s`" % self.output_dir)
        logging.info("Traces directory `%s`" % self.traces_dir)
        # Fixed: previously logged traces_dir under the "Report directory" label.
        logging.info("Report directory `%s`" % self.report_dir)

    def run(self):
        """Process traces then render the HTML report; True on success."""
        # Process all the generated traces and generate a single coverage file.
        if not self.process_traces():
            return False
        # Create an html report with the coverage information.
        if not self.generate_report():
            return False
        return True

    def generate_report(self):
        """Run genhtml over coverage.info; return True on success.

        Also scans genhtml's warnings for unreadable source files and prints
        guidance about mapping their directories with the `-m` option.
        """
        process_command = [
            self.dr_genhtml,
            "-ignore-errors=source",
            "--output-directory", self.report_dir,
            "--quiet",
            "--demangle-cpp",
            "--legend",
            "--highlight",
            "--show-details",
            self.coverage_info_file
        ]
        try:
            # universal_newlines=True keeps `output` a str on Python 3 as
            # well, which the string handling below requires.
            output = subprocess.check_output(process_command,
                                             stderr=subprocess.STDOUT,
                                             universal_newlines=True)
            logging.debug(output)
            # Extract missing source files from the output so the user can map them to the right places.
            missing_dirs = set()
            for line in output.split("\n"):
                match = re.match(r"(genhtml: WARNING: cannot read )(.+)!", line)
                if not match:
                    continue
                # Extract the missing file and its directory.
                missing_file = match.groups()[1]
                missing_dir = os.path.dirname(missing_file)
                missing_dirs.add(missing_dir)
                logging.debug("Missing file `%s`" % missing_file)
            missing_dirs = sorted(missing_dirs, reverse=True)
            # Count how many missing directories share each path prefix so we
            # can suggest the highest common directories to remap.
            count = collections.defaultdict(int)
            for path in missing_dirs:
                elements = path.split(os.path.sep)
                if elements[0] == "":
                    elements = elements[1:]
                for i in range(0, len(elements)):
                    count[os.path.sep.join(elements[0:i+1])] += 1
            if len(missing_dirs):
                logging.info("-" * 80)
                logging.info(" There are missing source directories. This is not a fatal error.")
                logging.info(" Use the command line option `-m` to map from the missing paths displayed")
                logging.info(" bellow to the actual path. You can add as many source code maps as you need.")
                logging.info(" If you fail to do this, some source will be missing from the report.")
                logging.info("")
                logging.info(" Example:")
                logging.info(" -m \"build/glibc-OTsEL5/glibc-2.27\" \"/home/user/glibc-2.27\"")
                logging.info("-" * 80)
                # Fixed: .items() instead of the Python-2-only .iteritems().
                missing_dirs = sorted([k for (k, v) in count.items() if v > 1], reverse=True)
                for path in missing_dirs:
                    logging.info(" Missing directory `%s`" % path)
        except subprocess.CalledProcessError as identifier:
            if "no valid records" in identifier.output:
                logging.warning("Could not generate a report since the trace file is empty. Maybe the source filter is wrong?")
                return False
            logging.error("Could not execute reporting command: %s" % identifier)
            return False
        return True

    def process_traces(self):
        """Run drcov2lcov over the traces and apply the source path maps."""
        process_command = [
            self.dr_cov2lcov,
            "-warning", str(self.debug_level),
            "-verbose", str(self.debug_level),
            "-dir", self.traces_dir,
            "-output", self.coverage_info_file
        ]
        if self.src_filter:
            process_command.append("-src_filter")
            process_command.append(self.src_filter)
        try:
            output = subprocess.check_output(process_command,
                                             universal_newlines=True)
            logging.debug(output)
        except subprocess.CalledProcessError as identifier:
            logging.error("Could not execute processing command: %s" % identifier)
            return False
        # We need to map some paths here. Supposedly dr has builtin support but it does not work for me.
        # Fixed: use context managers (no leaked handle) and text mode so the
        # str replacements below also work on Python 3.
        with open(self.coverage_info_file, "r") as cov_file:
            contents = cov_file.read()
        for replacee, replacement in self.path_maps:
            logging.info("Replacing path `%s` with `%s`." % (replacee, replacement))
            contents = contents.replace(replacee, replacement)
        with open(self.coverage_info_file, "w") as cov_file:
            cov_file.write(contents)
        return True
def main(argv):
    """CLI entry point: parse arguments and produce a coverage report.

    :param argv: full argument vector (argv[0] is the program name).
    :return: 0 on success, -1 on any failure.
    """
    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    parser = argparse.ArgumentParser(description='Test case coverage collector.')
    parser.add_argument("-p", action="store", dest="dr_path", required=True,
                        help="DynamoRio installation path.")
    parser.add_argument("-o", action="store", dest="output", required=True,
                        help="Output directory that will contain the trace files.")
    parser.add_argument("-d", type=int, default=0, dest="debug_level",
                        help="Set the debug/warning level of dr's tools to a value.")
    parser.add_argument("-f", action="store", dest="src_filter",
                        help="Only include files that match this pattern.")
    parser.add_argument("-m", nargs=2, action="append", default=[], dest="src_map",
                        help="Map a source path to another.")
    # Fixed: honor the argv parameter instead of silently reading sys.argv.
    args = parser.parse_args(argv[1:])
    if not os.path.isdir(args.dr_path):
        logging.error("Path to DynamoRio is not a directory.")
        return -1
    try:
        # Create a report.
        report = CoverageReport(args.dr_path, args.output, args.src_map,
                                args.src_filter, debug_level=args.debug_level)
        if not report.run():
            return -1
    except Exception as error:
        logging.error("Error: %s" % error)
        return -1
    return 0


if __name__ == "__main__":
    # Fixed: propagate the status code to the shell (it was discarded before).
    sys.exit(main(sys.argv))
| StarcoderdataPython |
6440959 | <gh_stars>0
import os
import logging
import logging.config
from pythonjsonlogger import jsonlogger
from datetime import datetime;
import boto3
import requests
import time
from botocore.client import Config
from botocore.exceptions import ClientError
from os import listdir
from os.path import isfile, join
class ElkJsonFormatter(jsonlogger.JsonFormatter):
    """JSON log formatter that surfaces ``level`` and ``logger`` as
    top-level fields, as expected by an ELK ingest pipeline."""
    def add_fields(self, log_record, record, message_dict):
        super(ElkJsonFormatter, self).add_fields(log_record, record, message_dict)
        # Copy the level name and logger name out of the LogRecord.
        log_record['level'] = record.levelname
        log_record['logger'] = record.name
# Logging is configured from the bundled logging.conf file.
logging.config.fileConfig('logging.conf')
logger = logging.getLogger('file processor')
# Directory whose files get uploaded to minio.
file_path = '/usr/share/Test/output/'
# Target minio/S3 endpoint, credentials and bucket (overridable via env).
TGT_URL = os.getenv('TARGET_MINIO_URL', 'http://192.168.99.123:30493')
TGT_ACCESS_KEY = os.getenv('TARGET_MINIO_ACCESS_KEY', 'test')
TGT_SECRET_KEY = os.getenv('TARGET_MINIO_SECRET_KEY', 'test@123')
TGT_BUCKET = os.getenv('TARGET_MINIO_BUCKET', 'output')
LOG_LEVEL = os.getenv('LOG_LEVEL', 'INFO').upper()
# NOTE(review): jwt_token/url/FILE_TO_PROCESS are not referenced in the code
# below — presumably consumed elsewhere (rebuild API); verify before removing.
jwt_token = os.getenv("API_TOKEN","YOUR_REBUILD_API_TOKEN")
url = os.getenv("API_URL","https://gzlhbtpvk2.execute-api.eu-west-1.amazonaws.com/Prod/api/rebuild/file")
FILE_TO_PROCESS = os.getenv("FILE_TO_PROCESS", "Reports 1.pdf")
# When True, Main.main() keeps re-uploading in an endless 5-second loop.
SHELL_ACCESS = True
class Main():
    """Uploads every file found in ``file_path`` to a minio bucket; loops
    forever when SHELL_ACCESS is set."""
    @staticmethod
    def log_level(level):
        # Re-initialise root logging at the level named by LOG_LEVEL.
        logging.basicConfig(level=getattr(logging, level))
    @staticmethod
    def upload_to_minio(file_path, filename):
        """Upload one file to TGT_BUCKET, creating the bucket if missing."""
        try:
            logger.info('Uploading file {}.'.format(filename))
            s3 = boto3.resource('s3', endpoint_url=TGT_URL, aws_access_key_id=TGT_ACCESS_KEY,
                                aws_secret_access_key=TGT_SECRET_KEY, config=Config(signature_version='s3v4'))
            logger.debug('Checking if the Bucket to upload files exists or not.')
            if (s3.Bucket(TGT_BUCKET) in s3.buckets.all()) == False:
                logger.info('Bucket not Found. Creating Bucket.')
                s3.create_bucket(Bucket=TGT_BUCKET)
            logger.debug('Uploading file to bucket {} minio {}'.format(TGT_BUCKET, TGT_URL))
            # NOTE(review): plain string concatenation — relies on file_path
            # ending with a path separator.
            file_to_upload = file_path + filename
            s3.Bucket(TGT_BUCKET).upload_file(file_to_upload, filename)
        except ClientError as e:
            logger.error("Cannot connect to the minio {}. Please vefify the Credentials.".format(TGT_URL))
        except Exception as e:
            # NOTE(review): unexpected errors are logged at INFO and swallowed.
            logger.info(e)
    @staticmethod
    def application():
        """Upload every regular file directly under ``file_path``."""
        #if os.name == 'nt':
        #    file_path = 'C:/GW/files/'
        try:
            for f in listdir(file_path):
                if isfile(join(file_path, f)):
                    logger.info(f)
                    Main.upload_to_minio(file_path, f)
        except Exception as e:
            logger.error(e)
    @staticmethod
    def main():
        """Entry point: set log level, run launch.sh, then poll-upload."""
        Main.log_level(LOG_LEVEL)
        #if os.name == 'nt':
        #    file_path = 'C:/GW/files/'
        #else:
        # Kick off the bundled launch script before the first upload pass.
        os.system('/usr/share/Test/launch.sh')
        # os.system('service filebeat start')
        Main.application()
        time.sleep(5)
        if SHELL_ACCESS:
            # Poll the output directory forever, uploading every 5 seconds.
            while True:
                Main.application()
                time.sleep(5)
if __name__ == "__main__":
    # Runs forever when SHELL_ACCESS is True (see Main.main).
    Main.main()
| StarcoderdataPython |
11304650 | from __future__ import annotations
import multiprocessing
import os
from dataclasses import asdict
from dataclasses import dataclass
from typing import Any
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
from PIL import Image
from CCAgT_utils.categories import Categories
from CCAgT_utils.categories import CategoriesInfos
from CCAgT_utils.constants import STRUCTURE
from CCAgT_utils.converters.CCAgT import CCAgT
from CCAgT_utils.converters.CCAgT import read_parquet
from CCAgT_utils.utils import find_files
from CCAgT_utils.utils import get_traceback
# Statistic value type: a single scalar or a per-channel 3-tuple.
R = Union[float, Tuple[float, float, float]]
@dataclass
class Statistics:
    """Accumulates mean/std/max/min/count, mergeable across partial results.

    Values may be scalars or per-channel arrays/tuples (see the ``R`` alias).
    """
    mean: R = 0.
    std: R = 0.
    max: R = 0.
    min: R = 0.
    count: int = 0

    def join_stats(self, results: Statistics) -> None:
        """Merge ``results`` into this accumulator in place.

        Fixed: the previous implementation averaged the two means/stds with
        equal weights, which is only correct when both sides cover the same
        number of items; iteratively joining per-file statistics therefore
        produced wrong aggregates. Now uses count-weighted mean and pooled
        variance so the merge equals statistics over the concatenated data
        (max/min/count were already correct).
        """
        if self.count == 0:
            # First contribution: adopt the incoming values wholesale.
            self.mean = results.mean
            self.std = results.std
            self.max = results.max
            self.min = results.min
        else:
            n1, n2 = self.count, results.count
            total = n1 + n2
            m1 = np.asarray(self.mean, dtype=float)
            m2 = np.asarray(results.mean, dtype=float)
            s1 = np.asarray(self.std, dtype=float)
            s2 = np.asarray(results.std, dtype=float)
            combined_mean = (n1 * m1 + n2 * m2) / total
            # Pooled second moment: E[x^2] = std^2 + mean^2 on each side.
            combined_sq = (n1 * (s1 ** 2 + m1 ** 2)
                           + n2 * (s2 ** 2 + m2 ** 2)) / total
            self.mean = combined_mean
            # Clamp at zero to absorb floating-point rounding.
            self.std = np.sqrt(np.maximum(combined_sq - combined_mean ** 2, 0.0))
            self.max = np.max([self.max, results.max], axis=0)
            self.min = np.min([self.min, results.min], axis=0)
        self.count += results.count

    def to_dict(self) -> dict[str, R | int]:
        """Return the statistics as a plain field-name -> value dict."""
        return asdict(self)

    def __str__(self) -> str:
        _mean = f'Mean: {self.mean:.2f}'
        _std = f'std: {self.std:.2f}'
        _max = f'Max: {self.max:.2f}'
        _min = f'Min: {self.min:.2f}'
        _count = f'Quantity: {self.count}'
        return f'{_count} | {_mean} | {_std} | {_max} | {_min}'
def from_list(itens: list[int | float]) -> Statistics:
    """Build a Statistics summary (mean/std/max/min/count) from a sequence."""
    return Statistics(
        np.mean(itens),
        np.std(itens),
        np.max(itens),
        np.min(itens),
        count=len(itens),
    )
def from_array(array: np.ndarray) -> Statistics:
    """Per-channel statistics over the first two axes; count is always 1."""
    over = (0, 1)
    return Statistics(
        np.mean(array, axis=over),
        np.std(array, axis=over),
        np.max(array, axis=over),
        np.min(array, axis=over),
        count=1,
    )
@get_traceback
def single_core_from_image_files(filenames: list[str]) -> Statistics:
    """Fold the pixel statistics of every image in ``filenames`` into one
    Statistics accumulator (worker-process entry point)."""
    if not filenames:
        raise ValueError('It was expected a list of filenames with at least one value.')
    out_stats = Statistics()
    for filename in filenames:
        pixels = np.asarray(Image.open(filename))
        out_stats.join_stats(from_array(pixels))
    return out_stats
def from_image_files(
    images_dir: str,
    extensions: str | tuple[str, ...] = '.jpg',
    selection: set[str] = set(),
) -> Statistics:
    """From a directory path with images, will generate the stats of all
    images. The statistics generated are: mean, std, max, and min.

    Parameters
    ----------
    images_dir : str
        Path for the directories that contains the images of interest.
    extensions : str | tuple[str, ...], optional
        The extensions of the images files, by default '.jpg'
    selection : set[str], optional
        The images basenames (with extension) of selected to compute
        the statistics, by default set([]) (all images will be used)
        NOTE(review): shared mutable default set; only read here.

    Returns
    -------
    dict[str, float | tuple[float, ...]]
        Will a dict where the key is the name of the statistics and the
        value is the computed statistic.
    """
    all_images = find_files(images_dir, extensions, True, selection)
    all_filenames = list(all_images.values())
    # One worker per CPU; each folds its share of files into a Statistics.
    cpu_num = multiprocessing.cpu_count()
    workers = multiprocessing.Pool(processes=cpu_num)
    filenames_splitted = np.array_split(all_filenames, cpu_num)
    print(
        f'Start compute Statistics for {len(all_filenames)} ({extensions}) files using {cpu_num} cores with '
        f'{len(filenames_splitted[0])} files per core...',
    )
    processes = []
    for filenames in filenames_splitted:
        if len(filenames) == 0:
            continue  # pragma: no cover
        p = workers.apply_async(single_core_from_image_files, (filenames.tolist(),))
        processes.append(p)
    # Merge the per-worker partial statistics into a single summary.
    out_stats = Statistics()
    for p in processes:
        out_stats.join_stats(p.get())
    print(f'Successfully computed the statstics of {out_stats.count} files with {len(processes)} processes!')
    return out_stats
def annotations_per_image(
    ccagt: CCAgT,
    categories_infos: CategoriesInfos,
) -> pd.DataFrame:
    """Count annotations per image, one column per category.

    Returns a frame indexed by image_id with per-category counts, a total
    ``qtd_annotations`` column and a ``NORs`` column (CLUSTER + SATELLITE).
    """
    df = ccagt.df
    # image x category contingency table.
    df_describe_images = df.groupby(['image_id', 'category_id']).size().reset_index().rename(columns={0: 'count'})
    df_describe_images = df_describe_images.pivot(columns=['category_id'], index='image_id')
    # Replace numeric category ids with upper-cased category names.
    df_describe_images = df_describe_images.rename({c.id: c.name.upper() for c in categories_infos}, axis=1)
    df_describe_images['qtd_annotations'] = df_describe_images.sum(axis=1)
    df_describe_images = df_describe_images.fillna(0)
    # NORs: sum of the CLUSTER and SATELLITE category counts.
    df_describe_images['NORs'] = df_describe_images[
        'count',
        Categories.CLUSTER.name,
    ] + df_describe_images[
        'count',
        Categories.SATELLITE.name,
    ]
    return df_describe_images
def ccagt_annotations(
    ccagt: CCAgT,
    categories_infos: CategoriesInfos,
) -> dict[str, Any]:
    """Summarise a CCAgT annotation table.

    Returns image/slide/annotation totals, per-category counts, their
    relative distribution, and area statistics per non-empty category.
    """
    df = ccagt.df
    # Per-category annotation counts.
    ann_count = {}
    for cat in categories_infos:
        ann_count[cat.name] = df.loc[df['category_id'] == cat.id, 'area'].shape[0]
    qtd_ann = df.shape[0]
    ann_dist = {cat_name: qtd / qtd_ann for cat_name, qtd in ann_count.items()}
    # Area statistics only for categories that actually have annotations.
    area_stats = {}
    for cat in categories_infos:
        if ann_count[cat.name] > 0:
            area_stats[cat.name] = from_list(
                df.loc[df['category_id'] == cat.id, 'area'].tolist(),
            )
    return {
        'qtd_images': df['image_id'].nunique(),
        'qtd_slide': df['slide_id'].nunique(),
        'qtd_annotations': qtd_ann,
        'qtd_annotations_categorical': ann_count,
        'dist_annotations': ann_dist,
        'area_stats': area_stats,
    }
def tvt_annotations_as_df(
    train: dict[str, Any],
    valid: dict[str, Any],
    test: dict[str, Any],
) -> tuple[pd.DataFrame, ...]:
    """Tabulate train/validation/test summaries (from ``ccagt_annotations``).

    Returns three dataframes: absolute quantities (with a total row),
    percentage distributions per fold, and per-category area statistics.
    """
    out = {}
    out['train'] = train
    out['validation'] = valid
    out['test'] = test
    folds = out.keys()
    # Absolute counts per fold.
    df_qtd = pd.DataFrame({
        'fold': folds,
        'images': [out[f]['qtd_images'] for f in folds],
        'slides': [out[f]['qtd_slide'] for f in folds],
        'annotations': [out[f]['qtd_annotations'] for f in folds],
    })
    # Per-category counts, excluding the BACKGROUND pseudo-category.
    df_qtd_categorical = pd.DataFrame([
        {
            'fold': f,
            **{
                k: v for k, v in out[f]['qtd_annotations_categorical'].items()
                if k != Categories.BACKGROUND.name
            },
        }
        for f in folds
    ])
    df_qtd = pd.merge(df_qtd, df_qtd_categorical, on='fold')
    # Append a 'total' summary row.
    df_qtd.loc['total'] = df_qtd.sum()
    df_qtd.loc[df_qtd.index == 'total', 'fold'] = 'total'
    total_images = df_qtd.loc[df_qtd['fold'] == 'total', 'images'].tolist()[0]
    total_ann = df_qtd.loc[df_qtd['fold'] == 'total', 'annotations'].tolist()[0]
    # Relative distribution of images/annotations across the folds.
    df_dist = pd.DataFrame({
        'fold': folds,
        '% images': [out[f]['qtd_images'] / total_images for f in folds],
        '% annotations': [out[f]['qtd_annotations'] / total_ann for f in folds],
    })
    df_dist_categorical = pd.DataFrame([
        {
            'fold': f,
            **{
                f'% {k}': v / out[f]['qtd_annotations']
                for k, v in out[f]['qtd_annotations_categorical'].items()
                if k != Categories.BACKGROUND.name
            },
        }
        for f in folds
    ])
    df_dist = pd.merge(df_dist, df_dist_categorical, on='fold')
    # Area statistics: one transposed block per fold, stacked vertically.
    df_area = pd.DataFrame()
    for f in folds:
        _df = pd.DataFrame([{'category': k, **v.to_dict()} for k, v in out[f]['area_stats'].items()])
        _df = _df.set_index('category').transpose()
        _df['fold'] = f
        df_area = pd.concat([df_area, _df])
    return df_qtd, df_dist, df_area
def dataset(
    ccagt_path: str,
    categories_infos: CategoriesInfos,
    dataset_dir: str,
    extensions: tuple[str, ...] = ('.jpg', '.png'),
) -> None:
    """Print a console summary of a dataset: annotation stats + file counts."""
    ccagt = read_parquet(ccagt_path)
    name = os.path.basename(os.path.normpath(dataset_dir))
    images_dir = os.path.join(dataset_dir, STRUCTURE['i'])
    masks_dir = os.path.join(dataset_dir, STRUCTURE['m'])
    print(f'Dataset name: `{name}` | Location: `{dataset_dir}`')
    print(f'From the annotations file ({ccagt_path}) -')
    if ccagt.df.shape[0] == 0:
        print('Do not have any annotation!')
    else:
        desc = ccagt_annotations(ccagt, categories_infos)
        print(f'Quantity of images: {desc["qtd_images"]}')
        print(f'Quantity of slides: {desc["qtd_slide"]}')
        print(f'Quantity of annotations: {desc["qtd_annotations"]}')
        for cat_name, qtd in desc['qtd_annotations_categorical'].items():
            dist = desc['dist_annotations'][cat_name]
            print(f' > Quantity of annotations for {cat_name}: {qtd} - {dist*100:.2f}%')
        print('Statistics of the area of each category...')
        for cat_name, area_stats in desc['area_stats'].items():
            print(f' > Statistics of area for {cat_name}: {area_stats}')
    # On-disk totals may differ from what the annotation file references.
    images_quantity = len(find_files(images_dir, extensions, True))
    masks_quantity = len(find_files(masks_dir, extensions, True))
    print('On disk data -')
    print(f'Total of images: {images_quantity} - at `{images_dir}`')
    print(f'Total of masks: {masks_quantity} - at `{masks_dir}`')
def categorical_mask(mask: np.ndarray) -> dict[int, int]:
    """Count how many pixels carry each category value in a mask."""
    values, pixel_counts = np.unique(mask, return_counts=True)
    return dict(zip(values, pixel_counts))
@get_traceback
def single_core_from_mask_files(
    filenames: list[str],
) -> dict[int, int]:
    """Sum per-category pixel counts across all mask files (worker entry).

    Pixel values that are not valid ``Categories`` members are ignored.
    """
    if not filenames:
        raise ValueError('It was expected a list of filenames with at least one value.')
    out = {cat.value: 0 for cat in Categories}
    for filename in filenames:
        # Masks are read as 8-bit grayscale category maps.
        mask = np.asarray(Image.open(filename).convert('L'))
        for value, qtd in categorical_mask(mask).items():
            if value in out:
                out[value] += qtd
    return out
def from_mask_files(
    masks_dir: str,
    extensions: str | tuple[str, ...] = '.png',
    selection: set[str] = set(),
) -> dict[str, int]:
    """Count pixels per category across all masks in ``masks_dir``, in parallel.

    Returns a mapping of category name -> total pixel count.
    NOTE(review): ``selection`` uses a shared mutable default set; only read here.
    """
    all_masks = find_files(masks_dir, extensions, True, selection)
    all_filenames = list(all_masks.values())
    # One worker per CPU; each counts pixels for its share of the masks.
    cpu_num = multiprocessing.cpu_count()
    workers = multiprocessing.Pool(processes=cpu_num)
    filenames_splitted = np.array_split(all_filenames, cpu_num)
    print(
        f'Start count pixels quantity for {len(all_filenames)} ({extensions}) files using {cpu_num} cores with '
        f'{len(filenames_splitted[0])} files per core...',
    )
    processes = []
    for filenames in filenames_splitted:
        if len(filenames) == 0:
            continue  # pragma: no cover
        p = workers.apply_async(single_core_from_mask_files, (filenames.tolist(),))
        processes.append(p)
    # Sum the per-worker category counts into one total per category id.
    out = {cat.value: 0 for cat in Categories}
    for p in processes:
        counts = p.get()
        out = {k: v + counts[k] if k in counts else v for k, v in out.items()}
    n_files = len(all_masks)
    print(f'Successfully computed pixels quantity of each category from {n_files} files with {len(processes)} processes!')
    # Convert numeric category ids to their names for the public result.
    out_by_names = {str(Categories(k).name): int(v) for k, v in out.items()}
    return out_by_names
| StarcoderdataPython |
61903 | """
Generate all synonymous mutants for a input protein
<NAME>
"""
# Ensure Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import argparse
import sys
from signal import signal, SIGPIPE, SIG_DFL
# catch broken pipe errors to allow ex) python pyParse.py foo bar | head
# see: https://stackoverflow.com/a/30091579
signal(SIGPIPE, SIG_DFL)
#===============================================================================
def fasta_reader(fasta):
    """Lazily parse an open FASTA handle/iterable, yielding (name, sequence).

    Lines are grouped with ``itertools.groupby`` keyed on whether they start
    a record ('>'); this joins sequences that are split across multiple
    lines (forced line breaks) back into a single string.

    Adapted from: https://www.biostars.org/p/710/#1412
    """
    groups = (chunk for _, chunk in
              itertools.groupby(fasta, lambda line: line[0] == ">"))
    for header_lines in groups:
        # Record name: first header line with the leading '>' removed.
        name = next(header_lines)[1:].strip()
        # All sequence lines up to the next header, concatenated.
        sequence = "".join(line.strip() for line in next(groups))
        yield name, sequence
#-------------------------------------------------------------------------------
def revcomp(seq):
    """Return the reverse complement of a DNA string (alphabet A/C/G/T/N)."""
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(seq))
#-------------------------------------------------------------------------------
def to_prot(dna):
    """Translate an in-frame DNA string into a protein string.

    Any codon containing an 'N' becomes the dummy amino acid 'X'; a trailing
    partial codon is ignored. Returns ``(protein, had_unknown)`` where
    ``had_unknown`` is True when at least one 'X' was produced (for
    downstream filtering). Assumes the sequence is already in frame.
    """
    codon_table = {
        'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
        'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
        'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
        'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
        'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
        'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
        'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
        'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
        'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
        'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
        'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
        'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W'}
    translated = []
    # Stop before a trailing partial codon, if any.
    for start in range(0, len(dna) - len(dna) % 3, 3):
        codon = dna[start:start + 3]
        translated.append('X' if 'N' in codon else codon_table[codon])
    protein = ''.join(translated)
    return (protein, 'X' in protein)
#-------------------------------------------------------------------------------
def to_dna(dna):
    """Generate every single-codon synonymous variant of an in-frame DNA string.

    Returns a list of ``(name, sequence)`` tuples where the name is
    ``'<pos>_<aa>_<codon>'`` (1-based protein position); variants identical
    to the input sequence are excluded. Raises ValueError if the translated
    input contains an 'N' (i.e. an 'X' amino acid).
    """
    rev_codon_table = {
        'A':['GCT','GCC','GCA','GCG'],
        'R':['CGT','CGC','CGA','CGG','AGA','AGG'],
        'N':['AAT','AAC'],
        'D':['GAT','GAC'],
        'C':['TGT','TGC'],
        'Q':['CAA','CAG'],
        'E':['GAA','GAG'],
        'G':['GGT','GGC','GGA','GGG'],
        'H':['CAT','CAC'],
        'I':['ATT','ATC','ATA'],
        'L':['TTA','TTG','CTT','CTC','CTA','CTG'],
        'K':['AAA','AAG'],
        'M':['ATG'],
        'F':['TTT','TTC'],
        'P':['CCT','CCC','CCA','CCG'],
        'S':['TCT','TCC','TCA','TCG','AGT','AGC'],
        'T':['ACT','ACC','ACA','ACG'],
        'W':['TGG'],
        'Y':['TAT','TAC'],
        'V':['GTT','GTC','GTA','GTG'],
        '*':['TAA','TGA','TAG']}
    # Full codons only; a trailing partial codon is dropped.
    codons = [dna[i:i + 3] for i in range(0, len(dna), 3) if len(dna[i:i + 3]) == 3]
    prot, flag = to_prot(dna)
    if flag:
        raise ValueError('N found in input sequence')
    variants = []
    for pos, aa in enumerate(prot):
        for new_codon in rev_codon_table[aa]:
            name = '{}_{}_{}'.format(pos + 1, aa, new_codon)
            sequence = ''.join(codons[:pos]) + new_codon + ''.join(codons[pos + 1:])
            if sequence != dna:
                variants.append((name, sequence))
    return variants
#===============================================================================
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Generate all single synonymous mutations for an input sequence')
    # Input defaults to stdin so the script can sit in a shell pipeline.
    parser.add_argument('infile',
    type=argparse.FileType('r'),
    default=sys.stdin,
    nargs='?',
    help='path to a *.fasta file of the reads (or stdin if none)')
    parser.add_argument('-r',
    '--rev-comp',
    dest='rc',
    action='store_true',
    help='reverse complement the sequence?')
    args = parser.parse_args()
    # drop the fasta header since we don't need it
    for _, seq in fasta_reader(args.infile):
        if args.rc:
            seq = revcomp(seq)
        out = to_dna(seq)
        # Emit each synonymous variant as its own FASTA record.
        for header, synon in out:
            print('>{}\n{}'.format(header, synon), file=sys.stdout)
| StarcoderdataPython |
266731 | #!/bin/python
import i3ipc
# Status-bar helper: prints an indicator glyph for the focused container's
# parent split layout in i3. NOTE(review): the three string literals may
# contain private-use icon characters that do not render everywhere; verify
# against the original file before editing them.
i3 = i3ipc.Connection()
splitv_text = ''
splith_text = ''
split_none = ''
parent = i3.get_tree().find_focused().parent
if parent.layout == 'splitv' :
    print( splitv_text )
elif parent.layout == 'splith' :
    print( splith_text )
else :
    print( split_none )
1817059 | """
Reads a stream from a stream (files)
@author rambabu.posa
"""
from pyspark.sql import SparkSession
import logging
logging.debug("-> start")
# Creates a session on a local master
spark = SparkSession.builder.appName("Read lines from a file stream") \
    .master("local[*]").getOrCreate()
# Watch /tmp/ as a text-file stream: each new file's lines become rows.
df = spark.readStream.format("text") \
    .load("/tmp/")
# Use below for Windows
# .load("C:/tmp/")
# Echo up to 3 untruncated rows per micro-batch to the console.
query = df.writeStream.outputMode("append") \
    .format("console") \
    .option("truncate", False) \
    .option("numRows", 3) \
    .start()
# Block until the streaming query is stopped externally.
query.awaitTermination()
logging.debug("<- end")
| StarcoderdataPython |
5134402 | from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
class GraphProcessor:
def __init__(self, dataframe: pd.DataFrame, cols=[]):
if dataframe is None or not cols:
raise Exception("Dataframe must not be None or Cols is empty")
self.data_frame = dataframe
self.usecols = cols
self.extrac_data_list = None
self._pre_process_data()
def _pre_process_data(self):
size_of_cols = len(self.usecols)
self.extrac_data_list = []
for i in range(size_of_cols):
list_data = self.data_frame[self.usecols[i]]
self.extrac_data_list.append(list_data.to_list())
    def get_data_list(self):
        """Return the per-column value lists built by _pre_process_data."""
        return self.extrac_data_list
def get_xtick_positions(self, num_of_data_types=None):
xtick_positions = []
if num_of_data_types % 2 == 0:
#print("Even number of items per xtick")
d = 2
for i in range(0, num_of_data_types):
xtick_positions.append(num_of_data_types - d * i)
else:
#print("Odd number of items per tick")
d = 2
for i in range(0, num_of_data_types):
xtick_positions.append(num_of_data_types - d * i)
xtick_positions.reverse()
return xtick_positions
def get_y_max(self):
if self.extrac_data_list is None:
raise Exception("Data not populated")
y_maxes = []
for data in self.extrac_data_list:
y_maxes.append(np.max(data))
return max(y_maxes)
def get_y_min(self):
if self.extrac_data_list is None:
raise Exception("Data not populated")
y_mins = []
for data in self.extrac_data_list:
y_mins.append(np.min(data))
return min(y_mins)
def plot(self, datasets=None, labels=None, hatches=None,
colors=None,
xlabel=None, ylabel=None, xticks=None,
title=None, xlabelfontsize=26, xlabelfontweight='medium',
ylabelfontsize=26, ylabelfontweight='medium',
yticksfontsize=26, yticksfontweight='medium',
xticksfontsize=26, xticksfontweight='medium',
titlefontsize=28,
titlefontweight='bold',
xtick_positions=None,
legendfontweight='medium', legendfontsize=22,
y_max=None,
fig_size=(24, 14),
width=0.25,
alpha=0.5,
save=False,
save_file='',
show=True):
plt.rc('xtick', labelsize=xticksfontsize)
plt.rc('ytick', labelsize=yticksfontsize)
plt.rc('axes', labelsize=xlabelfontsize) # , labelweight='bold')
plt.rc('legend', fontsize=legendfontsize)
x = np.arange(len(xticks))
fig, ax = plt.subplots(figsize=fig_size)
# ax.set_yscale('log')
count = 0
for dataset, label, hatch, color, xtick_pos in zip(datasets, labels, hatches, colors,
xtick_positions):
y = dataset
count = count + 1
rects1 = ax.bar(x + width / 2 * xtick_pos, y, width=width,
label=label, alpha=alpha, color=color, hatch=hatch)
ax.set_ylabel(ylabel, fontsize=ylabelfontsize, fontweight=ylabelfontweight)
ax.set_xlabel(xlabel, fontsize=xlabelfontsize, fontweight=xlabelfontweight)
ax.set_title(title, fontsize=titlefontsize, fontweight=titlefontweight)
ax.set_xticks(x)
ax.set_xticklabels(xticks, fontsize=xticksfontsize, fontweight=xticksfontweight)
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.07),
fancybox=True, shadow=True, ncol=5, prop={'size': legendfontsize, 'weight': legendfontweight})
#ax.legend()
fig.tight_layout()
if save:
plt.savefig(save_file)
if show:
plt.show()
plt.close()
| StarcoderdataPython |
9627167 | <reponame>VladimirsHisamutdinovs/Advanced_Python_Operations
import requests
def main():
    """Issue a GET request with a custom header and print the response.

    Demonstrates passing request headers via the ``headers=`` keyword;
    earlier commented-out POST/param experiments were removed as dead code.
    """
    url_get = 'http://httpbin.org/get'
    header_vals = {
        'author': '<NAME>'
    }
    get_res = requests.get(url_get, headers=header_vals)
    print_results(get_res)
def print_results(res):
    """Print the status code, headers and body of a requests response."""
    for label, value in (('Result', res.status_code),
                         ('Headers', res.headers),
                         ('Data', res.text)):
        print(f'{label}: {value}')
if __name__ == "__main__":
main()
| StarcoderdataPython |
8023999 | <filename>tests/test_stream.py
from __future__ import print_function
import io
import sys
import pytest
import progressbar
def test_nowrap():
    """wrap()/unwrap() without stream flags must leave std streams untouched."""
    # Start from a definitely-unwrapped state.
    for _ in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
    orig_out, orig_err = sys.stdout, sys.stderr
    progressbar.streams.wrap()
    assert orig_out == sys.stdout
    assert orig_err == sys.stderr
    progressbar.streams.unwrap()
    assert orig_out == sys.stdout
    assert orig_err == sys.stderr
    # Leave the streams definitely unwrapped for other tests.
    for _ in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
def test_wrap():
    """wrap() replaces both std streams once; a second wrap is a no-op."""
    # Start from a definitely-unwrapped state.
    for _ in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
    orig_out, orig_err = sys.stdout, sys.stderr
    progressbar.streams.wrap(stderr=True, stdout=True)
    assert orig_out != sys.stdout
    assert orig_err != sys.stderr
    # Wrapping again must keep the already-wrapped streams.
    orig_out, orig_err = sys.stdout, sys.stderr
    progressbar.streams.wrap(stderr=True, stdout=True)
    assert orig_out == sys.stdout
    assert orig_err == sys.stderr
    # Leave the streams definitely unwrapped for other tests.
    for _ in range(5):
        progressbar.streams.unwrap(stderr=True, stdout=True)
def test_excepthook():
    """The wrapped excepthook must handle a live exception without raising."""
    progressbar.streams.wrap(stderr=True, stdout=True)
    try:
        raise RuntimeError()
    except RuntimeError:
        progressbar.streams.excepthook(*sys.exc_info())
    # Unwrapping twice checks that the second call is a harmless no-op.
    for _ in range(2):
        progressbar.streams.unwrap_excepthook()
def test_fd_as_io_stream():
    """A plain StringIO must be usable as the progress bar's output fd."""
    stream = io.StringIO()
    with progressbar.ProgressBar(fd=stream) as bar:
        for value in range(101):
            bar.update(value)
    stream.close()
def test_no_newlines():
    """Redirected streams with line breaks disabled must tolerate prints."""
    options = {
        'redirect_stderr': True,
        'redirect_stdout': True,
        'line_breaks': False,
        'is_terminal': True,
    }
    with progressbar.ProgressBar(**options) as bar:
        for i in range(5):
            bar.update(i)
        for i in range(5, 10):
            try:
                print('\n\n', file=progressbar.streams.stdout)
                print('\n\n', file=progressbar.streams.stderr)
            except ValueError:
                # Writing to a closed redirected stream is acceptable here.
                pass
            bar.update(i)
@pytest.mark.parametrize('stream', [sys.__stdout__, sys.__stderr__])
def test_fd_as_standard_streams(stream):
    """The real stdout/stderr objects must work as the output fd."""
    with progressbar.ProgressBar(fd=stream) as bar:
        for value in range(101):
            bar.update(value)
| StarcoderdataPython |
5048462 | <gh_stars>0
# You are given a list of n-1 integers and these integers are in the range of 1 to n
# There are no duplicates in the list. One of the integers is missing in the list
# Write an efficient code to find the missing integer.
def find_missing(values):
    """Return the single missing integer from the range 1..len(values)+1.

    Uses the arithmetic-series sum formula, so it runs in O(n) time and
    O(1) extra space.
    """
    n = len(values) + 1
    return n * (n + 1) // 2 - sum(values)

arry = [1,2,3,5,4,8,6,7,10,9,12,13,16,15,14]
| StarcoderdataPython |
3230610 | import datetime
from json import loads, JSONDecodeError
import re
from .net.http import ApiRequester
from .models.response import Response
from .models.request import Fields
from .exceptions.error import ParameterError, EmptyApiKeyError, \
UnparsableApiResponseError
class Client:
    """Client for the Registrant Alert API (v2).

    Validates user-supplied search parameters, builds the request payload
    and parses the JSON API response into a `Response` instance.
    """

    __default_url = "https://registrant-alert.whoisxmlapi.com/api/v2"
    _api_requester: ApiRequester or None
    _api_key: str
    # API keys look like "at_" followed by 29 alphanumeric characters.
    _re_api_key = re.compile(r'^at_[a-z0-9]{29}$', re.IGNORECASE)
    _SUPPORTED_FORMATS = ['json', 'xml']
    _PARSABLE_FORMAT = 'json'

    JSON_FORMAT = 'json'
    XML_FORMAT = 'xml'
    PREVIEW_MODE = 'preview'
    PURCHASE_MODE = 'purchase'

    __DATETIME_OR_NONE_MSG = 'Value should be None or an instance of ' \
                             'datetime.date'

    def __init__(self, api_key: str, **kwargs):
        """
        :param api_key: str: Your API key.
        :key base_url: str: (optional) API endpoint URL.
        :key timeout: float: (optional) API call timeout in seconds
        """
        self._api_key = ''
        self.api_key = api_key
        if 'base_url' not in kwargs:
            kwargs['base_url'] = Client.__default_url
        self.api_requester = ApiRequester(**kwargs)

    @property
    def api_key(self) -> str:
        return self._api_key

    @api_key.setter
    def api_key(self, value: str):
        # Setter validates the key format and raises ParameterError if bad.
        self._api_key = Client._validate_api_key(value)

    @property
    def api_requester(self) -> ApiRequester or None:
        return self._api_requester

    @api_requester.setter
    def api_requester(self, value: ApiRequester):
        self._api_requester = value

    @property
    def base_url(self) -> str:
        return self._api_requester.base_url

    @base_url.setter
    def base_url(self, value: str or None):
        # Setting None restores the default endpoint.
        if value is None:
            self._api_requester.base_url = Client.__default_url
        else:
            self._api_requester.base_url = value

    @property
    def timeout(self) -> float:
        return self._api_requester.timeout

    @timeout.setter
    def timeout(self, value: float):
        self._api_requester.timeout = value

    def preview(self, **kwargs) -> Response:
        """
        Get parsed API response as a `Response` instance.
        Mode = `preview`
        :key basic_terms: Required if advanced_terms aren't specified.
            Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
            List. Take a look at API documentation for the format
        :key since_date: Optional. datetime.date. Yesterday's date by default.
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :return: `Response` instance
        :raises ConnectionError:
        :raises RegistrantAlertApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        kwargs['mode'] = Client.PREVIEW_MODE
        return self.data(**kwargs)

    def purchase(self, **kwargs):
        """
        Get parsed API response as a `Response` instance.
        Mode = `purchase`
        Accepts the same keyword arguments as :meth:`preview` except
        `include_audit_dates`; see that method for the full list and the
        raised exceptions.
        :return: `Response` instance
        """
        kwargs['mode'] = Client.PURCHASE_MODE
        return self.data(**kwargs)

    def data(self, **kwargs) -> Response:
        """
        Get parsed API response as a `Response` instance.
        :key basic_terms: Required if advanced_terms aren't specified.
            Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
            List. Take a look at API documentation for the format
        :key mode: Optional. Supported options - `Client.PREVIEW_MODE` and
            `Client.PURCHASE_MODE`. Default is `Client.PREVIEW_MODE`
        :key since_date: Optional. datetime.date. Yesterday's date by default.
        :key punycode: Optional. Boolean. Default value is `True`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :return: `Response` instance
        :raises ConnectionError:
        :raises RegistrantAlertApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        :raises UnparsableApiResponseError: API response can't be parsed
        """
        # Force the parsable (JSON) format so we can build a Response.
        kwargs['response_format'] = Client._PARSABLE_FORMAT
        response = self.raw_data(**kwargs)
        try:
            parsed = loads(str(response))
            if 'domainsCount' in parsed:
                return Response(parsed)
            raise UnparsableApiResponseError(
                "Could not find the correct root element.", None)
        except JSONDecodeError as error:
            raise UnparsableApiResponseError("Could not parse API response", error)

    def raw_data(self, **kwargs) -> str:
        """
        Get raw API response.
        Accepts the same keyword arguments as :meth:`data`, plus:
        :key response_format: Optional. use constants
            JSON_FORMAT and XML_FORMAT
        :return: str
        :raises ConnectionError:
        :raises RegistrantAlertApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        :raises EmptyApiKeyError: API key is not set
        """
        if self.api_key == '':
            raise EmptyApiKeyError('')

        basic_terms = Client._validate_basic_terms(kwargs['basic_terms']) \
            if 'basic_terms' in kwargs else None
        advanced_terms = Client._validate_advanced_terms(kwargs['advanced_terms']) \
            if 'advanced_terms' in kwargs else None
        if not advanced_terms and not basic_terms:
            raise ParameterError(
                "Required one from basic_terms and advanced_terms")

        # `output_format` is accepted as an alias of `response_format`.
        if 'output_format' in kwargs:
            kwargs['response_format'] = kwargs['output_format']
        if 'response_format' in kwargs:
            response_format = Client._validate_response_format(
                kwargs['response_format'])
        else:
            response_format = Client._PARSABLE_FORMAT

        if 'since_date' in kwargs:
            since_date = Client._validate_date(kwargs['since_date'])
        else:
            # Default to yesterday: the most recent complete day of data.
            since_date = Client._validate_date(
                datetime.date.today() - datetime.timedelta(days=1))

        punycode = Client._validate_punycode(kwargs['punycode']) \
            if 'punycode' in kwargs else True
        mode = Client._validate_mode(kwargs['mode']) \
            if 'mode' in kwargs else Client.PREVIEW_MODE

        def _opt_date(name):
            # Optional date kwargs: absent -> None -> omitted from payload.
            return Client._validate_date(kwargs.get(name))

        return self._api_requester.post(self._build_payload(
            self.api_key,
            basic_terms,
            advanced_terms,
            mode,
            since_date,
            punycode,
            response_format,
            _opt_date('created_date_from'),
            _opt_date('created_date_to'),
            _opt_date('updated_date_from'),
            _opt_date('updated_date_to'),
            _opt_date('expired_date_from'),
            _opt_date('expired_date_to'),
        ))

    @staticmethod
    def _validate_api_key(api_key) -> str:
        """Return *api_key* as str if it matches the key format, else raise."""
        if Client._re_api_key.search(
                str(api_key)
        ) is not None:
            return str(api_key)
        else:
            raise ParameterError("Invalid API key format.")

    @staticmethod
    def _validate_basic_terms(value) -> dict:
        """Validate a basic-terms dict with `include`/`exclude` string lists."""
        include, exclude = [], []
        if value is None:
            raise ParameterError("Terms list cannot be None.")
        elif type(value) is dict:
            if 'include' in value:
                include = list(map(lambda s: str(s), value['include']))
                include = list(
                    filter(lambda s: s is not None and len(s) > 0, include))
                # Bug fix: the original check `4 <= len(include) <= 1` could
                # never be true, so the 1..4 limit was never enforced.
                if not 1 <= len(include) <= 4:
                    raise ParameterError("Include terms list must include "
                                         "from 1 to 4 terms.")
            if 'exclude' in value:
                exclude = list(map(lambda s: str(s), value['exclude']))
                exclude = list(
                    filter(lambda s: s is not None and len(s) > 0, exclude))
                # Bug fix: the original check `4 <= len(exclude) <= 0` could
                # never be true; only the upper bound needs checking since
                # an empty exclude list is allowed.
                if len(exclude) > 4:
                    raise ParameterError("Exclude terms list must include "
                                         "from 0 to 4 terms.")
            if include:
                return {'include': include, 'exclude': exclude}
        raise ParameterError("Expected a dict with 2 lists of strings.")

    @staticmethod
    def _validate_advanced_terms(value) -> list:
        """Validate an advanced-terms list of {'field': ..., 'term': ...} dicts."""
        if value is None:
            raise ParameterError("Terms list cannot be None.")
        elif type(value) is list:
            # Bug fix: the original check `4 <= len(value) < 1` could never
            # be true, so the 1..4 size limit was never enforced.  The error
            # message typo ("form") is fixed as well.
            if not 1 <= len(value) <= 4:
                raise ParameterError(
                    "Terms list must include from 1 to 4 items.")
            for item in value:
                if 'field' not in item or 'term' not in item:
                    raise ParameterError(
                        "Invalid advanced search terms format."
                        "The 'field' or 'term' is missing.")
                if item['field'] not in Fields.values():
                    raise ParameterError("Unknown field name.")
                if item['term'] is None or type(item['term']) is not str \
                        or len(item['term']) < 2:
                    raise ParameterError("Term should be non-empty string.")
            return value
        raise ParameterError("Expected a list of pairs field <-> term.")

    @staticmethod
    def _validate_response_format(value: str):
        """Return the lowercase format name; raise for unsupported formats."""
        if value.lower() in [Client.JSON_FORMAT, Client.XML_FORMAT]:
            return value.lower()
        raise ParameterError(
            f"Response format must be {Client.JSON_FORMAT} "
            f"or {Client.XML_FORMAT}")

    @staticmethod
    def _validate_mode(value: str):
        """Return the lowercase mode name; raise for unsupported modes."""
        if value.lower() in [Client.PREVIEW_MODE, Client.PURCHASE_MODE]:
            return value.lower()
        raise ParameterError(
            f"Mode must be {Client.PREVIEW_MODE} or {Client.PURCHASE_MODE}")

    @staticmethod
    def _validate_punycode(value: bool):
        """Accept only real booleans for the punycode flag."""
        if value in [True, False]:
            return value
        raise ParameterError(
            "Punycode parameter value must be True or False")

    @staticmethod
    def _validate_date(value: datetime.date or None):
        """Return None for None, the ISO string for a date, else raise.

        Bug fix: the original returned str(None) == "None" for None input,
        which survived the payload's None-filter and would have been sent
        to the API as a literal "None" string.
        """
        if value is None:
            return None
        if isinstance(value, datetime.date):
            return str(value)
        raise ParameterError(Client.__DATETIME_OR_NONE_MSG)

    @staticmethod
    def _build_payload(
            api_key,
            basic_terms,
            advanced_terms,
            mode,
            since_date,
            punycode,
            response_format,
            created_date_from,
            created_date_to,
            updated_date_from,
            updated_date_to,
            expired_date_from,
            expired_date_to,
    ) -> dict:
        """Assemble the POST payload, omitting every None-valued field."""
        tmp = {
            'apiKey': api_key,
            'basicSearchTerms': basic_terms,
            'advancedSearchTerms': advanced_terms,
            'mode': mode,
            'sinceDate': since_date,
            'punycode': punycode,
            'responseFormat': response_format,
            'createdDateFrom': created_date_from,
            'createdDateTo': created_date_to,
            'updatedDateFrom': updated_date_from,
            'updatedDateTo': updated_date_to,
            'expiredDateFrom': expired_date_from,
            'expiredDateTo': expired_date_to,
        }
        return {k: v for k, v in tmp.items() if v is not None}
| StarcoderdataPython |
3373551 | <filename>scripts/type_extractor/tests/merge_files_tests.py
"""Unit tests for the merge_files module."""
import json
import unittest
from type_extractor.merge_files import choose_one_type
from type_extractor.merge_files import merge_functions
from type_extractor.merge_files import merge_types
class ChooseOneTypeTests(unittest.TestCase):
    """Tests for choose_one_type(): the variant with more information wins."""

    def test_choose_new_struct_with_members(self):
        empty = {'type': 'structure', 'members': []}
        filled = {'type': 'structure', 'members': [('int', 'x')]}
        self.assertEqual(choose_one_type(empty, filled, {}), filled)

    def test_choose_existing_struct_with_members(self):
        filled = {'type': 'structure', 'members': [('int', 'x')]}
        empty = {'type': 'structure', 'members': []}
        self.assertEqual(choose_one_type(filled, empty, {}), filled)

    def test_choose_new_union_with_members(self):
        empty = {'type': 'union', 'members': []}
        filled = {'type': 'union', 'members': [('int', 'x')]}
        self.assertEqual(choose_one_type(empty, filled, {}), filled)

    def test_choose_existing_union_with_members(self):
        filled = {'type': 'union', 'members': [('int', 'x')]}
        empty = {'type': 'union', 'members': []}
        self.assertEqual(choose_one_type(filled, empty, {}), filled)

    def test_choose_existing_typedef_with_known_typedefed_type(self):
        known = {'type': 'typedef', 'typedefed_type': 'struct s'}
        unknown = {'type': 'typedef', 'typedefed_type': 'unknown'}
        self.assertEqual(choose_one_type(known, unknown, {}), known)

    def test_choose_new_typedef_when_existing_is_unknown(self):
        unknown = {'type': 'typedef', 'typedefed_type': 'unknown'}
        known = {'type': 'typedef', 'typedefed_type': 'struct s'}
        self.assertEqual(choose_one_type(unknown, known, {}), known)

    def test_choose_one_type_when_they_are_same(self):
        first = {'type': 'int'}
        second = {'type': 'int'}
        self.assertEqual(choose_one_type(first, second, {}), first)
class MergeFunctionsTests(unittest.TestCase):
    """Tests for merge_functions()."""

    def test_merge_functions_chooses_first_function(self):
        # On a name clash the already-merged declaration must be kept.
        merged = {'f1': 'int f1(int a);'}
        incoming = {'f1': 'int f1();', 'f2': 'int f2(void);'}
        merge_functions(merged, incoming)
        self.assertEqual(merged, {'f1': 'int f1(int a);', 'f2': 'int f2(void);'})
class MergeTypesTests(unittest.TestCase):
    """Tests for merge_types()."""

    def test_merge_types_calls_choose_one_type_for_conflicts(self):
        """Merging keeps existing entries and adds the new ones."""
        merged = {'t1': {'type': 'int'}, 't2': 'struct'}
        new = {'t1': {'type': 'int'}, 't3': 'typedefed_type'}
        expected = {'t1': {'type': 'int'}, 't2': 'struct', 't3': 'typedefed_type'}
        merge_types(merged, new)
        self.assertEqual(merged, expected)

    def test_circular_typedefs_created_while_merging_are_broken_to_unknown(self):
        """A typedef cycle formed by merging must be broken via 'unknown'."""
        # First input: A -> unknown, B -> A.
        json1_types = json.loads(
            """
            {
            "0ff7d695c742c443b5c3c60175ffb84414ea7bc7": {
            "name": "A",
            "type": "typedef",
            "typedefed_type": "unknown"
            },
            "ac574f36b4e34657059d13210778a209d24cecc0": {
            "name": "B",
            "type": "typedef",
            "typedefed_type": "0ff7d695c742c443b5c3c60175ffb84414ea7bc7"
            }
            }
            """
        )
        # Second input: A -> B, B -> unknown.  Merged naively this would
        # produce the cycle A -> B -> A.
        json2_types = json.loads(
            """
            {
            "0ff7d695c742c443b5c3c60175ffb84414ea7bc7": {
            "name": "A",
            "type": "typedef",
            "typedefed_type": "ac574f36b4e34657059d13210778a209d24cecc0"
            },
            "ac574f36b4e34657059d13210778a209d24cecc0": {
            "name": "B",
            "type": "typedef",
            "typedefed_type": "unknown"
            }
            }
            """
        )
        merged = {}
        A_index = '0ff7d695c742c443b5c3c60175ffb84414ea7bc7'
        B_index = 'ac574f36b4e34657059d13210778a209d24cecc0'
        merge_types(merged, json1_types)
        merge_types(merged, json2_types)
        # At least one side of the would-be cycle must point to 'unknown'.
        self.assertTrue(
            merged[A_index]['typedefed_type'] == 'unknown' or
            merged[B_index]['typedefed_type'] == 'unknown'
        )
1997307 | <gh_stars>1-10
""""""
"""
Copyright (c) 2021 <NAME> as part of Airlab Amsterdam
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#%%
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# Global font defaults applied to every figure produced by this script.
matplotlib.rcParams.update({'font.size': 12})
matplotlib.rcParams.update({'font.weight': 'normal'})
# Dataset identifiers; each has its experiments CSV under experiments/<name>/.
datasets = ['uci_electricity','uci_traffic','kaggle_favorita','kaggle_webtraffic']
#%% Plot
def create_plot(dataset, ylim):
    """Plot validation-loss curves per algorithm (rows) and learning rate
    (columns), one line per batch size, for the given dataset.

    :param dataset: dataset name; selects experiments/<dataset>/ files.
    :param ylim: [ymin, ymax] pair applied to every subplot.
    """
    experiment_dir = 'experiments/'+dataset
    file_experiments = experiment_dir + f'/experiments_{dataset}.csv'
    df = pd.read_csv(file_experiments, sep=';')
    algorithms = ['deepar', 'transformer_conv', 'tcn', 'wavenet', 'mlrnn', 'bitcn']
    learning_rates = [0.001, 0.0005, 0.0001]
    n_seeds = 1
    n_epochs = 100
    seeds = np.arange(n_seeds)
    plots = ['r-','g--','b-.']
    fig, axs = plt.subplots(nrows = len(algorithms), ncols = len(learning_rates), sharex=True, sharey=True)
    x = np.arange(n_epochs)
    # Loop over algorithms and learning rates
    for i, algorithm in enumerate(algorithms):
        for j, learning_rate in enumerate(learning_rates):
            batch_sizes = np.sort(df[df.algorithm == algorithm]['batch_size'].unique()).tolist()
            # This is to catch omittance of this single expceriment for MLRNN which kept giving errors
            if (dataset == 'uci_traffic') & (algorithm == 'mlrnn') & (learning_rate == 0.0001):
                batch_sizes = [64, 256]
            axs[i, j].grid()
            # Y holds one loss curve per (batch size, seed) combination.
            Y = np.zeros((len(batch_sizes), len(seeds), n_epochs))
            for k, batch_size in enumerate(batch_sizes):
                for l, seed in enumerate(seeds):
                    df_current = df[(df.algorithm == algorithm) & (df.seed == seed) & (df.batch_size == batch_size) & (df.learning_rate == learning_rate)]
                    # NOTE(review): int(...) on a selection assumes exactly
                    # one matching experiment row -- TODO confirm.
                    d_hidden = int(df_current['d_hidden'])
                    filename_loss = f"{experiment_dir}/{algorithm}/{algorithm}_seed={seed}_hidden={d_hidden}_lr={learning_rate}_bs={batch_size}_loss.csv"
                    df_loss_current = pd.read_csv(filename_loss, usecols=['Validation_loss'])
                    # Zero losses mark unfinished epochs; hide them as NaN.
                    df_loss_current.loc[df_loss_current['Validation_loss'] == 0] = np.nan
                    Y[k, l, :] = df_loss_current['Validation_loss'].values
                # Plot the loss averaged over seeds for this batch size.
                axs[i, j].plot(x, Y[k].mean(axis=0), plots[k], label=f'{batch_size}', linewidth=3, )
            axs[i, j].legend(loc = 'upper right')
            axs[i, j].grid()
            # axs[i, j].xaxis.set_ticks(np.arange(min(x), max(x)+1, 5))
            axs[i, j].yaxis.set_tick_params(which='both', labelbottom=True)
            axs[i, j].xaxis.set_tick_params(which='both', labelbottom=True)
            axs[i, j].set_title(f'{algorithm} / {learning_rate}')
            axs[i, j].set_ylim(ylim[0], ylim[1])
            # Only plot ylabel for first column
            if j == 0:
                axs[i, j].set_ylabel('Validation loss')
            # Only plot xlabel for last row
            if i == len(algorithms) - 1:
                axs[i, j].set(xlabel='Epochs')
# Per-dataset cells: y-axis ranges below are chosen manually per dataset.
#%% Electricity
dataset = datasets[0]
ylim = [-1.2, -0.2]
create_plot(dataset, ylim)
#%% Traffic
dataset = datasets[1]
ylim = [-4.8, -2]
create_plot(dataset, ylim)
#%% Favorita
dataset = datasets[2]
ylim = [-3.4, 0]
create_plot(dataset, ylim)
#%% Webtraffic
dataset = datasets[3]
ylim = [0.2, 1.5]
create_plot(dataset, ylim)
49410 | <filename>qnarre/prep/tokens/perceiver.py<gh_stars>0
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from ...tokens.utils import AddedToken, PreTrainedTokenizer
class Tokenizer(PreTrainedTokenizer):
    """Byte-level tokenizer: 6 special tokens (ids 0-5) followed by the 256
    raw byte values, each mapped to id byte + 6.  No vocabulary file is
    needed; the mapping is computed from the byte value itself.
    """
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        pad="[PAD]",
        bos="[BOS]",
        eos="[EOS]",
        msk="[MASK]",
        cls="[CLS]",
        sep="[SEP]",
        model_max_length=2048,
        **kw,
    ):
        """Create the tokenizer; special tokens may be strings or AddedToken."""
        # Normalize every plain string into an AddedToken without stripping.
        pad = AddedToken(pad, lstrip=False, rstrip=False) if isinstance(pad, str) else pad
        bos = AddedToken(bos, lstrip=False, rstrip=False) if isinstance(bos, str) else bos
        eos = AddedToken(eos, lstrip=False, rstrip=False) if isinstance(eos, str) else eos
        msk = AddedToken(msk, lstrip=False, rstrip=False) if isinstance(msk, str) else msk
        cls = AddedToken(cls, lstrip=False, rstrip=False) if isinstance(cls, str) else cls
        sep = AddedToken(sep, lstrip=False, rstrip=False) if isinstance(sep, str) else sep
        super().__init__(
            pad=pad,
            bos=bos,
            eos=eos,
            msk=msk,
            cls=cls,
            sep=sep,
            model_max_length=model_max_length,
            **kw,
        )
        # 256 ids for the raw byte values (utf-8 encoded text).
        self._utf_vocab_size = 2**8
        # Special tokens occupy the first six ids.
        self.special_tokens_encoder = {
            self.pad: 0,
            self.bos: 1,
            self.eos: 2,
            self.msk: 3,
            self.cls: 4,
            self.sep: 5,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    def get_vocab(self):
        """Return the full token -> id mapping (specials, added, bytes)."""
        vocab = self.special_tokens_encoder.copy()
        vocab.update(self.added_tokens_encoder)
        # Byte tokens are single characters chr(i), offset past the specials.
        for i in range(self._utf_vocab_size):
            token = chr(i)
            vocab[token] = i + len(self.special_tokens_encoder)
        return vocab
    @property
    def s_vocab(self):
        # Total vocabulary size: 256 byte ids + the special tokens.
        return self._utf_vocab_size + self._num_special_tokens
    def get_special_tokens_mask(
        self,
        toks_0,
        toks_1=None,
        has_specials=False,
    ):
        """Return 1 at positions of special tokens, 0 elsewhere, assuming
        the [CLS]/[SEP] layout produced by build_inputs_with_special_tokens."""
        if has_specials:
            return super().get_special_tokens_mask(toks_0=toks_0, toks_1=toks_1, has_specials=True)
        if toks_1 is None:
            return [1] + [0] * len(toks_0) + [1]
        return [1] + ([0] * len(toks_0)) + [1] + ([0] * len(toks_1)) + [1]
    def build_inputs_with_special_tokens(self, toks_0, toks_1=None):
        """Wrap one or two sequences as [CLS] A [SEP] (+ B [SEP])."""
        if toks_1 is None:
            return [self.cls_token_id] + toks_0 + [self.sep_token_id]
        else:
            return [self.cls_token_id] + toks_0 + [self.sep_token_id] + toks_1 + [self.sep_token_id]
    def _tokenize(self, text):
        """Split text into one chr(byte) token per utf-8 byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        """Map a token to its id; multi-char non-special tokens are unknown."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            # Byte token: its id is the byte value shifted past the specials.
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        """Inverse of _convert_token_to_id."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        elif index in self.added_tokens_decoder:
            token = self.added_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        """Reassemble tokens into text, decoding byte tokens as utf-8."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        # Invalid byte sequences are replaced rather than raising.
        string = bstring.decode("utf-8", errors="replace")
        return string
    def save_vocabulary(self, dir, pre=None):
        # Nothing to save: the byte-level vocabulary is implicit.
        return ()
| StarcoderdataPython |
1978849 | <filename>open_discussions_api/utils_test.py<gh_stars>0
"""Tests for api utils"""
import jwt
import pytest
from open_discussions_api.utils import get_token
def test_get_token():
    """get_token must produce a JWT that decodes with the shared secret."""
    token = get_token(
        'secret',
        'username',
        ['<PASSWORD>'],
        expires_delta=100,
        extra_payload=dict(auth_url='auth', session_url='session')
    )
    payload = jwt.decode(token, 'secret', algorithms=['HS256'])
    expected = {
        'username': 'username',
        'roles': ['test_role'],
        'auth_url': 'auth',
        'session_url': 'session',
    }
    for key, value in expected.items():
        assert payload[key] == value
    # Expiry must be exactly expires_delta seconds after issuance.
    assert payload['exp'] == payload['orig_iat'] + 100
def test_get_token_error():
    """A payload key clashing with a default payload item must raise."""
    bad_payload = dict(auth_url='auth', username='username')  # username is a default item in the payload
    with pytest.raises(AttributeError):
        get_token(
            'secret',
            'username',
            ['<PASSWORD>_<PASSWORD>'],
            expires_delta=100,
            extra_payload=bad_payload,
        )
| StarcoderdataPython |
6549839 | #------------------------------------------------------------------------------#
# hsd: package for manipulating HSD-formatted data #
# Copyright (C) 2011 - 2020 DFTB+ developers group #
# #
# See the LICENSE file for terms of usage and distribution. #
#------------------------------------------------------------------------------#
#
"""
Implements common functionalities for the HSD package
"""
class HsdException(Exception):
    """Base class for all exceptions raised by the HSD package."""
    pass
class HsdQueryError(HsdException):
    """Error detected by an HsdQuery object.

    Attributes:
        file: Index of the file where the error occurred (or -1).
        line: Line where the error occurred (or None).
        tag: Name of the tag with the error (or empty string).
    """

    def __init__(self, msg="", node=None):
        """Initializes the exception.

        Args:
            msg: Error message.
            node: HSD element where the error occurred (optional).
        """
        super().__init__(msg)
        if node is None:
            # No node context available: fall back to neutral defaults.
            self.tag = ""
            self.file = -1
            self.line = None
        else:
            self.tag = node.gethsd(HSDATTR_TAG, node.tag)
            self.file = node.gethsd(HSDATTR_FILE, -1)
            self.line = node.gethsd(HSDATTR_LINE, None)
class HsdParserError(HsdException):
    """Base class for errors raised while parsing HSD input."""
    pass
def unquote(txt):
    """Strip one layer of matching single or double quotes, if present."""
    if len(txt) < 2:
        return txt
    first = txt[0]
    if first in "\"'" and txt[-1] == first:
        return txt[1:-1]
    return txt
# Name for default attribute (when attribute name is not specified)
DEFAULT_ATTRIBUTE = "attribute"

# Keys for HSD-specific metadata attached to parsed elements
# (see HsdQueryError, which reads them via node.gethsd()).
HSDATTR_PROC = "processed"
HSDATTR_EQUAL = "equal"
HSDATTR_FILE = "file"
HSDATTR_LINE = "line"
HSDATTR_TAG = "tag"
| StarcoderdataPython |
4935546 | <reponame>wilsaj/flask-admin-old
import sys
from flask import Flask, redirect
from flask.ext import admin
from flask.ext.admin.datastore.sqlalchemy import SQLAlchemyDatastore
from sqlalchemy import create_engine, Table
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text, String, Float, Time, Enum
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
# Declarative base shared by every model in this demo.
Base = declarative_base()
# ----------------------------------------------------------------------
# Association tables
# ----------------------------------------------------------------------
# Plain many-to-many link table between students and courses; used as the
# ``secondary`` table of Course.students below.
course_student_association_table = Table(
    'course_student_association',
    Base.metadata,
    Column('student_id', Integer, ForeignKey('student.id')),
    Column('course_id', Integer, ForeignKey('course.id')))
# ----------------------------------------------------------------------
# Models
# ----------------------------------------------------------------------
class Course(Base):
    """A course taught by one teacher and attended by many students."""
    __tablename__ = 'course'
    id = Column(Integer, primary_key=True)
    subject = Column(String)
    teacher_id = Column(Integer, ForeignKey('teacher.id'), nullable=False)
    start_time = Column(Time)
    end_time = Column(Time)
    # Backrefs expose ``teacher.courses`` and ``student.courses``.
    teacher = relationship('Teacher', backref='courses')
    students = relationship('Student',
                            secondary=course_student_association_table,
                            backref='courses')
    def __repr__(self):
        return self.subject
class Student(Base):
    """A student; linked to courses through the association table."""
    __tablename__ = 'student'
    id = Column(Integer, primary_key=True)
    name = Column(String(120), unique=True)
    def __repr__(self):
        return self.name
class Teacher(Base):
    """A teacher; ``teacher.courses`` is provided by Course's backref."""
    __tablename__ = 'teacher'
    id = Column(Integer, primary_key=True)
    name = Column(String(120), unique=True)
    def __repr__(self):
        return self.name
def create_app(database_uri='sqlite://'):
    """Build the demo Flask app with two separately mounted admin blueprints.

    Args:
        database_uri: SQLAlchemy database URI; defaults to an in-memory
            SQLite database.
    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'not secure'  # demo only -- not for production
    app.engine = create_engine(database_uri, convert_unicode=True)
    app.db_session = scoped_session(sessionmaker(
        autocommit=False, autoflush=False, bind=app.engine))
    # First admin instance exposes the people models...
    datastore1 = SQLAlchemyDatastore(
        (Student, Teacher), app.db_session)
    admin_blueprint1 = admin.create_admin_blueprint(
        datastore1, name='admin1')
    # ...the second exposes courses; each blueprint gets its own URL prefix.
    datastore2 = SQLAlchemyDatastore(
        (Course,), app.db_session)
    admin_blueprint2 = admin.create_admin_blueprint(
        datastore2, name='admin2')
    app.register_blueprint(admin_blueprint1, url_prefix='/admin1')
    app.register_blueprint(admin_blueprint2, url_prefix='/admin2')
    Base.metadata.create_all(bind=app.engine)
    return app
# Run a local development instance backed by an on-disk SQLite database.
if __name__ == '__main__':
    app = create_app('sqlite:///simple.db')
    app.run(debug=True)
| StarcoderdataPython |
6556940 | <gh_stars>1-10
# Generated by Django 3.1.6 on 2021-02-20 01:06
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration declaring the City and Country models.
    Both models use ``managed = False``: the tables are expected to already
    exist in the database, so Django will neither create nor drop them.
    NOTE(review): ``max_length`` on the ``countrycode`` IntegerField has no
    effect (it only applies to character fields) -- presumably a leftover
    from a CharField; confirm against the real schema.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cityid', models.IntegerField(db_column='CityID')),
                ('cityname', models.CharField(db_column='CityName', max_length=128)),
                ('countrycode', models.IntegerField(db_column='CountryCode', max_length=128)),
                ('countryid', models.IntegerField(blank=True, db_column='CountryID', null=True)),
            ],
            options={
                'db_table': 'City',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('countryid', models.IntegerField(db_column='CountryID')),
                ('countryname', models.CharField(db_column='CountryName', max_length=128)),
                ('countrycode', models.CharField(db_column='CountryCode', max_length=128)),
            ],
            options={
                'db_table': 'Country',
                'managed': False,
            },
        ),
    ]
| StarcoderdataPython |
364548 | <filename>main.py
from torchvision.transforms import transforms
#from RandAugment import RandAugment
import RandAugment
# CIFAR-style per-channel normalization (values are the usual CIFAR means
# and standard deviations expressed in 0-255 scale).
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
                                 std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
])
# Add RandAugment with N, M(hyperparameter)
# N = number of augmentations applied per image, M = their magnitude.
N = 3
M = 1
# RandAugment must run on PIL images, so it is inserted before ToTensor.
transform_train.transforms.insert(0, RandAugment.RandAugment(N, M))
# NOTE(review): the line below is a bare attribute access with no effect --
# presumably leftover exploration code; confirm it can be deleted.
RandAugment.augmentations.ShearX
| StarcoderdataPython |
1799832 | <reponame>titikid/mmsegmentation<gh_stars>1-10
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class publaynet_split1Dataset(CustomDataset):
    """PubLayNet split-1 segmentation dataset (background/text/table/figure).
    Images are ``.jpg`` files with matching ``.png`` label maps; label ids
    map 1:1 to CLASSES (no zero-label reduction).
    """
    # Earlier 5-class configuration kept for reference.
    # CLASSES = ('title', 'text', 'figure', 'table', 'list')
    # PALETTE = [[50, 255, 0],[255, 0, 0],[0, 255, 255],[255, 192, 203],[100, 0, 255]]
    CLASSES = ('background','text', 'table', 'figure')
    PALETTE = [[120, 120, 120],[255, 0, 0],[0, 255, 0],[0, 0, 255]]
    def __init__(self, **kwargs):
        super(publaynet_split1Dataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            reduce_zero_label=False,
            **kwargs)
12808500 | <filename>aiotunnel/tunnel.py<gh_stars>10-100
# BSD 3-Clause License
#
# Copyright (c) 2018, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ssl
import asyncio
import logging
import aiohttp
from .protocol import LocalTunnelProtocol
logger = logging.getLogger(__name__)
async def create_endpoint(url, client_addr, target_addr, ssl_context=None):
    """Expose a local TCP port that tunnels traffic to the HTTP(S) server side.
    Args:
    -----
    :type url: str
    :param url: The URL of the server part to communicate with using HTTP calls
    :type client_addr: tuple
    :param client_addr: A tuple (host, port); the local address to listen on
    :type target_addr: tuple
    :param target_addr: A tuple (host, port); the address:port exposed on the
        server side so clients can connect through the tunnel
    :type ssl_context: ssl.SSLContext or None
    :param ssl_context: When given, the tunnel talks HTTPS to the server part
    This coroutine runs forever (``serve_forever``) until cancelled.
    """
    # Get a reference to the event loop as we plan to use
    # low-level APIs.
    host, port = client_addr
    target_host, target_port = target_addr
    remote_host = target_host + ':' + str(target_port)
    scheme = 'HTTPS' if ssl_context else 'HTTP'
    logger.info("Listening on port %s", port)
    logger.info("Opening %s connection to %s:%s", scheme, target_host, target_port)
    loop = asyncio.get_running_loop()
    # Start the server and serve forever
    server = await loop.create_server(
        lambda: LocalTunnelProtocol(remote_host, url, ssl_context),
        host, port
    )
    async with server:
        await server.serve_forever()
async def open_connection(url, client_addr, target_addr, ssl_context=None):
    """Open a TCP connection to the target and tunnel it to the server side.
    Args:
    -----
    :type url: str
    :param url: The URL of the server part to communicate with using HTTP calls
    :type client_addr: tuple
    :param client_addr: A tuple (host, port); the address exposed on the
        server side so clients can connect through the tunnel
    :type target_addr: tuple
    :param target_addr: A tuple (host, port); the local service to connect to
    :type ssl_context: ssl.SSLContext or None
    :param ssl_context: When given, the tunnel talks HTTPS to the server part
    The coroutine returns once the connection is lost or fails to establish.
    """
    remote = f'{client_addr[0]}:{client_addr[1]}'
    host, port = target_addr
    scheme = 'HTTPS' if ssl_context else 'HTTP'
    logger.info("Opening a TCP connection to %s:%s (target)", host, port)
    logger.info("Opening %s connection to %s (source)", scheme, remote)
    loop = asyncio.get_running_loop()
    on_con_lost = loop.create_future()
    # Initialize before the try block: if create_connection itself raises,
    # the finally clause previously hit an UnboundLocalError on ``transport``.
    transport = None
    try:
        transport, _ = await loop.create_connection(
            lambda: LocalTunnelProtocol(remote, url, on_con_lost, ssl_context),
            host, port
        )
        await on_con_lost
    except Exception as e:
        logger.critical("Unable to connect: %s", str(e))
    finally:
        # Only close a transport that was actually created.
        if transport is not None:
            transport.close()
def start_tunnel(url, client_addr, target_addr,
                 reverse=False, cafile=None, certfile=None, keyfile=None):
    """Run one end of the tunnel until the event loop stops.
    :param url: URL of the HTTP(S) server part of the tunnel
    :param client_addr: (host, port) pair for the client side
    :param target_addr: (host, port) pair for the target side
    :param reverse: When False, listen locally (forward tunnel); when True,
        connect out to the target (reverse tunnel)
    :param cafile: CA bundle enabling TLS; when given, ``certfile`` and
        ``keyfile`` must identify the client certificate to present
    """
    ssl_context = None
    if cafile:
        ssl_context = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH, cafile=cafile)
        ssl_context.load_cert_chain(certfile, keyfile)
    try:
        if not reverse:
            asyncio.run(create_endpoint(url, client_addr, target_addr, ssl_context))
        else:
            asyncio.run(open_connection(url, client_addr, target_addr, ssl_context))
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the tunnel: exit quietly.
        pass
    except Exception:
        # The previous bare ``except: pass`` hid every failure, including
        # programming errors; keep the best-effort behavior but log it.
        logger.exception("Tunnel terminated with an unexpected error")
| StarcoderdataPython |
3319026 | <filename>Idat_Python2022/Semana_6/PRACTICA/1_EJERCICIO.py<gh_stars>0
"""Analizar los siguientes ejercicios de condiciones, representarlos mediante algoritmos en Python. """
#1.-Leer 2 números; si son iguales que los multiplique, si el primero es mayor que el segundo que los reste y si no que los sume.
Numero01=int(input("Ingresa El N° 01="))
Numero02=int(input("Ingresa El N° 02="))
if Numero01==Numero02 :
resultado=Numero01*Numero02
elif Numero01 >Numero02:
resultado=Numero01-Numero02
else:
resultado=Numero01+Numero02
print("Resultado de comprativo: ", resultado)
| StarcoderdataPython |
class Solution:
    def alienOrder(self, words: List[str]) -> str:
        """Recover a character ordering consistent with the sorted word list.

        Builds a precedence graph from the first differing character of each
        adjacent word pair, then topologically sorts it (Kahn's algorithm).
        Returns '' when the input is contradictory: a longer word precedes
        its own prefix, or the precedence graph contains a cycle.
        """
        successors = collections.defaultdict(set)
        indegree = collections.defaultdict(int)
        alphabet = {ch for word in words for ch in word}
        for first, second in zip(words, words[1:]):
            # A word may not precede its own proper prefix, e.g. ['abc', 'ab'].
            if len(first) > len(second) and first.startswith(second):
                return ''
            for a, b in zip(first, second):
                if a == b:
                    continue
                # Direct contradiction, e.g. ['x', 'z', 'x'].
                if a in successors[b]:
                    return ''
                if b not in successors[a]:
                    successors[a].add(b)
                    indegree[b] += 1
                # Only the first differing character carries ordering info.
                break
        # Kahn's algorithm: start from characters with no predecessors.
        ready = collections.deque(ch for ch in alphabet if indegree[ch] == 0)
        order = []
        while ready:
            ch = ready.popleft()
            order.append(ch)
            for nxt in successors[ch]:
                indegree[nxt] -= 1
                if indegree[nxt] == 0:
                    ready.append(nxt)
        result = ''.join(order)
        # Leftover characters mean a cycle: no valid ordering exists.
        return result if len(result) == len(alphabet) else ''
8027244 | <gh_stars>10-100
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019, Nokia'
__email__ = '<EMAIL>'
import pytest
from moler.exceptions import CommandFailure
from moler.cmd.unix.unzip import Unzip
def test_unzip_returns_fail(buffer_connection):
    """
    Test if proper alarm is raised when unzip tries to extract the invalid file.
    :param buffer_connection: Simulation of a real connection with a device.
    :return: Nothing.
    """
    # Device output for a zip archive that does not exist on the remote host.
    command_output = """
ute@debdev:~$ unzip test.zip
unzip:  cannot find or open test.zip, test.zip.zip or test.zip.ZIP.
ute@debdev:~$
"""
    buffer_connection.remote_inject_response([command_output])
    cmd = Unzip(connection=buffer_connection.moler_connection, zip_file="test.zip")
    # The "cannot find or open" message must surface as a CommandFailure.
    with pytest.raises(CommandFailure):
        cmd()
def test_unzip_forbidden_to_overwrite(buffer_connection):
    """
    Test if proper alarm is raised when unzip is not allowed to overwrite the existing file.
    :param buffer_connection: Simulation of a real connection with a device.
    :return: Nothing.
    """
    # Device output where the overwrite prompt is answered with "N" (none).
    command_output = """
host:~ # unzip test.zip
Archive: test.zip
replace test.txt? [y]es, [n]o, [A]ll, [N]one, [r]ename: N
host:~ # """
    buffer_connection.remote_inject_response([command_output])
    cmd = Unzip(connection=buffer_connection.moler_connection, zip_file="test.zip")
    # Declining to overwrite is reported as a CommandFailure.
    with pytest.raises(CommandFailure):
        cmd()
def test_unzip_filename_not_matched(buffer_connection):
    """
    Test if exception is raised when a directory cannot be created.
    :param buffer_connection: Simulation of a real connection with a device.
    :return: Nothing.
    """
    # NOTE(review): the function name says "filename not matched" but the
    # scenario exercises a failed extraction-directory creation -- presumably
    # the name is stale; confirm before renaming.
    command_output = """
host:~ # unzip test.zip -d test/test
Archive:  test.zip
checkdir:  cannot create extraction directory: test/test
 No such file or directory
host:~ # """
    buffer_connection.remote_inject_response([command_output])
    cmd = Unzip(connection=buffer_connection.moler_connection, zip_file="test.zip", extract_dir="test/test")
    # The failed directory creation must surface as a CommandFailure.
    with pytest.raises(CommandFailure):
        cmd()
| StarcoderdataPython |
11360373 | from django.db import models
from django.urls import reverse
from django.utils import timezone
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from uuslug import uuslug
class Tag(models.Model):
    """Free-form label attached to blog posts (many-to-many)."""
    name = models.CharField(max_length=48)
    def __str__(self):
        return self.name
class Category(models.Model):
    """Post category; ``rank`` controls display order (ascending)."""
    name = models.CharField(max_length=48)
    rank = models.PositiveSmallIntegerField(default=0)
    class Meta:
        verbose_name_plural = 'categories'
        ordering = ['rank']
    def __str__(self):
        return self.name
class PostQuerySet(models.QuerySet):
    """Queryset helpers for Post; installed as the default manager below."""
    def published(self):
        # Active posts whose publication date has already passed.
        return self.active().filter(pub_date__lte=timezone.now())
    def active(self):
        return self.filter(is_active=True)
class Post(models.Model):
    """A blog post with category, tags, view counter and auto-generated slug."""
    title = models.CharField(max_length=100)
    summary = models.TextField()
    body = models.TextField()
    category = models.ForeignKey(Category)
    tags = models.ManyToManyField(Tag, blank=True)
    # Slug is derived from the title on save; unique per publication date.
    slug = models.SlugField(unique_for_date='pub_date', editable=False)
    pub_date = models.DateTimeField()
    views = models.IntegerField(default=0)
    author = models.CharField(max_length=48)
    is_active = models.BooleanField(default=False)
    image = models.ImageField(upload_to='blog')
    # Derived thumbnail rendered on demand by django-imagekit.
    image_thumbnail = ImageSpecField(source='image',
                                     processors=[ResizeToFill(400, 205)],
                                     format='JPEG',
                                     options={'quality': 60})
    objects = PostQuerySet.as_manager()
    class Meta:
        ordering = ('-pub_date',)
        get_latest_by = 'pub_date'
    def __str__(self):
        return self.title
    def save(self, *args, **kwargs):
        # NOTE(review): the slug is regenerated on *every* save, so renaming
        # a title changes its URL -- confirm that is intended.
        self.slug = uuslug(self.title, instance=self)
        super().save(*args, **kwargs)
    def get_absolute_url(self):
        kwargs = {
            'year': self.pub_date.year,
            'month': self.pub_date.strftime('%b').lower(),
            'day': self.pub_date.strftime('%d').lower(),
            'slug': self.slug,
        }
        return reverse('blog:detail', kwargs=kwargs)
    def increase_views(self):
        # Persist only the counter column to avoid clobbering other fields.
        self.views += 1
        self.save(update_fields=['views'])
    def is_published(self):
        """ Return True if the post is publicly accessible. """
        return self.is_active and self.pub_date <= timezone.now()
class Comment(models.Model):
    """Reader comment on a Post; ``active`` gates moderation visibility."""
    author = models.CharField(max_length=48)
    text = models.TextField(max_length=2000)
    active = models.BooleanField(default=True)
    created = models.DateTimeField(auto_now_add=True)
    ip_address = models.CharField(max_length=24, default='')
    post = models.ForeignKey(Post, related_name='comments')
    class Meta(object):
        db_table = 'blog_comments'
    def get_ding_url(self):
        # NOTE(review): stub -- always returns None; presumably unfinished.
        return
| StarcoderdataPython |
11396213 | from base64 import b64encode
import googleapiclient.discovery
from oauth2client.client import GoogleCredentials
# Change these values to match your project.
IMAGE_FILE = "text.png"
CREDENTIALS_FILE = "credentials.json"
# Connect to the Google Cloud-ML Service using service-account credentials.
credentials = GoogleCredentials.from_stream(CREDENTIALS_FILE)
service = googleapiclient.discovery.build('vision', 'v1', credentials=credentials)
# Read the image file and convert it to a base64 string (required by the API).
with open(IMAGE_FILE, "rb") as f:
    image_data = f.read()
encoded_image_data = b64encode(image_data).decode('UTF-8')
# Create the request object for the Google Vision API (OCR feature).
batch_request = [{
    'image': {
        'content': encoded_image_data
    },
    'features': [
        {
            'type': 'TEXT_DETECTION'
        }
    ]
}]
request = service.images().annotate(body={'requests': batch_request})
# Send the request to Google.
response = request.execute()
# Check for errors reported inline in the response body.
if 'error' in response:
    raise RuntimeError(response['error'])
# Extract the OCR annotations; index 0 is the full detected text block.
extracted_texts = response['responses'][0]['textAnnotations']
extracted_text = extracted_texts[0]
print(extracted_text['description'])
# Print the bounding polygon where the text was detected.
print(extracted_text['boundingPoly'])
9757240 | from datetime import datetime
from typing import Optional
import mongoengine as ME # type: ignore[import]
from mongoengine.queryset.visitor import Q # type: ignore[import]
class ProductBooking(ME.Document):
    """Model for storing product booking information
    One document per booking; a booking is considered returned once a
    ProductReturn referencing its id exists.
    """
    # NOTE: For a realistic app you would want to capture user ID
    # user_id: str
    product_code = ME.StringField()
    # Creation time of the booking record (UTC).
    timestamp = ME.DateTimeField(default=datetime.utcnow)
    # Inclusive rental interval.
    from_date = ME.DateTimeField()
    to_date = ME.DateTimeField()
    meta = {
        'indexes': [
            'timestamp',
            'from_date',
            'to_date',
        ]
    }
class ProductReturn(ME.Document):
    """Model for storing product return information
    At most one return per booking (``booking_id`` is unique).
    """
    # NOTE: For a realistic app you would want to capture user ID
    # user_id: str
    booking_id = ME.StringField(unique=True)
    product_code = ME.StringField()
    # Creation time of the return record (UTC).
    timestamp = ME.DateTimeField(default=datetime.utcnow)
    total_fee = ME.FloatField()
    meta = {
        'indexes': [
            'timestamp',
            'total_fee',
        ]
    }
# -----------------------------------------------------------------------------
# CRUD Operations =============================================================
# NOTE: As project complexity increases DB query functions should be
# moved in to a separate `crud` package which implements all
# relavant CRUD operations on resources available through the API
def create_booking(
    product_code,
    from_date,
    to_date,
    timestamp=None
) -> str:  # noqa E125
    """Persist a new product booking and return its document id as a string.

    ``timestamp`` defaults to the current UTC time.  The previous default of
    ``datetime.utcnow()`` was evaluated once at import time, so every booking
    created without an explicit timestamp shared the same stale value;
    deferring the call into the body fixes that.
    """
    if timestamp is None:
        timestamp = datetime.utcnow()
    booking_doc = ProductBooking(
        product_code=product_code,
        from_date=from_date,
        to_date=to_date,
        timestamp=timestamp
    ).save()
    return f'{booking_doc.pk}'
def get_booking(booking_id) -> Optional[ProductBooking]:
    """Fetch a booking document by primary key, or None when absent."""
    return ProductBooking.objects(pk=booking_id).first()
def is_product_booked(
    product_code: str,
    from_date: datetime,
    to_date: datetime,
) -> bool:  # noqa E125
    """Return True if an overlapping, not-yet-returned booking exists.

    A booking is active when its [from_date, to_date] interval overlaps the
    requested interval and no ProductReturn document references it yet.

    NOTE(review): ``product_code`` is never used in the query, so the overlap
    check runs across *all* products -- confirm whether the query should also
    filter on ``product_code``.
    """
    # BR0: Booking request start date
    # BR1: Booking request end date
    # B0: Existing booking start date
    # B1: Existing booking end date
    # Find an overlapping existing booking
    bookings = ProductBooking.objects(
        # Case 0: [B0----[BR0----(B1)]----BR1]
        (Q(to_date__lte=to_date) & Q(to_date__gte=from_date)) |
        # Case 1: [BR0----[(B0)----BR1]----B1]
        (Q(from_date__lte=to_date) & Q(from_date__gte=from_date)) |
        # Case 2: [(B0)----[BR0----BR1]----(B1)]
        (Q(from_date__lte=from_date) & Q(to_date__gte=to_date))
    ).all()
    if bookings:
        booking_ids = [str(x.pk) for x in bookings]
        return_docs = ProductReturn.objects(booking_id__in=booking_ids).all()
        # More bookings than returns means at least one is still open.
        if len(bookings) > len(return_docs):
            return True
        else:
            return False
    else:
        return False
def create_return(
    booking_id: str,
    product_code: str,
    total_fee: float,
    timestamp: datetime = None
) -> str:  # noqa E125
    """Persist a product return for a booking and return its document id.

    ``timestamp`` defaults to the current UTC time.  As with create_booking,
    the former ``datetime.utcnow()`` default was evaluated once at import
    time, stamping every return with the same stale value; the call is now
    deferred into the body.
    """
    if timestamp is None:
        timestamp = datetime.utcnow()
    return_doc = ProductReturn(
        booking_id=booking_id,
        product_code=product_code,
        timestamp=timestamp,
        total_fee=total_fee
    ).save()
    return f'{return_doc.pk}'
def get_return(booking_id) -> Optional[ProductReturn]:
    """Fetch the return document for a booking, or None when not returned."""
    return ProductReturn.objects(booking_id=booking_id).first()
| StarcoderdataPython |
8132307 | import setuptools
from setuptools import setup
LONG_DESCRIPTION = \
    '''Converts nifti files (raw and mask) and mrtrix tck files to DICOM format readable by Brainlab surgical planning and navigation systems. Tck files are converted to 3D objects that can be manipulated by Brainlab tools. Label images are converted to a dicom segmentation format and can also be manipulated as 3D objects in Brainlab'''
# Package metadata for karawun; pinned pydicom/SimpleITK versions are
# required by the DICOM conversion code.
setup(
    name='karawun',
    version='0.2.0.2',
    packages=['karawun'],
    python_requires='>=3.6',
    package_dir={'karawun': 'karawun'},
    url='',
    license='',
    author='<NAME>',
    author_email='<EMAIL>',
    # Fixed typo: "segmgmentation" -> "segmentation".
    description=('DICOM image, segmentation image and fibre object converter'),
    long_description=(LONG_DESCRIPTION),
    install_requires=["numpy>=1.13.0",
                      "pydicom==1.3",
                      "SimpleITK==1.2.0"],
    entry_points={'console_scripts': ['importTractography = karawun.commandline:import_tractography_cl']}
)
| StarcoderdataPython |
6646065 | <filename>LeetCode/top_interview_easy/array/array_08.py
class Solution:
    def moveZeroes(self, nums: List[int]) -> None:
        """Move every zero in ``nums`` to the end, in place, preserving the
        relative order of the non-zero elements."""
        # Compact the non-zero values to the front, tracking where the
        # zero-filled tail must begin.
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        # Overwrite the remaining tail with zeros in one slice assignment.
        nums[write:] = [0] * (len(nums) - write)
| StarcoderdataPython |
8009019 | <filename>tests/test_scaler.py
from unittest import TestCase
from fffw.scaler import Scaler
class ScalerTestCase(TestCase):
    """ Tests for scaling helpers class."""
    def test_scaler(self):
        """ Scaler smoke test and feature demo
        * Source video 1280x960, square pixels
        * Scaled to 640x480 then cropped on top/bottom to 640x360
        * Scaled to 480x360 to fit to 640x360
        """
        scaler = Scaler((1280, 960), accuracy=1)
        fit = scaler.scale_fit((640, 360))
        crop, fields = scaler.scale_crop((640, 360))
        self.assertTupleEqual(fit.source_size, (480, 360))
        self.assertTupleEqual(crop.source_size, (640, 480))
        # fields = (left, top, width, height) of the crop in source pixels.
        self.assertTupleEqual(fields, (0, 120, 1280, 720))
    def test_accuracy(self):
        """ Resulting dimensions are dividable to 16."""
        scaler = Scaler((1280, 720), accuracy=16)
        fit = scaler.scale_fit((640, 360))
        self.assertTupleEqual(fit.source_size, (640, 352))
    def test_rotation(self):
        """ Rotation handling."""
        scaler = Scaler((1280, 720), rotation=90)
        fit = scaler.scale_fit((360, 640))
        self.assertTupleEqual(fit.source_size, (360, 640))
    def test_pixel_aspect_ratio(self):
        """ Non-square pixels support.
        (Method renamed from the typo'd ``test_pixel_aspect_ratip``;
        unittest discovers it by the ``test_`` prefix either way.)
        """
        scaler = Scaler((720, 720), par=16./9.)
        fit = scaler.scale_fit((640, 360))
        self.assertTupleEqual(fit.source_size, (640, 360))
| StarcoderdataPython |
4914076 | """GaussianFeaturesWithKmenasテストケース
Copyright (c) 2020, <NAME>, All rights reserved.
"""
import unittest
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from numpy.testing import assert_array_equal
from gauss_kmeans import GaussianFeaturesWithKmeans
class GaussianFeaturesWithKMeansTestCase(unittest.TestCase):
    """Unit tests for the GaussianFeaturesWithKmeans feature transformer."""
    def test_construction(self):
        """Constructor test: default attribute values after instantiation."""
        # Setup
        M = 10
        # Instantiate the target
        phi = GaussianFeaturesWithKmeans(nbfs=M)
        # Expected values
        expctd_nbfs = M
        expctd_width_factor = 1.0
        expctd_prekmeans = True
        # Actual values
        actual_nbfs = phi.nbfs
        actual_width_factor = phi.width_factor
        actual_prekmeans = phi.prekmeans
        # Evaluation
        self.assertEqual(actual_nbfs,expctd_nbfs)
        self.assertEqual(actual_width_factor,expctd_width_factor)
        self.assertEqual(actual_prekmeans,expctd_prekmeans)
    def test_x1d_wo_kmeans(self):
        """Univariate test without the k-means preprocessing step."""
        # Setup
        M = 10
        nSamples = 50
        rng = np.random.RandomState(1)
        x = 10 * rng.rand(nSamples)
        X = x.reshape(-1,1)
        # Instantiate the target
        phi = GaussianFeaturesWithKmeans(nbfs=M,prekmeans=False)
        phi.fit(X)
        # Expected values: centers evenly spaced over the data range,
        # all widths equal to the center spacing.
        expctd_nbfs = M
        expctd_width_factor = 1.0
        expctd_prekmeans = False
        expctd_centers = np.linspace(X.min(), X.max(), M)
        expctd_widths = (expctd_centers[1] - expctd_centers[0])*np.ones(M)
        # Actual values
        actual_nbfs = phi.nbfs
        actual_width_factor = phi.width_factor
        actual_prekmeans = phi.prekmeans
        actual_centers = phi.centers_
        actual_widths = phi.widths_
        # Evaluation
        self.assertEqual(actual_nbfs,expctd_nbfs)
        self.assertEqual(actual_width_factor,expctd_width_factor)
        self.assertEqual(actual_prekmeans,expctd_prekmeans)
        assert_array_equal(actual_widths,expctd_widths,'widths')
        assert_array_equal(actual_centers,expctd_centers,'centers')
    def test_x1d_w_kmeans(self):
        """Univariate test with the k-means preprocessing step."""
        # Setup
        M = 10
        nSamples = 50
        rng = np.random.RandomState(1)
        x = 10 * rng.rand(nSamples)
        y = np.sin(x) + 0.1 * rng.randn(nSamples)
        X = x.reshape(-1,1)
        # Instantiate the target
        phi = GaussianFeaturesWithKmeans(nbfs=M,prekmeans=True)
        phi.fit(X)
        # Expected values: reproduce the standardize -> k-means pipeline and
        # map centers/widths back to the original scale.
        scaler = StandardScaler()
        kmeans = KMeans(n_clusters=M,random_state=0)
        X = scaler.fit_transform(X)
        kmeans.fit(X)
        expctd_nbfs = M
        expctd_width_factor = 1.0
        expctd_prekmeans = True
        expctd_centers = scaler.inverse_transform(kmeans.cluster_centers_).reshape(-1,)
        labels = kmeans.predict(X).reshape(-1,1)
        clusters = pd.DataFrame(np.concatenate((labels,X),axis=1)).groupby([0])
        expctd_widths = (scaler.scale_ * clusters.std(ddof=0)).to_numpy().reshape(-1,)
        # Actual values
        actual_nbfs = phi.nbfs
        actual_width_factor = phi.width_factor
        actual_prekmeans = phi.prekmeans
        actual_centers = phi.centers_
        actual_widths = phi.widths_
        # Evaluation
        self.assertEqual(actual_nbfs,expctd_nbfs)
        self.assertEqual(actual_width_factor,expctd_width_factor)
        self.assertEqual(actual_prekmeans,expctd_prekmeans)
        assert_array_equal(actual_widths,expctd_widths,'widths')
        assert_array_equal(actual_centers,expctd_centers,'centers')
    def test_x2d_w_kmeans(self):
        """Bivariate test with the k-means preprocessing step."""
        # Setup
        M = 10
        nSamples = 50
        rng = np.random.RandomState(1)
        x1 = 10 * rng.rand(nSamples)
        x2 = 0.1 * rng.rand(nSamples)
        X = np.concatenate((x1.reshape(-1,1),x2.reshape(-1,1)),axis=1)
        y = np.sin(x1) + np.cos(x2) + 0.1 * rng.randn(nSamples)
        # Instantiate the target
        phi = GaussianFeaturesWithKmeans(nbfs=M,prekmeans=True)
        phi.fit(X)
        # Expected values: same pipeline as the 1-D case, reshaped for the
        # two-feature layout.
        scaler = StandardScaler()
        kmeans = KMeans(n_clusters=M,random_state=0)
        X = scaler.fit_transform(X)
        kmeans.fit(X)
        expctd_nbfs = M
        expctd_width_factor = 1.0
        expctd_prekmeans = True
        expctd_centers = scaler.inverse_transform(kmeans.cluster_centers_).reshape(-1,2,1).transpose(2,1,0)
        labels = kmeans.predict(X).reshape(-1,1)
        clusters = pd.DataFrame(np.concatenate((labels,X),axis=1)).groupby([0])
        expctd_widths = (scaler.scale_ * clusters.std(ddof=0)).to_numpy().reshape(-1,2,1).transpose(2,1,0)
        # Actual values
        actual_nbfs = phi.nbfs
        actual_width_factor = phi.width_factor
        actual_prekmeans = phi.prekmeans
        actual_centers = phi.centers_
        actual_widths = phi.widths_
        # Evaluation
        self.assertEqual(actual_nbfs,expctd_nbfs)
        self.assertEqual(actual_width_factor,expctd_width_factor)
        self.assertEqual(actual_prekmeans,expctd_prekmeans)
        assert_array_equal(actual_widths,expctd_widths,'widths')
        assert_array_equal(actual_centers,expctd_centers,'centers')
# Allow running this test module directly with ``python <file>``.
if __name__ == '__main__':
    unittest.main()
11309642 | import numpy as np
from .wrap.array_data import ArrayData
from .wrap import array_ops
from . import elementwise
from . import base
from . import helpers
class ndarray(object):
    """Device-backed n-dimensional array with a NumPy-like interface.

    The flat buffer lives in ``self._data`` (an ``ArrayData``); elementwise
    arithmetic and comparisons are delegated to the ``elementwise`` module,
    and shape helpers to ``helpers``.  Basic contiguous indexing returns
    views that share the underlying buffer.
    """
    def __init__(self, shape, dtype=None, np_data=None, array_data=None,
                 array_owner=None):
        # NOTE(review): ``array_owner`` is accepted but never used here --
        # presumably kept for interface compatibility; confirm with callers.
        shape = helpers.require_iterable(shape)
        if shape == ():
            shape = (1,)
        self.shape = shape
        self.transposed = False
        self.isbool = False
        if dtype is None:
            if np_data is None:
                dtype = np.dtype(base.float_)
            else:
                dtype = np_data.dtype
                # Map NumPy's 64-bit defaults (and bool) onto the package's
                # native scalar types.
                if dtype == np.dtype('float64'):
                    dtype = np.dtype(base.float_)
                elif dtype == np.dtype('int64'):
                    dtype = np.dtype(base.int_)
                elif dtype == np.dtype('bool'):
                    dtype = np.dtype(base.bool_)
                    self.isbool = True
        else:
            dtype = np.dtype(dtype)
        if np_data is not None:
            # Require a C-contiguous host buffer of the chosen dtype.
            np_data = np.require(np_data, dtype=dtype, requirements='C')
        if array_data is None:
            self._data = ArrayData(self.size, dtype, np_data)
        else:
            self._data = array_data
    def __array__(self):
        """Copy the device buffer into a host NumPy array."""
        np_array = np.empty(self.shape, dtype=self.dtype)
        self._data.to_numpy(np_array)
        if self.isbool:
            np_array = np_array.astype(np.dtype('bool'))
        return np_array
    def __str__(self):
        return self.__array__().__str__()
    def __repr__(self):
        return self.__array__().__repr__()
    def _same_array(self, other):
        # True when both arrays view the same underlying buffer.
        return self.data == other.data
    @property
    def data(self):
        return self._data.data
    @property
    def dtype(self):
        return self._data.dtype
    @property
    def itemsize(self):
        return self._data.dtype.itemsize
    @property
    def nbytes(self):
        return self.size*self.itemsize
    @property
    def ndim(self):
        return len(self.shape)
    @property
    def size(self):
        return helpers.prod(self.shape)
    @property
    def T(self):
        return base.transpose(self)
    def view(self):
        """Return a new ndarray sharing this array's buffer."""
        return ndarray(self.shape, self.dtype, None, self._data)
    def fill(self, value):
        """Set every element to ``value`` in place."""
        array_ops._fill(self._data, self.size, value)
    def __len__(self):
        return self.shape[0]
    # Elementwise arithmetic/comparisons; the Python 2 __div__/__idiv__
    # variants are kept for backwards compatibility.
    def __add__(self, other):
        return elementwise.add(self, other)
    def __radd__(self, other):
        return elementwise.add(other, self)
    def __iadd__(self, other):
        return elementwise.add(self, other, self)
    def __sub__(self, other):
        return elementwise.subtract(self, other)
    def __rsub__(self, other):
        return elementwise.subtract(other, self)
    def __isub__(self, other):
        return elementwise.subtract(self, other, self)
    def __mul__(self, other):
        return elementwise.multiply(self, other)
    def __rmul__(self, other):
        return elementwise.multiply(other, self)
    def __imul__(self, other):
        return elementwise.multiply(self, other, self)
    def __div__(self, other):
        return elementwise.divide(self, other)
    def __rdiv__(self, other):
        return elementwise.divide(other, self)
    def __idiv__(self, other):
        return elementwise.divide(self, other, self)
    def __truediv__(self, other):
        return elementwise.divide(self, other)
    def __rtruediv__(self, other):
        return elementwise.divide(other, self)
    def __itruediv__(self, other):
        return elementwise.divide(self, other, self)
    def __pow__(self, other):
        return elementwise.power(self, other)
    def __rpow__(self, other):
        return elementwise.power(other, self)
    def __ipow__(self, other):
        return elementwise.power(self, other, self)
    def __eq__(self, other):
        return elementwise.equal(self, other)
    def __gt__(self, other):
        return elementwise.greater(self, other)
    def __ge__(self, other):
        return elementwise.greater_equal(self, other)
    def __lt__(self, other):
        return elementwise.less(self, other)
    def __le__(self, other):
        return elementwise.less_equal(self, other)
    def __ne__(self, other):
        return elementwise.not_equal(self, other)
    def __neg__(self):
        return elementwise.negative(self)
    def __ineg__(self):
        return elementwise.negative(self, self)
    def __getitem__(self, indices):
        """Return a contiguous view of the array (basic indexing only)."""
        if isinstance(indices, int):
            # Speedup case with a single index
            view_shape = self.shape[1:]
            view_size = helpers.prod(view_shape)
            offset = indices * view_size
            data_view = ArrayData(view_size, self.dtype, owner=self._data,
                                  offset=offset)
            return ndarray(view_shape, self.dtype, np_data=None,
                           array_data=data_view)
        elif isinstance(indices, slice):
            indices = (indices,)
        # Standardize indices to a list of slices
        elif len(indices) > len(self.shape):
            raise IndexError('too many indices for array')
        view_shape = []
        rest_must_be_contiguous = False
        offset = 0
        for i, dim in enumerate(self.shape):
            start = 0
            stop = dim
            append_dim = True
            if i < len(indices):
                idx = indices[i]
                if isinstance(idx, int):
                    # Integer index drops this dimension from the view.
                    append_dim = False
                    start = idx
                    stop = idx+1
                elif isinstance(idx, slice):
                    if idx.start is not None:
                        start = idx.start
                    if idx.stop is not None:
                        stop = idx.stop
                    if idx.step is not None:
                        raise NotImplementedError('only contiguous indices '
                                                  + 'are supported')
                elif idx is Ellipsis:
                    # Expand the ellipsis into enough full slices to cover
                    # every unindexed dimension, then redo the lookup.
                    # BUGFIX: the previous code computed one slice too few,
                    # kept the Ellipsis in the rebuilt index (causing
                    # unbounded recursion) and concatenated a tuple with a
                    # list (TypeError).
                    n_fill = self.ndim - len(indices) + 1
                    indices = (tuple(indices[:i])
                               + (slice(None),) * n_fill
                               + tuple(indices[i + 1:]))
                    return self[indices]
                else:
                    raise IndexError('only integers, slices and ellipsis are '
                                     + 'valid indices')
            view_dim = stop-start
            # Row-major flattening of the view's starting offset.
            offset = offset * dim + start
            if append_dim:
                view_shape.append(view_dim)
            if rest_must_be_contiguous and view_dim < dim:
                raise NotImplementedError('only contiguous indices are '
                                          + 'supported')
            if view_dim > 1:
                rest_must_be_contiguous = True
        view_shape = tuple(view_shape)
        view_size = helpers.prod(view_shape)
        # Construct view
        data_view = ArrayData(view_size, self.dtype, owner=self._data,
                              offset=offset)
        return ndarray(view_shape, self.dtype, np_data=None,
                       array_data=data_view)
    def __setitem__(self, indices, c):
        """Assign ``c`` into the view selected by ``indices``."""
        view = self.__getitem__(indices)
        base.copyto(view, c)
def array(object, dtype=None, copy=True):
    """Create a device ndarray from array-like host data.

    NOTE(review): ``dtype`` and ``copy`` are accepted for NumPy interface
    compatibility but are currently ignored.
    """
    np_array = np.array(object)
    return ndarray(np_array.shape, np_data=np_array)
def empty(shape, dtype=None):
    """Return a new uninitialized device array of the given shape and dtype."""
    return ndarray(shape, dtype=dtype)
def empty_like(a, dtype=None):
    """Return an uninitialized device array matching ``a``'s shape.

    NOTE(review): the ``dtype`` parameter is ignored; ``a``'s dtype is used.
    """
    if not isinstance(a, (np.ndarray, ndarray)):
        a = np.array(a)
    return ndarray(a.shape, dtype=a.dtype)
def ones(shape, dtype=None):
    """Return a new device array of the given shape filled with ones."""
    return array(np.ones(shape, dtype=dtype))
def ones_like(a, dtype=None):
    """Return a device array of ones with the shape (and dtype) of ``a``."""
    if not isinstance(a, (np.ndarray, ndarray)):
        a = np.array(a)
    return array(np.ones_like(a, dtype=dtype))
def zeros(shape, dtype=None):
    """Return a new device array of the given shape filled with zeros."""
    a = empty(shape, dtype)
    a.fill(0)
    return a
def zeros_like(a, dtype=None):
    """Return a device array of zeros with the shape (and dtype) of ``a``."""
    if not isinstance(a, (np.ndarray, ndarray)):
        a = np.array(a)
    return array(np.zeros_like(a, dtype=dtype))
| StarcoderdataPython |
11237895 | """ Imagine you are constructing roads, you need to figure out how many of them are connected"""
# Interesting practical application
class DisjointSetUnionFast:
    """Disjoint-set (union-find) with union by size and path compression."""

    def __init__(self, n):
        # Every element starts as the root of its own singleton set.
        self.parent = list(range(n))
        self.size = [1] * n
        self.num_sets = n

    def find(self, a):
        """Return the representative (root) of ``a``'s set, compressing the path."""
        root = a
        while self.parent[root] != root:
            root = self.parent[root]
        # Second pass: point every node on the walked path directly at the root.
        while self.parent[a] != root:
            self.parent[a], a = root, self.parent[a]
        return root

    def union(self, a, b):
        """Merge the sets containing ``a`` and ``b`` (no-op if already joined)."""
        root_a, root_b = self.find(a), self.find(b)
        if root_a == root_b:
            return
        # Attach the smaller tree beneath the larger one.
        if self.size[root_a] < self.size[root_b]:
            root_a, root_b = root_b, root_a
        self.parent[root_b] = root_a
        self.size[root_a] += self.size[root_b]
        self.num_sets -= 1

    def set_size(self, a):
        """Size of the set containing ``a``."""
        return self.size[self.find(a)]

    def __len__(self):
        """Number of disjoint sets currently tracked."""
        return self.num_sets
if __name__ == '__main__':
    # First line of input: number of cities (nodes) and roads (edges).
    cities, roads = list(map(int, input().split()))
    i = 0
    dsu = DisjointSetUnionFast(cities)
    while i < roads:
        # Each road joins two 1-indexed cities; convert to 0-indexed.
        u, v = list(map(int, input().split()))
        u = u - 1
        v = v - 1
        if dsu.find(u) != dsu.find(v):
            dsu.union(u, v)
        # After each road, print the component count and the largest
        # component size (root sizes only ever grow, so max(size) is valid).
        print(len(dsu), max(dsu.size))
        i += 1
i += 1 | StarcoderdataPython |
6648958 | <reponame>matthiasrohmer/grow
"""Grow local development server."""
import logging
import mimetypes
import os
import re
import sys
import traceback
import urllib
import jinja2
import webob
# NOTE: exc imported directly, webob.exc doesn't work when frozen.
from webob import exc as webob_exc
from werkzeug import routing
from werkzeug import utils as werkzeug_utils
from werkzeug import wrappers
from werkzeug import serving
from werkzeug import wsgi
from grow.common import config
from grow.common import utils
from grow.routing import router
from grow.pods import errors
from grow.pods import ui
from grow.server import api
class Request(wrappers.BaseRequest):
    """Werkzeug request type used by the old-routing PodServer."""
    pass
class ReRouteRequest(webob.Request):
    """WebOb request type used by the new-routing PodServerReRoute."""
    pass
class Response(webob.Response):
    """WebOb response with conditional responses (ETag/If-Modified) enabled."""
    default_conditional_response = True
# Use grow's logger instead of werkzeug's default.
class RequestHandler(serving.WSGIRequestHandler):
    """Dev-server request handler that suppresses werkzeug's request log."""
    @property
    def server_version(self):
        # Reported in the Server response header.
        return 'Grow/{}'.format(config.VERSION)
    def log(self, *args, **kwargs):
        # Intentionally silent; grow uses its own logger.
        pass
def serve_console(pod, request, values):
    """Render one of the built-in console UI pages (old routing)."""
    kwargs = {'pod': pod}
    page_templates = {
        'content': 'collections.html',
        'preprocessors': 'preprocessors.html',
        'translations': 'catalogs.html',
    }
    page = values.get('page')
    template_path = page_templates.get(page, 'main.html')
    # A specific locale narrows the translations page down to one catalog.
    locale = values.get('locale')
    if page == 'translations' and locale:
        kwargs['locale'] = locale
        template_path = 'catalog.html'
    jinja_env = ui.create_jinja_env()
    rendered = jinja_env.get_template('/views/{}'.format(template_path)).render(kwargs)
    response = wrappers.Response(rendered)
    response.headers['Content-Type'] = 'text/html'
    return response
def serve_console_reroute(pod, _request, _matched, **_kwargs):
    """Serve the default console page (new routing)."""
    jinja_env = ui.create_jinja_env()
    rendered = jinja_env.get_template('/views/base-reroute.html').render({'pod': pod})
    response = wrappers.Response(rendered)
    response.headers['Content-Type'] = 'text/html'
    return response
def serve_editor_reroute(pod, _request, matched, meta=None, **_kwargs):
    """Serve the in-browser editor page (new routing)."""
    params = matched.params
    path_value = params['path'] if 'path' in params else ''
    template_kwargs = {
        'pod': pod,
        'meta': meta,
        'path': path_value,
    }
    jinja_env = ui.create_jinja_env()
    rendered = jinja_env.get_template('/views/editor.html').render(template_kwargs)
    response = wrappers.Response(rendered)
    response.headers['Content-Type'] = 'text/html'
    return response
def serve_pod(pod, request, values):
    """Render the pod content for the request path (old routing).

    NOTE: ``urllib.unquote`` is Python 2 API (moved to ``urllib.parse`` in
    Python 3); this module targets Python 2.
    """
    path = urllib.unquote(request.path)  # Support escaped paths.
    controller, params = pod.routes.match(path, request.environ)
    controller.validate(params)
    headers = controller.get_http_headers(params)
    # App Engine serves blob content itself when this header is set; send
    # headers only.
    if 'X-AppEngine-BlobKey' in headers:
        return Response(headers=headers)
    content = controller.render(params)
    response = Response(body=content)
    response.headers.update(headers)
    # Persist any cache changes produced while rendering.
    if pod.podcache.is_dirty:
        pod.podcache.write()
    return response
def serve_pod_reroute(pod, request, matched, **_kwargs):
    """Serve pod contents using the new routing."""
    controller = pod.router.get_render_controller(
        request.path, matched.value, params=matched.params)
    response = None
    headers = controller.get_http_headers()
    # App Engine serves blob content itself when this header is set; send
    # headers only.
    if 'X-AppEngine-BlobKey' in headers:
        return Response(headers=headers)
    # Only documents rendered through jinja get a jinja environment.
    jinja_env = pod.render_pool.get_jinja_env(
        controller.doc.locale) if controller.use_jinja else None
    rendered_document = controller.render(jinja_env=jinja_env)
    content = rendered_document.read()
    response = Response(body=content)
    response.headers.update(headers)
    # Persist any cache changes produced while rendering.
    if pod.podcache.is_dirty:
        pod.podcache.write()
    return response
def serve_ui_tool(pod, request, values):
    """Serve a static tool asset from the pod's node_modules directory."""
    asset_path = 'node_modules/{}'.format(values.get('tool'))
    # Fall back to text/plain when the MIME type cannot be guessed.
    mime_type, _ = mimetypes.guess_type(asset_path)
    response = wrappers.Response(pod.read_file(asset_path))
    response.headers['Content-Type'] = mime_type or 'text/plain'
    return response
def serve_ui_tool_reroute(pod, request, values, **_kwargs):
    """Serve a static tool asset (new-routing variant).

    The body was a byte-for-byte duplicate of `serve_ui_tool`; it now
    delegates to it so the two routing paths cannot drift apart.
    """
    return serve_ui_tool(pod, request, values)
def serve_run_preprocessor(pod, request, values):
    """Run one named preprocessor and report the outcome as plain text."""
    name = values.get('name')
    if not name:
        message = 'No preprocessor found.'
    else:
        pod.preprocess([name])
        message = 'Finished preprocessor run -> {}'.format(name)
    response = wrappers.Response(message)
    response.headers['Content-Type'] = 'text/plain'
    return response
class PodServer(object):
    """WSGI application serving a pod with the legacy (werkzeug) routing."""
    def __init__(self, pod, debug=False):
        logging.warn(
            'WARNING: Using old routing. '
            'The old routing will be removed in a future version. '
            'Please file issues on GitHub if you are having issues with the new routing.')
        rule = routing.Rule
        self.pod = pod
        self.debug = debug
        # Most-specific console routes first; the '/<path:path>' catch-all
        # forwards everything else to the pod renderer.
        self.url_map = routing.Map([
            rule('/', endpoint=serve_pod),
            rule('/_grow/ui/tools/<path:tool>', endpoint=serve_ui_tool),
            rule('/_grow/preprocessors/run/<path:name>',
                 endpoint=serve_run_preprocessor),
            rule('/_grow/<any("translations"):page>/<path:locale>',
                 endpoint=serve_console),
            rule('/_grow/<path:page>', endpoint=serve_console),
            rule('/_grow', endpoint=serve_console),
            rule('/<path:path>', endpoint=serve_pod),
        ], strict_slashes=False)
        # Start off the server with a clean dependency graph.
        self.pod.podcache.dependency_graph.mark_clean()
    def dispatch_request(self, request):
        """Match the request against the URL map and invoke the endpoint."""
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            endpoint, values = adapter.match()
            return endpoint(self.pod, request, values)
        except routing.RequestRedirect as e:
            # werkzeug signals trailing-slash (and similar) redirects via an
            # exception; turn it into a real redirect response.
            return werkzeug_utils.redirect(e.new_url)
    def wsgi_app(self, environ, start_response):
        """Core WSGI entry point (no error handling)."""
        request = Request(environ)
        response = self.dispatch_request(request)
        return response(environ, start_response)
    def __call__(self, environ, start_response):
        """WSGI entry point; renders an error page for any exception."""
        try:
            return self.wsgi_app(environ, start_response)
        except Exception as e:
            request = Request(environ)
            response = self.handle_exception(request, e)
            return response(environ, start_response)
    def handle_exception(self, request, exc):
        """Render the error page for `exc` with an appropriate status code."""
        # NOTE(review): forcing debug on makes the conditional on the next
        # line always pick logging.exception — looks like leftover debugging;
        # confirm whether this is intentional.
        self.debug = True
        log = logging.exception if self.debug else self.pod.logger.error
        if isinstance(exc, webob_exc.HTTPException):
            status = exc.status_int
            log('{}: {}'.format(status, request.path))
        elif isinstance(exc, errors.RouteNotFoundError):
            status = 404
            log('{}: {}'.format(status, request.path))
        else:
            status = 500
            log('{}: {} - {}'.format(status, request.path, exc))
        env = ui.create_jinja_env()
        template = env.get_template('/views/error.html')
        # Build errors carry their own traceback; otherwise use the current one.
        if (isinstance(exc, errors.BuildError)):
            tb = exc.traceback
        else:
            unused_error_type, unused_value, tb = sys.exc_info()
        formatted_traceback = [
            re.sub('^ ', '', line)
            for line in traceback.format_tb(tb)]
        formatted_traceback = '\n'.join(formatted_traceback)
        kwargs = {
            'exception': exc,
            'is_web_exception': isinstance(exc, webob_exc.HTTPException),
            'pod': self.pod,
            'status': status,
            'traceback': formatted_traceback,
        }
        # Best-effort: link back home if the pod has a home document.
        try:
            home_doc = self.pod.get_home_doc()
            if home_doc:
                kwargs['home_url'] = home_doc.url.path
        except:
            pass
        if (isinstance(exc, errors.BuildError)):
            kwargs['build_error'] = exc.exception
        # Surface template syntax errors specially so the view can show them.
        if (isinstance(exc, errors.BuildError)
                and isinstance(exc.exception, jinja2.TemplateSyntaxError)):
            kwargs['template_exception'] = exc.exception
        elif isinstance(exc, jinja2.TemplateSyntaxError):
            kwargs['template_exception'] = exc
        content = template.render(**kwargs)
        response = wrappers.Response(content, status=status)
        response.headers['Content-Type'] = 'text/html'
        return response
class PodServerReRoute(PodServer):
    """WSGI application serving a pod with the new router-based routing.

    Inherits __call__/handle_exception from PodServer but deliberately does
    not call super().__init__ (no werkzeug URL map is built).
    """
    def __init__(self, pod, host, port, debug=False):
        logging.warn(
            'NOTICE: Using new routing, use --old-routing to use the older routing.')
        self.pod = pod
        self.host = host
        self.port = port
        self.pod.render_pool.pool_size = 1
        self.debug = debug
        self.routes = self.pod.router.routes
        # Register the built-in console endpoints on the pod's router.
        self.routes.add('/_grow/ui/tools/:tool', router.RouteInfo('console', {
            'handler': serve_ui_tool_reroute,
        }))
        editor_meta = {
            'handler': serve_editor_reroute,
            'meta': {
                'app': self,
            },
        }
        self.routes.add('/_grow/editor/*path',
                        router.RouteInfo('console', editor_meta))
        self.routes.add('/_grow/editor',
                        router.RouteInfo('console', editor_meta))
        self.routes.add('/_grow/api/*path', router.RouteInfo('console', {
            'handler': api.serve_api,
        }))
        self.routes.add('/_grow', router.RouteInfo('console', {
            'handler': serve_console_reroute,
        }))
        # Trigger the dev handler hook.
        self.pod.extensions_controller.trigger(
            'dev_handler', self.routes, debug=debug)
        # Start off the server with a clean dependency graph.
        self.pod.podcache.dependency_graph.mark_clean()
    def dispatch_request(self, request):
        """Match the path against the pod router and invoke its handler."""
        path = urllib.unquote(request.path)  # Support escaped paths.
        matched = self.routes.match(path)
        if not matched:
            text = '{} was not found in routes.'
            raise errors.RouteNotFoundError(text.format(path))
        kind = matched.value.kind
        if kind == 'console':
            # Console routes may carry an explicit handler plus extra meta.
            if 'handler' in matched.value.meta:
                handler_meta = None
                if 'meta' in matched.value.meta:
                    handler_meta = matched.value.meta['meta']
                return matched.value.meta['handler'](
                    self.pod, request, matched, meta=handler_meta)
            return serve_console_reroute(self.pod, request, matched)
        # Everything else is pod content.
        return serve_pod_reroute(self.pod, request, matched)
    def wsgi_app(self, environ, start_response):
        """Core WSGI entry point (no error handling)."""
        request = ReRouteRequest(environ)
        response = self.dispatch_request(request)
        return response(environ, start_response)
def create_wsgi_app(pod, host, port, debug=False):
    """Build the dev-server WSGI app with the static UI assets mounted."""
    if pod.use_reroute:
        podserver_app = PodServerReRoute(pod, host, port, debug=debug)
    else:
        podserver_app = PodServer(pod, debug=debug)
    grow_dir = utils.get_grow_dir()
    static_paths = {
        '/_grow/ui': os.path.join(grow_dir, 'ui', 'dist'),
        '/_grow/assets': os.path.join(grow_dir, 'ui', 'admin', 'assets'),
    }
    return wsgi.SharedDataMiddleware(podserver_app, static_paths)
| StarcoderdataPython |
12827022 | # --------------
import pandas as pd
from collections import Counter
# Load dataset (`path` is supplied by the execution environment).
data = pd.read_csv(path)
# Count missing values per column (result is displayed, not stored).
data.isnull().sum()
# --------------
import seaborn as sns
from matplotlib import pyplot as plt
sns.set_style(style='darkgrid')
# Store the label values (last column is the Activity label).
label = data.iloc[:,-1]
label.head(5)
# Plot the distribution of activity classes.
sns.countplot(data=data,x='Activity')
# plot the countplot
# --------------
import numpy as np
# Make a copy of the dataset so the original stays untouched.
data_copy = data.copy()
# NOTE(review): `mask` is defined but never used below.
mask = ('WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS')
# Create an empty column used only so count() has something to count.
data_copy['duration'] = ''
# Count rows per (stair activity, subject); the 1.28 factor presumably
# converts window counts to seconds — TODO confirm the window length.
duration_df = data_copy.groupby([label.mask(label!= 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS'), 'subject'])['duration'].count() * 1.28
duration_df = pd.DataFrame(duration_df)
plot_data = duration_df.sort_values(by='duration',ascending= False)
plot_data.reset_index(inplace=True)
# Shorten the activity names for plotting.
replaced_value = {'WALKING_UPSTAIRS':'Upstairs','WALKING_DOWNSTAIRS':'Downstairs'}
plot_data['Activity'] = plot_data['Activity'].map(replaced_value)
sns.barplot(data=plot_data,x='subject',y='duration')
# Sort the values of duration
# --------------
# Exclude the Activity column and the subject column (keep floats only).
feature_cols = data.select_dtypes(exclude=['object','int']).columns
# Calculate pairwise correlations and flatten to (Feature_1, Feature_2, score) rows.
correlated_values = data[feature_cols].corr().stack().reset_index()
#stack the data and convert to a dataframe
correlated_values = pd.DataFrame(correlated_values)
correlated_values.rename(columns = {'level_0':'Feature_1','level_1':'Feature_2',0:'Correlation_score'},inplace=True)
# Create an abs_correlation column for magnitude-based ranking.
correlated_values['abs_correlation'] = correlated_values['Correlation_score'].abs()
# Pick the most correlated feature pairs, excluding self-correlations.
s_corr_list = correlated_values.sort_values(by='abs_correlation',ascending=False)
top_corr_fields = s_corr_list[(s_corr_list['abs_correlation']>0.8)]
top_corr_fields = top_corr_fields[(top_corr_fields['Feature_1'])!=(top_corr_fields['Feature_2'])]
print(top_corr_fields.head())
# --------------
# Importing necessary libraries
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import precision_recall_fscore_support as error_metric
from sklearn.metrics import confusion_matrix, accuracy_score
# Encode the target variable (activity labels) as integers.
le = LabelEncoder()
le.fit(data['Activity'])
data['Activity'] = le.transform(data['Activity'])
# Split the dataset into train and test sets (keyword axis: the positional
# form `drop('Activity', 1)` is deprecated in pandas).
X = data.drop('Activity', axis=1)
y = data['Activity']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=40)
# Baseline SVM classifier.
classifier = SVC()
clf = classifier.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Bug fix: precision_recall_fscore_support returns (precision, recall,
# f-score, support) — the second value is recall, not accuracy; it was
# previously mislabeled `accuracy`.
precision, recall, f_score, _ = error_metric(y_test, y_pred, average='weighted')
model1_score = accuracy_score(y_test, y_pred)
print(precision)
print(recall)
print(f_score)
print(model1_score)
# --------------
# importing libraries
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
# Feature selection using Linear SVC (L1 penalty drives weights to zero).
lsvc = LinearSVC(C=0.01,penalty = 'l1',dual = False,random_state =42)
lsvc.fit(X_train,y_train)
model_2 = SelectFromModel(lsvc,prefit=True)
new_train_features= model_2.transform(X_train)
new_test_features = model_2.transform(X_test)
# Retrain an SVM on the reduced feature set.
classifier_2 = SVC()
clf_2 = classifier_2.fit(new_train_features,y_train)
y_pred_new = clf_2.predict(new_test_features)
model2_score = accuracy_score(y_test,y_pred_new)
# NOTE(review): error_metric returns (precision, recall, f-score, support);
# the variable named `accuracy` actually holds recall.
precision, accuracy , f_score, _ = error_metric(y_test,y_pred_new,average = 'weighted')
# --------------
# Importing Libraries
from sklearn.model_selection import GridSearchCV
# Set the hyperparameter grid for the SVM.
parameters = {'kernel':['linear','rbf'],'C':[100, 20, 1, 0.1]}
# Use grid search (cross-validated) to select the best hyperparameters.
svc = SVC()
selector = GridSearchCV(svc,parameters,scoring='accuracy')
selector.fit(new_train_features,y_train)
print(selector.best_params_)
print(selector.cv_results_)
# Inspect per-candidate mean/std test scores.
means = selector.cv_results_['mean_test_score']
stds = selector.cv_results_['std_test_score']
params = selector.cv_results_['params']
print(means,stds,params)
# Final model with the chosen hyperparameters.
classifier_3 = SVC(C=20,kernel='rbf')
clf_3 = classifier_3.fit(new_train_features,y_train)
y_pred_final = clf_3.predict(new_test_features)
model3_score = accuracy_score(y_test,y_pred_final)
precision,recall,f_score,_ = error_metric(y_test,y_pred_final,average='weighted')
print(precision)
print(recall)
print(f_score)
print(model3_score)
# Model building after Hyperparameter tuning
| StarcoderdataPython |
8145710 | # Codejam 2021, Qualification Round: Median Sort
import sys
def query(*x):
    """Send one space-separated query to the interactive judge and read its reply.

    Exits the program immediately if the judge answers -1 (wrong answer or
    query limit exceeded).
    """
    print(*x, sep=" ", flush=True)
    response = input()
    if response == '-1':
        sys.exit()
    return int(response)
def split_list(L, pivots):
    """Partition the remaining items of ``L`` around the two pivots.

    Consumes ``L`` (pops until empty) and, for each item, asks the judge for
    the median of (item, pivot0, pivot1) to decide which side it falls on.
    Returns (left-of-pivot0, between-pivots, right-of-pivot1).
    """
    lo, mid, hi = [], [], []
    while L:
        candidate = L.pop()
        med = query(candidate, *pivots)
        if med == candidate:
            bucket = mid
        elif med == pivots[0]:
            bucket = lo
        else:
            bucket = hi
        bucket.append(candidate)
    return lo, mid, hi
def order_list(L, left=None, right=None):
    """Recursively order ``L`` with median queries (two-pivot quicksort).

    ``left``/``right`` are already-placed neighboring elements, used to
    orient the pivot pair so all recursive results share one direction.
    """
    if len(L) < 2:
        return L
    pivots = (L.pop(), L.pop())
    # Orient the pivots against the known neighbor, if any.
    if left is not None:
        median = query(left, *pivots)
        if median != pivots[0]:
            pivots = pivots[::-1]
    elif right is not None:
        median = query(right, *pivots)
        if median != pivots[1]:
            pivots = pivots[::-1]
    left, middle, right = split_list(L, pivots)
    # Recurse on each partition, passing the adjacent pivot for orientation.
    new_left = order_list(left, right=pivots[0]) + [pivots[0]]
    new_middle = order_list(middle, left=pivots[0], right=pivots[1])
    new_right = [pivots[1]] + order_list(right, left=pivots[1])
    return new_left + new_middle + new_right
# I/O Code: T test cases, each ordering 1..N with at most Q queries.
T, N, Q = map(int, input().split())
for _ in range(1, T + 1):
    # Order 1..N via interactive median queries, then submit the result.
    L = order_list(list(range(1, N + 1)))
    _ = query(*L)
sys.exit()
| StarcoderdataPython |
6478706 | import json
from channels.generic.websocket import AsyncWebsocketConsumer
from asgiref.sync import sync_to_async
from .models import Message
class ChatConsumer(AsyncWebsocketConsumer):
    """Websocket consumer for a chat room: relays messages to the room group
    and persists each message to the database."""
    async def connect(self):
        ''' Connect to a chat room '''
        # Get and initialize variables (room name comes from the URL route).
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = f'chat_{self.room_name}'
        # Join room
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)
        await self.accept()
    async def disconnect(self, close_code):
        ''' Disconnect from a room'''
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)
    async def receive(self, text_data):
        ''' Receive a message from user and send it to the room '''
        data = json.loads(text_data)
        message = data['message']
        username = data['username']
        room = data['room']
        # Persist first, then fan out to every member of the group.
        await self.save_message(username, room, message)
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message,
                'username': username
            }
        )
    async def chat_message(self, event):
        ''' Receive a message from chat group and send it to user '''
        message = event['message']
        username = event['username']
        await self.send(text_data=json.dumps({
            'message': message,
            'username': username
        }))
    @sync_to_async
    def save_message(self, username, room, message):
        # Blocking ORM call wrapped so it can be awaited from async code.
        Message.objects.create(username=username, room=room, content=message) | StarcoderdataPython |
9792657 | #!/usr/bin/env python
import torch
import torch.nn as nn
from colossalai.nn import CheckpointModule
from .utils.dummy_data_generator import DummyDataGenerator
from .registry import non_distributed_component_funcs
class NetWithRepeatedlyComputedLayers(CheckpointModule):
    """
    This model is to test with layers which go through forward pass multiple times.
    In this model, the fc1 and fc2 layers are each called twice per forward.
    """
    def __init__(self, checkpoint=False) -> None:
        super().__init__(checkpoint=checkpoint)
        self.fc1 = nn.Linear(5, 5)
        self.fc2 = nn.Linear(5, 5)
        self.fc3 = nn.Linear(5, 2)
        # Plain list of references (not nn.ModuleList); the layers are already
        # registered via the attributes above.
        self.layers = [self.fc1, self.fc2, self.fc1, self.fc2, self.fc3]
    def forward(self, x):
        # Apply the layer sequence; fc1 and fc2 each run twice.
        for layer in self.layers:
            x = layer(x)
        return x
class DummyDataLoader(DummyDataGenerator):
    """Generates random (16, 5) float batches with binary integer labels."""
    def generate(self):
        data = torch.rand(16, 5)
        label = torch.randint(low=0, high=2, size=(16,))
        return data, label
@non_distributed_component_funcs.register(name='repeated_computed_layers')
def get_training_components():
    """Return (model_builder, trainloader, testloader, optimizer class, criterion)
    for the repeated-computed-layers test component."""
    def model_builder(checkpoint=True):
        return NetWithRepeatedlyComputedLayers(checkpoint)
    trainloader = DummyDataLoader()
    testloader = DummyDataLoader()
    criterion = torch.nn.CrossEntropyLoss()
    return model_builder, trainloader, testloader, torch.optim.Adam, criterion
| StarcoderdataPython |
9789277 | <filename>installer/lambda_function.py
"""
Installs the AWS Integration bundle on the target Reveal(x)
and creates an Open Data Stream endpoint to Amazon SNS.
"""
# COPYRIGHT 2020 BY EXTRAHOP NETWORKS, INC.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
# This file is part of an ExtraHop Supported Integration. Make NO MODIFICATIONS below this line
import json
import logging
import re
import os
import sys
import boto3
from crhelper import CfnResource
from extrahop import ExtraHopClient
from requests.exceptions import HTTPError
from aws_secretsmanager_caching import SecretCache, SecretCacheConfig
from botocore.exceptions import ClientError
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
helper = CfnResource()
class ExtraHopConnection(object):
    """Class to encapsulate an ExtraHop client and related data."""
    def __init__(self, host, api_key, verify_certs=False):
        self.client = ExtraHopClient(host, api_key, verify_certs)
        self.appliance_data = self._get_appliance_data()
        # NOTE(review): if _get_appliance_data returned {}, .get("uuid") is
        # None and .replace raises AttributeError — confirm intended.
        self.uuid = self.appliance_data.get("uuid").replace("-", "")
        self.platform = self.appliance_data.get("platform")
    def _get_appliance_data(self):
        """Return the local appliance record from the appliance, or {} on error."""
        try:
            rsp = self.client.get("appliances")
            appliances = rsp.json()
        except (HTTPError, json.JSONDecodeError) as err:
            LOGGER.error(f"Error getting information from ExtraHop: {err}")
        else:
            # Only the 'local' entry describes this appliance itself.
            local_appliances = [
                app for app in appliances if app["connection_type"] == "local"
            ]
            if len(local_appliances) > 0:
                return local_appliances.pop()
            else:
                LOGGER.error(f"Could not find appliance UUID.")
        return {}
    def get_eca_nodes(self):
        """Return {node uuid: node API id} for nodes attached to this ECA."""
        try:
            rsp = self.client.get("nodes")
            data = rsp.json()
        except (HTTPError, json.JSONDecodeError) as err:
            LOGGER.error(f"Could not retrieve bundle info from ExtraHop: {err}")
            return {}
        else:
            return {node["uuid"]: node["id"] for node in data}
    def get_all_bundles(self):
        """Get all installed bundles from ExtraHop.
        Returns:
            List(dict): JSON response from ExtraHop, if successful
            None: if error
        """
        try:
            rsp = self.client.get("bundles")
            data = rsp.json()
        except (HTTPError, json.JSONDecodeError) as err:
            LOGGER.error(f"Could not retrieve bundle info from ExtraHop: {err}")
            return None
        else:
            return data
    def post_bundle(self, bundle):
        """Posts a bundle to ExtraHop
        Args:
            bundle (JSON): ExtraHop bundle
        Returns:
            int: API ID for bundle, if successful
            None: if error
        """
        try:
            rsp = self.client.post("bundles", json=bundle)
            data = rsp.json()
        except (HTTPError, json.JSONDecodeError) as err:
            LOGGER.error(f"Could not install bundle file: {err}")
            return None
        else:
            # The new bundle's API id is the last segment of the Location header.
            bundle_id = rsp.headers["Location"].split("/")[-1]
            return bundle_id
    def apply_bundle(self, bundle_id, node_ids=[]):
        """Applies an installed bundle on ExtraHop
        Args:
            bundle_id (int): API ID for bundle
            node_ids (list): optional node ids to restrict the apply to
        """
        try:
            options = {"include_assignments": True, "policy": "skip"}
            if node_ids:
                options["node_ids"] = node_ids
            rsp = self.client.post(f"bundles/{bundle_id}/apply", json=options)
        except HTTPError as err:
            LOGGER.error(f"Could not apply bundle file: {err}")
def initialize_extrahop_connections():
    """Gets Reveal(x) credentials from AWS Secrets Manager and
    creates ExtraHopClient connections to each Reveal(x).
    Reference: https://aws.amazon.com/blogs/security/how-to-securely-provide-database-credentials-to-lambda-functions-by-using-aws-secrets-manager/
    Returns:
        List(ExtraHopConnection): ExtraHop connections
    """
    SECRETS_CLIENT = boto3.client("secretsmanager")
    secret_name = "extrahop/awsintegration"
    try:
        secret_cache = SecretCache(SecretCacheConfig(), SECRETS_CLIENT)
        secret_response_value = secret_cache.get_secret_string(secret_name)
    except ClientError as err:
        raise err
    else:
        secrets = secret_response_value
        # The secret is a JSON object mapping host -> API key.
        secrets_dict = json.loads(secrets)
        extrahops = list()
        for host, api_key in secrets_dict.items():
            try:
                extrahop_connection = ExtraHopConnection(host=host, api_key=api_key)
            except Exception as error:
                # Best-effort: skip unreachable appliances rather than failing
                # the whole install.
                LOGGER.warning(f"Could not connect to appliance at {host}: {error}")
                pass
            else:
                extrahops.append(extrahop_connection)
        return extrahops
@helper.create
def bundle_installer(event, context):
    """Installs and configures AWS Integration bundle components on ExtraHop.
    Args:
        event (dict): Event data passed to handler
        context (object): Runtime information
    """
    EXTRAHOP_CLIENTS = initialize_extrahop_connections()
    BUNDLE_NAME = "AWS Integration"
    BUNDLE_PATH = "bundle.json"
    DETECTION_TRIGGER = "AWS Integration: Detections Publisher"
    NEWDEVICE_TRIGGER = "AWS Integration: New Device Publisher"
    # SNS topic ARNs supplied by the CloudFormation custom resource.
    detections_arn = event["ResourceProperties"]["DetectionsARN"]
    newdevice_arn = event["ResourceProperties"]["NewDeviceARN"]
    # load bundle file
    with open(BUNDLE_PATH) as bundle_fp:
        bundle = json.load(bundle_fp)
    # edit triggers in bundle code:
    # replace the ARN placeholders with the real value from CloudFormation
    for trigger in bundle["Trigger"]:
        if trigger["name"] == DETECTION_TRIGGER:
            pattern = "const SNS_TOPIC_ARN = (.*?);\\n"
            replacement = f'const SNS_TOPIC_ARN = "{detections_arn}";\\n'
            trigger["script"] = re.sub(pattern, replacement, trigger["script"])
        elif trigger["name"] == NEWDEVICE_TRIGGER:
            pattern = "const SNS_TOPIC_ARN = (.*?);\\n"
            replacement = f'const SNS_TOPIC_ARN = "{newdevice_arn}";\\n'
            trigger["script"] = re.sub(pattern, replacement, trigger["script"])
    # Split the connections by appliance role.
    command_list = [item for item in EXTRAHOP_CLIENTS if item.platform == "command"]
    discover_list = [item for item in EXTRAHOP_CLIENTS if item.platform == "discover"]
    discover_uuids = set([item.uuid for item in discover_list])
    # If COMMAND appliance is present, push bundle to Command, and install from there
    for eca in command_list:
        bundles = eca.get_all_bundles()
        if bundles is None:
            LOGGER.error(
                f"Couldn't get bundles from Command appliance {eca.client.host}. Aborting install."
            )
            continue
        bundle_names = [bun["name"] for bun in bundles]
        # Idempotence: skip appliances that already have the bundle.
        if BUNDLE_NAME in bundle_names:
            LOGGER.info(
                f"{BUNDLE_NAME} bundle already installed on {eca.client.host}. Aborting install."
            )
            continue
        bundle_id = eca.post_bundle(bundle)
        if bundle_id is None:
            LOGGER.error(
                f"{BUNDLE_NAME} bundle failed to install on {eca.client.host}."
            )
            continue
        target_nodes_for_install = []
        attached_nodes = eca.get_eca_nodes()
        # only install on Discover appliances we have API keys for
        for node_uuid, node_id in attached_nodes.items():
            if node_uuid in discover_uuids:
                target_nodes_for_install.append(node_id)
        LOGGER.debug(
            f"Applying bundle to nodes {target_nodes_for_install} on ECA {eca.client.host}."
        )
        eca.apply_bundle(bundle_id, node_ids=target_nodes_for_install)
    for eda in discover_list:
        # Try to install bundle on all Discover appliances directly as well.
        bundles = eda.get_all_bundles()
        if bundles is None:
            LOGGER.error(
                f"Couldn't get bundles from Command appliance {eda.client.host}. Aborting install."
            )
            continue
        bundle_names = [bun["name"] for bun in bundles]
        if BUNDLE_NAME in bundle_names:
            LOGGER.info(
                f"{BUNDLE_NAME} bundle already installed on {eda.client.host}. Aborting install."
            )
            continue
        bundle_id = eda.post_bundle(bundle)
        if bundle_id is None:
            LOGGER.error(
                f"{BUNDLE_NAME} bundle failed to install on {eda.client.host}."
            )
            continue
        LOGGER.debug(f"Applying bundle to {eda.client.host}")
        eda.apply_bundle(bundle_id)
@helper.update
@helper.delete
def no_op(event, context):
    """Don't do anything.
    Args:
        event (dict): Event data passed to handler
        context (object): Runtime information
    """
    # Stack updates and deletes require no work; report success to
    # CloudFormation so the resource lifecycle completes.
    # Just return a success code.
    return True
def lambda_handler(event, context):
    """Lambda handler called by CF Template.
    Args:
        event (dict): Event data passed to handler
        context (object): Runtime information
    """
    # Just pass everything to the CF Custom Resource handler (crhelper),
    # which dispatches to the @helper.create/update/delete functions above.
    helper(event, context)
| StarcoderdataPython |
6571429 | """Test cases for Multitrack class."""
import numpy as np
from pytest import fixture
from pypianoroll import BinaryTrack, Multitrack, StandardTrack
from .utils import multitrack
def test_repr(multitrack):
    """repr() should list the multitrack's name, resolution, downbeat and tracks."""
    assert repr(multitrack) == (
        "Multitrack(name='test', resolution=24, "
        "downbeat=array(shape=(96,), dtype=bool), tracks=["
        "StandardTrack(name='track_1', program=0, is_drum=False, "
        "pianoroll=array(shape=(96, 128), dtype=uint8)), "
        "BinaryTrack(name='track_2', program=0, is_drum=True, "
        "pianoroll=array(shape=(96, 128), dtype=bool))])"
    )
def test_len(multitrack):
    """len() counts the tracks."""
    assert len(multitrack) == 2
def test_slice(multitrack):
    """Integer indexing returns the track at that position."""
    sliced = multitrack[1]
    assert isinstance(sliced, BinaryTrack)
    assert sliced.pianoroll.shape == (96, 128)
def test_is_valid(multitrack):
    """validate() should not raise for a well-formed multitrack."""
    multitrack.validate()
def test_get_length(multitrack):
    """get_length() reports the active (nonzero) length in time steps."""
    assert multitrack.get_length() == 95
def test_get_downbeat_steps(multitrack):
    """Only step 0 is marked as a downbeat in the fixture."""
    assert np.all(multitrack.get_downbeat_steps() == [0])
def test_set_nonzeros(multitrack):
    """set_nonzeros() converts binary tracks to standard with the given velocity."""
    multitrack.set_nonzeros(50)
    assert isinstance(multitrack.tracks[1], StandardTrack)
    assert multitrack.tracks[1].pianoroll[0, 36] == 50
def test_set_resolution(multitrack):
    """set_resolution(4) downsamples time steps in both tracks."""
    multitrack.set_resolution(4)
    assert multitrack.tracks[0].pianoroll[15, 60] == 100
    assert np.all(
        multitrack.tracks[1].pianoroll[[0, 3, 5, 8, 11, 13], 36] == 1
    )
def test_copy(multitrack):
    """copy() deep-copies the downbeat, tracks and pianoroll arrays."""
    copied = multitrack.copy()
    assert id(copied) != id(multitrack)
    assert id(copied.downbeat) != id(multitrack.downbeat)
    assert id(copied.tracks[0]) != id(multitrack.tracks[0])
    assert id(copied.tracks[0].pianoroll) != id(multitrack.tracks[0].pianoroll)
def test_count_downbeat(multitrack):
    """The fixture has exactly one downbeat."""
    assert multitrack.count_downbeat() == 1
def test_stack(multitrack):
    """stack() combines pianorolls into a (n_tracks, time, pitch) array."""
    stacked = multitrack.stack()
    assert stacked.shape == (2, 96, 128)
@fixture
def multitrack_to_blend():
    """Multitrack with two identical StandardTracks, for testing blend modes."""
    pianoroll_1 = np.zeros((96, 128), np.uint8)
    pianoroll_1[:95, [60, 64, 67, 72]] = 100
    track_1 = StandardTrack(
        name="track_1", program=0, is_drum=False, pianoroll=pianoroll_1
    )
    pianoroll_2 = np.zeros((96, 128), np.uint8)
    pianoroll_2[:95, [60, 64, 67, 72]] = 100
    track_2 = StandardTrack(
        name="track_2", program=0, is_drum=True, pianoroll=pianoroll_2
    )
    downbeat = np.zeros((96,), bool)
    downbeat[0] = True
    return Multitrack(
        name="test",
        resolution=24,
        downbeat=downbeat,
        tracks=[track_1, track_2],
    )
def test_blend_any(multitrack_to_blend):
    """blend('any') yields a boolean roll that is True where any track plays."""
    blended = multitrack_to_blend.blend("any")
    assert blended.dtype == np.bool_
    assert not blended[0, 0]
    assert blended[0, 60]
def test_blend_sum(multitrack_to_blend):
    """blend('sum') adds velocities, clipped to the uint8 MIDI maximum (127)."""
    blended = multitrack_to_blend.blend("sum")
    assert blended.dtype == np.uint8
    assert blended[0, 0] == 0
    assert blended[0, 60] == 127
def test_blend_max(multitrack_to_blend):
    """blend('max') takes the elementwise maximum velocity across tracks."""
    blended = multitrack_to_blend.blend("max")
    assert blended.dtype == np.uint8
    assert blended[0, 0] == 0
    assert blended[0, 60] == 100
def test_append(multitrack):
    """append() adds a new track at the end of the track list."""
    pianoroll = np.zeros((96, 128), np.bool_)
    pianoroll[:95:16, 41] = True
    track_to_append = BinaryTrack(name="track_3", pianoroll=pianoroll)
    multitrack.append(track_to_append)
    assert len(multitrack.tracks) == 3
    assert multitrack.tracks[2].name == "track_3"
def test_binarize(multitrack):
    """binarize() converts StandardTrack velocities to boolean on/off."""
    multitrack.binarize()
    assert isinstance(multitrack.tracks[0], BinaryTrack)
    assert multitrack.tracks[0].pianoroll[0, 60] == 1
def test_clip(multitrack):
    """clip(upper=60) caps velocities at 60."""
    multitrack.clip(upper=60)
    assert multitrack.tracks[0].pianoroll[0, 60] == 60
def test_pad_to_same():
    """pad_to_same() leaves all tracks with the same (maximum) length.

    Fixes: `np.bool` was removed in NumPy 1.24 — replaced with `np.bool_`,
    matching the rest of this file. The unused (shadowed) `multitrack`
    fixture parameter was also dropped.
    """
    pianoroll_1 = np.zeros((96, 128), np.uint8)
    pianoroll_1[0:95, [60, 64, 67, 72]] = 100
    track_1 = StandardTrack(
        name="track_1", program=0, is_drum=False, pianoroll=pianoroll_1
    )
    pianoroll_2 = np.zeros((96, 128), np.bool_)
    pianoroll_2[0:95:16, 36] = True
    track_2 = BinaryTrack(
        name="track_2", program=0, is_drum=True, pianoroll=pianoroll_2
    )
    downbeat = np.zeros((96,), bool)
    downbeat[0] = True
    multitrack = Multitrack(
        name="test",
        resolution=24,
        downbeat=downbeat,
        tracks=[track_1, track_2],
    )
    multitrack.pad_to_same()
    assert multitrack.tracks[0].pianoroll.shape[0] == 96
    assert multitrack.tracks[1].pianoroll.shape[0] == 96
def test_remove_empty():
    """remove_empty() drops tracks whose pianoroll is all zeros.

    Fixes: `np.bool` was removed in NumPy 1.24 — replaced with `np.bool_`,
    matching the rest of this file.
    """
    pianoroll_1 = np.zeros((96, 128), np.uint8)
    pianoroll_1[0:95, [60, 64, 67, 72]] = 100
    track_1 = StandardTrack(
        name="track_1", program=0, is_drum=False, pianoroll=pianoroll_1
    )
    pianoroll_2 = np.zeros((96, 128), np.bool_)
    track_2 = StandardTrack(
        name="track_2", program=0, is_drum=True, pianoroll=pianoroll_2
    )
    downbeat = np.zeros((96,), bool)
    downbeat[0] = True
    multitrack = Multitrack(
        name="test",
        resolution=24,
        downbeat=downbeat,
        tracks=[track_1, track_2],
    )
    multitrack.remove_empty()
    assert len(multitrack) == 1
def test_trim(multitrack):
    """trim() cuts all tracks down to the active length (95 steps)."""
    multitrack.trim()
    assert multitrack.tracks[0].pianoroll.shape == (95, 128)
    assert multitrack.tracks[1].pianoroll.shape == (95, 128)
| StarcoderdataPython |
1875214 | <reponame>MingboPeng/honeybee-schema
from honeybee_schema.energy.simulation import SimulationParameter
import os
# Target folder where all of the sample files live
# (<repo root>/samples/simulation_parameter).
root = os.path.dirname(os.path.dirname(__file__))
target_folder = os.path.join(root, 'samples', 'simulation_parameter')
def test_detailed_simulation_par():
    """The detailed sample file parses as a valid SimulationParameter."""
    file_path = os.path.join(target_folder, 'simulation_par_detailed.json')
    SimulationParameter.parse_file(file_path)
def test_simple_simulation_par():
    """The simple sample file parses as a valid SimulationParameter."""
    file_path = os.path.join(target_folder, 'simulation_par_simple.json')
    SimulationParameter.parse_file(file_path)
| StarcoderdataPython |
6642201 | from flask import Flask,make_response,request,jsonify
app = Flask(__name__)
@app.route('/peticion_get', methods=['GET'])
def peticion_get():
    """Simple GET endpoint that confirms the request verb."""
    # Flask only routes GET here, so this guard always holds in practice.
    if request.method != 'GET':
        return None
    return "Es una petición GET"
@app.route('/peticion_post', methods=['POST'])
def peticion_post():
    """POST endpoint performing a hard-coded credential check.

    NOTE(review): credentials baked into source are insecure; kept as-is
    to preserve behavior.
    """
    if request.method != 'POST':
        return None
    credentials = (request.form.get('usuario'), request.form.get('pass'))
    if credentials == ('admin', '<PASSWORD>'):
        return "Se autentico correctamente"
    return 'Error en usuario o contraseña'
# In-memory "grade book": maps student name -> grade.
# Module-level state: resets on every restart and is not thread-safe.
portal={
    "estudiante1":90,
    "estudiante2":58
}
@app.route('/portal/<nombre>/<nota>', methods=['PUT'])
def peticion_put(nombre, nota):
    """Create or update a student's grade.

    Returns the full grade book as JSON: 200 when an existing record was
    updated, 201 (Created) when a new student was added.
    """
    # BUG FIX: the original tested `nota in portal` (the grade string),
    # so existing students were never matched; the key is the name.
    if nombre in portal:
        portal[nombre] = int(nota)
        return make_response(jsonify(portal), 200)
    # No existing record: create the student with the given grade.
    portal[nombre] = int(nota)
    # BUG FIX: the original returned status 2001, which is not a valid
    # HTTP status code; 201 Created is the intended response.
    return make_response(jsonify(portal), 201)
@app.route('/portal/<nombre>', methods=['DELETE'])
def peticion_delete(nombre):
    """Delete a student record; 404 when the student does not exist."""
    if nombre not in portal:
        return make_response(jsonify({"error":"No se encuentra el estudiante"}), 404)
    del portal[nombre]
    return make_response(jsonify(portal), 200)
if __name__=="__main__":
    # Development server only -- debug=True must not be used in production.
    app.run(debug=True,port=5000,threaded=True)
from django.conf.urls import url
from . import views
urlpatterns = [
    # Collection endpoint for customers.
    url(r'^customer/$', views.CustomerList.as_view()),
    # Detail endpoint, addressed by integer primary key.
    url(r'^customer/(?P<pk>[0-9]+)/$', views.CustomerDetail.as_view()),
]
| StarcoderdataPython |
import math
import torch
from torch.nn import Module, Parameter
import torch.nn.init as init
import torch.nn.functional as F
class _BayesBatchNorm(Module):
    r"""
    Applies Bayesian Batch Normalization over a 2D or 3D input.

    Instead of fixed affine parameters, ``weight`` and ``bias`` are sampled
    on each forward pass from factorized Gaussian posteriors
    (``mu + exp(log_sigma) * eps``) unless the module has been frozen via
    :meth:`freeze`, in which case the cached noise is reused.

    Arguments:
        prior_mu (Float): mean of prior normal distribution.
        prior_sigma (Float): sigma of prior normal distribution.
    .. note:: other arguments are following batchnorm of pytorch 1.2.0.
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
    """
    _version = 2
    __constants__ = ['prior_mu', 'prior_sigma', 'track_running_stats',
                     'momentum', 'eps', 'weight', 'bias',
                     'running_mean', 'running_var', 'num_batches_tracked',
                     'num_features', 'affine']

    def __init__(self, prior_mu, prior_sigma, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True):
        super(_BayesBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Variational posterior parameters for the affine transform.
            self.prior_mu = prior_mu
            self.prior_sigma = prior_sigma
            self.prior_log_sigma = math.log(prior_sigma)
            self.weight_mu = Parameter(torch.Tensor(num_features))
            self.weight_log_sigma = Parameter(torch.Tensor(num_features))
            # weight_eps/bias_eps stay None while unfrozen: fresh noise per forward.
            self.register_buffer('weight_eps', None)
            self.bias_mu = Parameter(torch.Tensor(num_features))
            self.bias_log_sigma = Parameter(torch.Tensor(num_features))
            self.register_buffer('bias_eps', None)
        else:
            self.register_parameter('weight_mu', None)
            self.register_parameter('weight_log_sigma', None)
            self.register_buffer('weight_eps', None)
            self.register_parameter('bias_mu', None)
            self.register_parameter('bias_log_sigma', None)
            self.register_buffer('bias_eps', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        # Reset running statistics to the identity transform (mean 0, var 1).
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        self.reset_running_stats()
        if self.affine:
            # Initialization method of Adv-BNN.
            self.weight_mu.data.uniform_()
            self.weight_log_sigma.data.fill_(self.prior_log_sigma)
            self.bias_mu.data.zero_()
            self.bias_log_sigma.data.fill_(self.prior_log_sigma)

            # Initilization method of the original torch nn.batchnorm.
            # init.ones_(self.weight_mu)
            # self.weight_log_sigma.data.fill_(self.prior_log_sigma)
            # init.zeros_(self.bias_mu)
            # self.bias_log_sigma.data.fill_(self.prior_log_sigma)

    def freeze(self) :
        # Sample the affine noise once and cache it so that subsequent
        # forward passes are deterministic.
        if self.affine :
            self.weight_eps = torch.randn_like(self.weight_log_sigma)
            self.bias_eps = torch.randn_like(self.bias_log_sigma)

    def unfreeze(self) :
        # Drop the cached noise: forward passes resample eps on every call.
        if self.affine :
            self.weight_eps = None
            self.bias_eps = None

    def _check_input_dim(self, input):
        # Subclasses validate the expected tensor rank (e.g. 4D for 2D BN).
        raise NotImplementedError

    def forward(self, input):
        self._check_input_dim(input)

        # Mirrors torch.nn.BatchNorm: momentum=None means cumulative moving average.
        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:
                    # Cumulative moving average over all batches seen so far.
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:
                    # Exponential moving average with fixed momentum.
                    exponential_average_factor = self.momentum

        if self.affine :
            if self.weight_eps is None :
                # Unfrozen: reparameterization trick with fresh Gaussian noise.
                weight = self.weight_mu + torch.exp(self.weight_log_sigma) * torch.randn_like(self.weight_log_sigma)
                bias = self.bias_mu + torch.exp(self.bias_log_sigma) * torch.randn_like(self.bias_log_sigma)
            else :
                # Frozen: reuse the noise cached by freeze().
                weight = self.weight_mu + torch.exp(self.weight_log_sigma) * self.weight_eps
                bias = self.bias_mu + torch.exp(self.bias_log_sigma) * self.bias_eps
        else :
            weight = None
            bias = None

        return F.batch_norm(
            input, self.running_mean, self.running_var, weight, bias,
            self.training or not self.track_running_stats,
            exponential_average_factor, self.eps)

    def extra_repr(self):
        # NOTE(review): formatting with **self.__dict__ assumes prior_mu /
        # prior_sigma were set, which only happens when affine=True -- verify.
        return '{prior_mu}, {prior_sigma}, {num_features}, ' \
            'eps={eps}, momentum={momentum}, affine={affine}, ' \
            'track_running_stats={track_running_stats}'.format(**self.__dict__)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        version = local_metadata.get('version', None)

        if (version is None or version < 2) and self.track_running_stats:
            # Checkpoints saved before _version 2 lack num_batches_tracked;
            # inject a zero so loading does not fail.
            num_batches_tracked_key = prefix + 'num_batches_tracked'
            if num_batches_tracked_key not in state_dict:
                state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)

        super(_BayesBatchNorm, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)
class BayesBatchNorm2d(_BayesBatchNorm):
    r"""
    Applies Bayesian Batch Normalization over a 2D input.

    Arguments:
        prior_mu (Float): mean of prior normal distribution.
        prior_sigma (Float): sigma of prior normal distribution.
    .. note:: other arguments are following batchnorm of pytorch 1.2.0.
        https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
    """

    def _check_input_dim(self, input):
        # 2D batchnorm expects rank-4 (N, C, H, W) tensors.
        # FIX: removed stray text-extraction residue that was fused onto the
        # final line and broke the syntax.
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
# plugins/zenhub/komand_zenhub/actions/get_issue_events/schema.py
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable action description surfaced by the Komand platform.
    DESCRIPTION = "Get the ZenHub Events for a GitHub Issue"
class Input:
    # Keys used to read values from the action's input dictionary.
    ISSUE_NUMBER = "issue_number"
    REPO_ID = "repo_id"
class Output:
    # Key under which the action publishes its result.
    EVENTS = "events"
class GetIssueEventsInput(komand.Input):
    # JSON schema describing the action's required input variables.
    # Generated by the Komand SDK -- schema text intentionally untouched.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "issue_number": {
      "type": "integer",
      "title": "Issue Number",
      "description": "GitHub Issue Number",
      "order": 2
    },
    "repo_id": {
      "type": "integer",
      "title": "Repository ID",
      "description": "GitHub Repository ID e.g. 24237263",
      "order": 1
    }
  },
  "required": [
    "issue_number",
    "repo_id"
  ]
}
    """)

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) breaks further subclassing;
        # left as-is because this file is SDK-generated.
        super(self.__class__, self).__init__(self.schema)
class GetIssueEventsOutput(komand.Output):
    # JSON schema describing the action's output: a list of ZenHub issue
    # events. Generated by the Komand SDK -- schema text intentionally untouched.
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "events": {
      "type": "array",
      "title": "Issue Events",
      "description": "List of ZenHub Issue Events",
      "items": {
        "$ref": "#/definitions/issue_event"
      },
      "order": 1
    }
  },
  "definitions": {
    "issue_event": {
      "type": "object",
      "title": "issue_event",
      "properties": {
        "created_at": {
          "type": "string",
          "title": "Created At",
          "displayType": "date",
          "description": "Date of creation",
          "format": "date-time",
          "order": 3
        },
        "from_estimate_value": {
          "type": "integer",
          "title": "From Estimate Value",
          "description": "From estimate value",
          "order": 6
        },
        "from_pipeline_name": {
          "type": "string",
          "title": "From Pipeline Name",
          "description": "From pipeline name",
          "order": 4
        },
        "to_estimate_value": {
          "type": "integer",
          "title": "To Estimate Value",
          "order": 7
        },
        "to_pipeline_name": {
          "type": "string",
          "title": "To Pipeline Name",
          "description": "To pipeline name",
          "order": 5
        },
        "type": {
          "type": "string",
          "title": "Type",
          "description": "Type",
          "order": 2
        },
        "user_id": {
          "type": "integer",
          "title": "User ID",
          "description": "User id",
          "order": 1
        }
      }
    }
  }
}
    """)

    def __init__(self):
        # NOTE(review): super(self.__class__, ...) breaks further subclassing;
        # left as-is because this file is SDK-generated.
        super(self.__class__, self).__init__(self.schema)
| StarcoderdataPython |
# Source repository: Miryad3108/schema.data.gouv.fr
# Public base URL of the schema.data.gouv.fr portal.
BASE_DOMAIN = "https://schema.data.gouv.fr"
# Documentation page describing how schema validation works.
VALIDATION_DOC_URL = "https://schema.data.gouv.fr/documentation/validation-schemas"
| StarcoderdataPython |
# models.py
"""
모델을 빌드한다.
"""
import tensorflow as tf
import options as opt
def build_model():
    """Build and compile the stacked-LSTM regression model."""
    layers = tf.keras.layers
    model = tf.keras.Sequential([
        layers.Input(shape=(opt.SIGHT, 1)),
        layers.LSTM(16, return_sequences=True, dropout=0.2),
        layers.LSTM(16, dropout=0.2),
        layers.Dropout(0.2),
        layers.Dense(opt.Y_N, activation=layers.LeakyReLU()),
    ])
    optimizer = tf.keras.optimizers.Adam(lr=opt.lr)
    # MSE loss is a natural fit for this regression task.
    model.compile(optimizer=optimizer, loss="mse")
    return model
if __name__ == "__main__":
    # Smoke test: build the model and print its architecture summary.
    model = build_model()
    model.summary()
| StarcoderdataPython |
# Determine whether n is a perfect power, i.e. n == base ** exponent with
# exponent >= 2.  Gives False at 2 and 3 because powers start from 2;
# 1 is special-cased since 1 == 1 ** k for every k.


def find_power_representation(n):
    """Return (base, exponent) with base ** exponent == n and exponent >= 2.

    Returns None when no such pair exists.  Bases are scanned in increasing
    order, so the smallest qualifying base is reported first.
    """
    base = 2
    # Once base * base exceeds n, no larger base can work either.
    while base * base <= n:
        value = base * base
        exponent = 2
        while value <= n:
            if value == n:
                return base, exponent
            value *= base  # multiply instead of recomputing base ** exponent
            exponent += 1
        base += 1
    return None


def main():
    """Prompt for n and report whether it is a perfect power."""
    n = int(input("Enter n: "))
    if n == 1:
        # 1 equals 1 raised to any power.
        print("Output: True\n" + str(n) + " can be expressed as " + str(1) + "^(0, 1, 2, 3, 4, 5, ......)")
        return
    result = find_power_representation(n)
    if result is None:
        print("Output: False")
    else:
        base, exponent = result
        print("Output: True\n" + str(n) + " can be expressed as " + str(base) + "^" + str(exponent))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Source repository: MSAdministrator/art-parser
import os
from ..core import Core
from jinja2 import Template
from pyattck import Attck
class Base(Core):
    # Path to the Jinja2 template that renders one atomic test as markdown
    # (resolved relative to the package root, two levels above this module).
    atomic_markdown_template = os.path.join(
        os.path.abspath(
            os.path.dirname(os.path.dirname(__file__))
        ),
        'data',
        'atomic_doc.jinja2'
    )
    # Path to the Jinja2 template that renders the technique matrix page.
    atomic_matrix_template = os.path.join(
        os.path.abspath(
            os.path.dirname(os.path.dirname(__file__))
        ),
        'data',
        'matrix.jinja2'
    )

    def __init__(self):
        # Helper callables exposed as globals inside rendered Jinja2 templates.
        # NOTE(review): the referenced methods (path_replacement, etc.) are not
        # defined in this class -- presumably inherited from Core or supplied by
        # a subclass; verify before refactoring.
        self.custom_jinja2_function_dict = {
            "path_replacement": self.path_replacement,
            "replace_command_string": self.replace_command_string,
            "format_strings_with_spaces": self.format_strings_with_spaces,
            "replace_string": self.replace_string,
            "create_path": self.create_path
        }

    def get_template(self, value):
        # Load a Jinja2 template from the given path and register helpers.
        with open(value) as file:
            template = Template(file.read())
            template.globals.update(self.custom_jinja2_function_dict)
            return template

    def get_tactic(self, attack_technique):
        # Return (name, id) of the FIRST tactic of the matching technique,
        # or None implicitly when no technique matches.
        # NOTE(review): self.attck is never assigned in this class -- presumably
        # created elsewhere (e.g. Attck() in Core or a subclass); verify.
        for technique in self.attck.enterprise.techniques:
            if technique.id == attack_technique:
                for tactic in technique.tactics:
                    return tactic.name, tactic.id

    def get_platforms(self, attack_technique):
        # Return the technique's platform list, or None when absent/unmatched.
        for technique in self.attck.enterprise.techniques:
            if technique.id == attack_technique:
                if technique.platforms:
                    return technique.platforms

    def get_technique_name(self, attack_technique):
        # Map an ATT&CK technique ID (e.g. 'T1003') to its display name.
        for technique in self.attck.enterprise.techniques:
            if technique.id == attack_technique:
                return technique.name
| StarcoderdataPython |
# distribution.py
"""
distribution.py
Author: <NAME>
Credit: https://developers.google.com/edu/python/sorting
Assignment:
Write and submit a Python program (distribution.py) that computes and displays
the distribution of characters in a given sample of text.
Output of your program should look like this:
Please enter a string of text (the bigger the better): The rain in Spain stays mainly in the plain.
The distribution of characters in "The rain in Spain stays mainly in the plain." is:
iiiiii
nnnnnn
aaaaa
sss
ttt
ee
hh
ll
pp
yy
m
r
Notice about this example:
* The text: 'The rain ... plain' is provided by the user as input to your program.
* Uppercase characters are converted to lowercase
* Spaces and punctuation marks are ignored completely.
* Characters that are more common appear first in the list.
* Where the same number of characters occur, the lines are ordered alphabetically.
For example, in the printout above, the letters e, h, l, p and y both occur twice
in the text and they are listed in the output in alphabetical order.
* Letters that do not occur in the text are not listed in the output at all.
"""
import string


def character_distribution(text):
    """Return runs like 'iiiiii' for each letter in *text*, most frequent first.

    Case-insensitive; non-letter characters are ignored.  Equal-frequency
    letters come out in alphabetical order because the runs are built
    alphabetically and ``sorted`` is stable.
    """
    lowered = text.lower()
    runs = [letter * lowered.count(letter)
            for letter in string.ascii_lowercase
            if letter in lowered]
    return sorted(runs, key=len, reverse=True)


def main():
    """Prompt for a string and print its character distribution."""
    # FIX: logic extracted into a pure, importable function; the original ran
    # input() at module import time, and str(input(...)) was redundant.
    text = input("Please enter a string of text (the bigger the better): ")
    print('The distribution of characters in "' + text + '" is:')
    for run in character_distribution(text):
        print(run)


if __name__ == "__main__":
    main()
1916900 | """
Wrap up PostgreSQL and PostGIS into a convenient class.
Examples
--------
Create a database and import a shapefile:
>>> import postgis_helpers as pGIS
>>> db = pGIS.PostgreSQL("my_database_name")
>>> db.create()
>>> db.import_geodata("bike_lanes", "http://url.to.shapefile")
>>> bike_gdf = db.query_as_geo_df("select * from bike_lanes")
"""
import os
import subprocess
import pandas as pd
import geopandas as gpd
import psycopg2
import sqlalchemy
from geoalchemy2 import Geometry, WKTElement
from typing import Union
from pathlib import Path
from .sql_helpers import sql_hex_grid_function_definition
from .general_helpers import now, report_time_delta, dt_as_time
from .geopandas_helpers import spatialize_point_dataframe
from .console import _console, RichStyle, RichSyntax
from .config_helpers import DEFAULT_DATA_INBOX, DEFAULT_DATA_OUTBOX
class PostgreSQL:
"""
This class encapsulates interactions with a ``PostgreSQL``
database. It leverages ``psycopg2``, ``sqlalchemy``, and ``geoalchemy2``
as needed. It stores connection information that includes:
- database name
- username & password
- host & port
- superusername & password
- the SQL cluster's master database
- ``verbosity`` level, which controls how much gets printed out
"""
def __init__(
self,
working_db: str,
un: str = "postgres",
pw: str = "<PASSWORD>",
host: str = "localhost",
port: int = 5432,
sslmode: str = None,
super_db: str = "postgres",
super_un: str = "postgres",
super_pw: str = "<PASSWORD>",
active_schema: str = "public",
verbosity: str = "full",
data_inbox: Path = DEFAULT_DATA_INBOX,
data_outbox: Path = DEFAULT_DATA_OUTBOX,
):
"""
Initialize a database object with placeholder values.
:param working_db: Name of the database you want to connect to
:type working_db: str
:param un: User name within the database, defaults to "postgres"
:type un: str, optional
:param pw: Password for the user, defaults to "<PASSWORD>"
:type pw: str, optional
:param host: Host where the database lives, defaults to "localhost"
:type host: str, optional
:param port: Port number on the host, defaults to 5432
:type port: int, optional
:param sslmode: False or string like "require", defaults to False
:type sslmode: Union[bool, str], optional
:param super_db: SQL cluster root db, defaults to "postgres"
:type super_db: str, optional
:param super_un: SQL cluster root user, defaults to "postgres"
:type super_un: str, optional
:param super_pw: SQL cluster root password, defaults to "<PASSWORD>"
:type super_pw: str, optional
:param verbosity: Control how much gets printed out to the console,
defaults to ``"full"``. Other options include
``"minimal"`` and ``"errors"``
:type verbosity: str, optional
TODO: add data box, print style, schema params
"""
self.DATABASE = working_db
self.USER = un
self.PASSWORD = pw
self.HOST = host
self.PORT = port
self.SSLMODE = sslmode
self.SUPER_DB = super_db
self.SUPER_USER = super_un
self.SUPER_PASSWORD = <PASSWORD>
self.ACTIVE_SCHEMA = active_schema
for folder in [data_inbox, data_outbox]:
if not folder.exists():
folder.mkdir(parents=True)
self.DATA_INBOX = data_inbox
self.DATA_OUTBOX = data_outbox
verbosity_options = ["full", "minimal", "errors"]
if verbosity in verbosity_options:
self.VERBOSITY = verbosity
else:
msg = f"verbosity must be one of: {verbosity_options}"
raise ValueError(msg)
if not self.exists():
self.db_create()
msg = f":person_surfing::water_wave: {self.DATABASE} @ {self.HOST} :water_wave::water_wave:"
self._print(3, msg)
def connection_details(self) -> dict:
"""
Return a dictionary that can be used to
instantiate other database connections on the
same SQL cluster.
:return: Dictionary with all of the SQL cluster connection info
:rtype: dict
"""
details = {
"un": self.USER,
"pw": self.PASSWORD,
"host": self.HOST,
"port": self.PORT,
"sslmode": self.SSLMODE,
"super_db": self.SUPER_DB,
"super_un": self.SUPER_USER,
"super_pw": self.SUPER_PASSWORD,
}
return details
def _print(self, level: int, message: str):
"""
Messages will print out depending on the VERBOSITY property
and the importance level provided.
VERBOSITY options include: ``full``, ``minimal``, and ``errors``
1 = Only prints in ``full``
2 = Prints in ``full`` and ``minimal``,
but does not print in ``errors``
3 = Always prints out
:param level: [description]
:type level: int
:param message: [description]
:type message: str
"""
print_out = False
if level == 1:
prefix = "\t"
style = RichStyle()
elif level == 2:
prefix = ":backhand_index_pointing_right: "
style = RichStyle()
elif level == 3:
prefix = ""
# style.color = "blue"
style = RichStyle(color="green4", bold=True)
if self.VERBOSITY == "full" and level in [1, 2, 3]:
print_out = True
elif self.VERBOSITY == "minimal" and level in [2, 3]:
print_out = True
elif self.VERBOSITY == "errors" and level in [3]:
print_out = True
if print_out:
if type(message) == str:
msg = prefix + message
_console.print(msg, style=style)
elif type(message) == RichSyntax:
_console.print(message)
else:
_console.print(f"Type error: {type(message)}")
def timer(func):
"""
Decorator function that will record &
report on how long it takes for another
function to execute.
:param func: the function to be timed
:type func: function
"""
def magic(self, *args, **kwargs):
start_time = now()
msg = f":hourglass_not_done: starting @ {dt_as_time(start_time)}"
self._print(1, msg)
function_return_value = func(self, *args, **kwargs)
end_time = now()
# Print runtime out when "full"
msg = f":hourglass_done: finished @ {dt_as_time(end_time)}"
self._print(1, msg)
runtime_msg = report_time_delta(start_time, end_time)
self._print(1, runtime_msg)
return function_return_value
return magic
def add_schema(self, schema: str) -> None:
"""
Add a schema if it does not yet exist.
:param schema: any valid name for a SQL schema
:type query: str
"""
self.execute(f"CREATE SCHEMA IF NOT EXISTS {schema}")
# QUERY the database
# ------------------
def query_as_list(self, query: str, super_uri: bool = False) -> list:
"""
Query the database and get the result as a ``list``
:param query: any valid SQL query string
:type query: str
:param super_uri: flag that will execute against the
super db/user, defaults to False
:type super_uri: bool, optional
:return: list with each item being a row from the query result
:rtype: list
"""
self._print(1, "... querying ...")
code_w_highlight = RichSyntax(query, "sql", theme="monokai", line_numbers=True)
self._print(1, code_w_highlight)
uri = self.uri(super_uri=super_uri)
connection = psycopg2.connect(uri)
cursor = connection.cursor()
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
connection.close()
return result
def query_as_df(self, query: str, super_uri: bool = False) -> pd.DataFrame:
"""
Query the database and get the result as a ``pandas.DataFrame``
:param query: any valid SQL query string
:type query: str
:param super_uri: flag that will execute against the
super db/user, defaults to False
:type super_uri: bool, optional
:return: dataframe with the query result
:rtype: pd.DataFrame
"""
self._print(1, "... querying ...")
code_w_highlight = RichSyntax(query, "sql", theme="monokai", line_numbers=True)
self._print(1, code_w_highlight)
uri = self.uri(super_uri=super_uri)
engine = sqlalchemy.create_engine(uri)
df = pd.read_sql(query, engine)
engine.dispose()
return df
def query_as_geo_df(self, query: str, geom_col: str = "geom") -> gpd.GeoDataFrame:
"""
Query the database and get the result as a ``geopandas.GeoDataFrame``
:param query: any valid SQL query string
:type query: str
:param geom_col: name of the column that holds the geometry,
defaults to 'geom'
:type geom_col: str
:return: geodataframe with the query result
:rtype: gpd.GeoDataFrame
"""
self._print(1, "... querying ...")
code_w_highlight = RichSyntax(query, "sql", theme="monokai", line_numbers=True)
self._print(1, code_w_highlight)
connection = psycopg2.connect(self.uri())
gdf = gpd.GeoDataFrame.from_postgis(query, connection, geom_col=geom_col)
connection.close()
return gdf
def query_as_single_item(self, query: str, super_uri: bool = False):
"""
Query the database and get the result as a SINGLETON.
For when you want to transform ``[(True,)]`` into ``True``
:param query: any valid SQL query string
:type query: str
:param super_uri: flag that will execute against the
super db/user, defaults to False
:type super_uri: bool, optional
:return: result from the query
:rtype: singleton
"""
self._print(1, "... querying ...")
code_w_highlight = RichSyntax(query, "sql", theme="monokai", line_numbers=True)
self._print(1, code_w_highlight)
result = self.query_as_list(query, super_uri=super_uri)
return result[0][0]
# EXECUTE queries to make them persistent
# ---------------------------------------
def execute(self, query: str, autocommit: bool = False):
"""
Execute a query for a persistent result in the database.
Use ``autocommit=True`` when creating and deleting databases.
:param query: any valid SQL query string
:type query: str
:param autocommit: flag that will execute against the
super db/user, defaults to False
:type autocommit: bool, optional
"""
self._print(1, "... executing ...")
if len(query) < 5000:
code_w_highlight = RichSyntax(query, "sql", theme="monokai", line_numbers=True)
self._print(1, code_w_highlight)
uri = self.uri(super_uri=autocommit)
connection = psycopg2.connect(uri)
if autocommit:
connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = connection.cursor()
cursor.execute(query)
cursor.close()
connection.commit()
connection.close()
# DATABASE-level helper functions
# -------------------------------
def uri(self, super_uri: bool = False) -> str:
"""
Create a connection string URI for this database.
:param super_uri: Flag that will provide access to cluster
root if True, defaults to False
:type super_uri: bool, optional
:return: Connection string URI for PostgreSQL
:rtype: str
"""
# If super_uri is True, use the super un/pw/db
if super_uri:
user = self.SUPER_USER
pw = self.SUPER_PASSWORD
database = self.SUPER_DB
# Otherwise, use the normal connection info
else:
user = self.USER
pw = self.PASSWORD
database = self.DATABASE
connection_string = f"postgresql://{user}:{pw}@{self.HOST}:{self.PORT}/{database}"
if self.SSLMODE:
connection_string += f"?sslmode={self.SSLMODE}"
return connection_string
def exists(self) -> bool:
"""
Does this database exist yet? Returns True or False
:return: True or False if the database exists on the cluster
:rtype: bool
"""
sql_db_exists = f"""
SELECT EXISTS(
SELECT datname FROM pg_catalog.pg_database
WHERE lower(datname) = lower('{self.DATABASE}')
);
"""
return self.query_as_single_item(sql_db_exists, super_uri=True)
def db_create(self) -> None:
"""
Create this database if it doesn't exist yet
"""
if self.exists():
self._print(1, f"Database {self.DATABASE} already exists")
else:
self._print(3, f"Creating database: {self.DATABASE} on {self.HOST}")
sql_make_db = f"CREATE DATABASE {self.DATABASE};"
self.execute(sql_make_db, autocommit=True)
# Add PostGIS if not already installed
if "geometry_columns" in self.all_tables_as_list():
self._print(1, "PostGIS comes pre-installed")
else:
self._print(1, "Installing PostGIS")
sql_add_postgis = "CREATE EXTENSION postgis;"
self.execute(sql_add_postgis)
# Load the custom Hexagon Grid function
self._print(1, "Installing custom hexagon grid function")
self.execute(sql_hex_grid_function_definition)
def db_delete(self) -> None:
"""Delete this database (if it exists)"""
if not self.exists():
self._print(1, "This database does not exist, nothing to delete!")
else:
self._print(3, f"Deleting database: {self.DATABASE} on {self.HOST}")
sql_drop_db = f"DROP DATABASE {self.DATABASE};"
self.execute(sql_drop_db, autocommit=True)
@timer
def db_export_pgdump_file(self, output_folder: Path = None) -> Path:
"""
Save this database to a ``.sql`` file.
Requires ``pg_dump`` to be accessible via the command line.
:param output_folder: Folder path to write .sql file to
:type output_folder: pathlib.Path
:return: Filepath to SQL file that was created
:rtype: str
"""
if not output_folder:
output_folder = self.DATA_OUTBOX
# Get a string for today's date and time,
# like '2020_06_10' and '14_13_38'
rightnow = str(now())
today = rightnow.split(" ")[0].replace("-", "_")
timestamp = rightnow.split(" ")[1].replace(":", "_").split(".")[0]
# Use pg_dump to save the database to disk
sql_name = f"{self.DATABASE}_d_{today}_t_{timestamp}.sql"
sql_file = output_folder / sql_name
self._print(2, f"Exporting {self.DATABASE} to {sql_file}")
system_call = f'pg_dump {self.uri()} > "{sql_file}" '
os.system(system_call)
return sql_file
@timer
def db_load_pgdump_file(self, sql_dump_filepath: Path, overwrite: bool = True) -> None:
"""
Populate the database by loading from a SQL file that
was previously created by ``pg_dump``.
:param sql_dump_filepath: filepath to the ``.sql`` dump file
:type sql_dump_filepath: Union[Path, str]
:param overwrite: flag that controls whether or not this
function will replace the existing database
:type overwrite: bool
"""
if self.exists():
if overwrite:
self.db_delete()
self.db_create()
else:
self._print(
3,
f"Database named {self.DATABASE} already exists and overwrite=False!",
)
return
self._print(2, f"Loading {self.DATABASE} from {sql_dump_filepath}")
system_command = f'psql "{self.uri()}" < "{sql_dump_filepath}"'
os.system(system_command)
# LISTS of things inside this database (or the cluster at large)
# --------------------------------------------------------------
def all_tables_as_list(self, schema: str = None) -> list:
"""
Get a list of all tables in the database.
Optionally filter to a schema
:param schema: name of the schema to filter by
:type schema: str
:return: List of tables in the database
:rtype: list
"""
sql_all_tables = """
SELECT table_name
FROM information_schema.tables
"""
if schema:
sql_all_tables += f"""
WHERE table_schema = '{schema}'
"""
tables = self.query_as_list(sql_all_tables)
return [t[0] for t in tables]
def all_spatial_tables_as_dict(self, schema: str = None) -> dict:
"""
Get a dictionary of all spatial tables in the database.
Return value is formatted as: ``{table_name: epsg}``
:return: Dictionary with spatial table names as keys
and EPSG codes as values.
:rtype: dict
"""
sql_all_spatial_tables = """
SELECT f_table_name AS tblname, srid
FROM geometry_columns
"""
if schema:
sql_all_spatial_tables += f"""
WHERE f_table_schema = '{schema}'
"""
spatial_tables = self.query_as_list(sql_all_spatial_tables)
return {t[0]: t[1] for t in spatial_tables}
def all_databases_on_cluster_as_list(self) -> list:
"""
Get a list of all databases on this SQL cluster.
:return: List of all databases on the cluster
:rtype: list
"""
sql_all_databases = f"""
SELECT datname FROM pg_database
WHERE datistemplate = false
AND datname != '{self.SUPER_DB}'
AND LEFT(datname, 1) != '_';
"""
database_list = self.query_as_list(sql_all_databases, super_uri=True)
return [d[0] for d in database_list]
# TABLE-level helper functions
# ----------------------------
def table_columns_as_list(self, table_name: str, schema: str = None) -> list:
"""
Get a list of all columns in a table.
:param table_name: Name of the table
:type table_name: str
:return: List of all columns in a table
:rtype: list
"""
if not schema:
schema = self.ACTIVE_SCHEMA
sql_all_cols_in_table = f"""
SELECT column_name
FROM information_schema.columns
WHERE table_schema = '{schema}'
AND table_name = '{table_name}';
"""
column_list = self.query_as_list(sql_all_cols_in_table)
column_names = [c[0] for c in column_list]
return column_names
def table_add_or_nullify_column(
self, table_name: str, column_name: str, column_type: str, schema: str = None
) -> None:
"""
Add a new column to a table.
Overwrite to ``NULL`` if it already exists.
:param table_name: Name of the table
:type table_name: str
:param column_name: Name of the new column
:type column_name: str
:param column_type: Data type of the column. Must be valid in PgSQL
:type column_type: str
"""
if not schema:
schema = self.ACTIVE_SCHEMA
msg = f"Adding {column_type} col named {column_name} to {schema}.{table_name}"
self._print(1, msg)
existing_columns = self.table_columns_as_list(table_name, schema=schema)
if column_name in existing_columns:
query = f"""
UPDATE {schema}.{table_name} SET {column_name} = NULL;
"""
else:
query = f"""
ALTER TABLE {schema}.{table_name}
ADD COLUMN {column_name} {column_type};
"""
self.execute(query)
def table_add_uid_column(
self, table_name: str, schema: str = None, uid_col: str = "uid"
) -> None:
"""
Add a serial primary key column named 'uid' to the table.
:param table_name: Name of the table to add a uid column to
:type table_name: str
"""
if not schema:
schema = self.ACTIVE_SCHEMA
self._print(1, f"Adding uid column to {schema}.{table_name}")
sql_unique_id_column = f"""
ALTER TABLE {schema}.{table_name} DROP COLUMN IF EXISTS {uid_col};
ALTER TABLE {schema}.{table_name} ADD {uid_col} serial PRIMARY KEY;
"""
self.execute(sql_unique_id_column)
def table_add_spatial_index(self, table_name: str, schema: str = None) -> None:
"""
Add a spatial index to the 'geom' column in the table.
:param table_name: Name of the table to make the index on
:type table_name: str
"""
if not schema:
schema = self.ACTIVE_SCHEMA
self._print(1, f"Creating a spatial index on {schema}.{table_name}")
sql_make_spatial_index = f"""
CREATE INDEX ON {schema}.{table_name}
USING GIST (geom);
"""
self.execute(sql_make_spatial_index)
def table_reproject_spatial_data(
self,
table_name: str,
old_epsg: Union[int, str],
new_epsg: Union[int, str],
geom_type: str,
schema: str = None,
) -> None:
"""
Transform spatial data from one EPSG into another EPSG.
This can also be used with the same old and new EPSG. This
is useful when making a new geotable, as this SQL code
will update the table's entry in the ``geometry_columns`` table.
:param table_name: name of the table
:type table_name: str
:param old_epsg: Current EPSG of the data
:type old_epsg: Union[int, str]
:param new_epsg: Desired new EPSG for the data
:type new_epsg: Union[int, str]
:param geom_type: PostGIS-valid name of the
geometry you're transforming
:type geom_type: str
"""
if not schema:
schema = self.ACTIVE_SCHEMA
msg = f"Reprojecting {schema}.{table_name} from {old_epsg} to {new_epsg}"
self._print(1, msg)
sql_transform_geom = f"""
ALTER TABLE {schema}.{table_name}
ALTER COLUMN geom TYPE geometry({geom_type}, {new_epsg})
USING ST_Transform( ST_SetSRID( geom, {old_epsg} ), {new_epsg} );
"""
self.execute(sql_transform_geom)
def table_delete(self, table_name: str, schema: str = None) -> None:
"""
Delete the table, cascade.
:param table_name: Name of the table you want to delete.
:type table_name: str
"""
if not schema:
schema = self.ACTIVE_SCHEMA
self._print(2, f"Deleting table: {schema}.{table_name}")
sql_drop_table = f"""
DROP TABLE {schema}.{table_name} CASCADE;
"""
self.execute(sql_drop_table)
def table_spatialize_points(
    self,
    src_table: str,
    x_lon_col: str,
    y_lat_col: str,
    epsg: int,
    if_exists: str = "replace",
    new_table: str = None,
    schema: str = None,
) -> gpd.GeoDataFrame:
    """
    Build a spatial copy of a plain table that stores X/Y coordinate columns.
    Reads the source table, builds point geometries from the coordinate
    columns, and writes the result back to the database as a new geotable.
    :param src_table: name of the non-spatial source table
    :type src_table: str
    :param x_lon_col: column holding the X / longitude value
    :type x_lon_col: str
    :param y_lat_col: column holding the Y / latitude value
    :type y_lat_col: str
    :param epsg: EPSG code of the coordinate values
    :type epsg: int
    :param if_exists: pandas argument to handle overwriting data,
                      defaults to "replace"
    :type if_exists: str, optional
    :param new_table: name of the spatial table to create,
                      defaults to "<src_table>_spatial"
    :type new_table: str, optional
    :param schema: schema of the source (and new) table,
                   defaults to the active schema
    :type schema: str, optional
    :return: the spatialized geodataframe that was written to the database
    :rtype: gpd.GeoDataFrame
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    if not new_table:
        new_table = f"{src_table}_spatial"
    df = self.query_as_df(f"SELECT * FROM {schema}.{src_table};")
    gdf = spatialize_point_dataframe(df, x_lon_col=x_lon_col, y_lat_col=y_lat_col, epsg=epsg)
    # BUG FIX: forward the schema so the spatial copy lands in the same
    # schema it was read from (previously it always went to the active schema)
    self.import_geodataframe(gdf, new_table, if_exists=if_exists, schema=schema)
    self._print(2, f"Spatialized points from {src_table} into {new_table}")
    # BUG FIX: the signature promises a GeoDataFrame; return it
    return gdf
# IMPORT data into the database
# -----------------------------
def import_dataframe(
    self,
    dataframe: pd.DataFrame,
    table_name: str,
    if_exists: str = "fail",
    schema: str = None,
) -> None:
    """
    Import an in-memory ``pandas.DataFrame`` to the SQL database.
    Enforce clean column names (without spaces, caps, or weird symbols).
    Note: the dataframe's columns are renamed in place.
    :param dataframe: dataframe with data you want to save
    :type dataframe: pd.DataFrame
    :param table_name: name of the table that will get created
    :type table_name: str
    :param if_exists: pandas argument to handle overwriting data,
                      defaults to "fail"
    :type if_exists: str, optional
    :param schema: schema to import into, defaults to the active schema
    :type schema: str, optional
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    self._print(2, f"Importing dataframe to: {schema}.{table_name}")
    # Replace "Column Name" with "column_name"
    dataframe.columns = dataframe.columns.str.replace(" ", "_", regex=False)
    dataframe.columns = [x.lower() for x in dataframe.columns]
    # Remove '.', '-', '(', ')' and '+' from column names,
    # i.e. 'geo.display-label' becomes 'geodisplaylabel'.
    # BUG FIX: regex=False is required here -- '.', '(', ')' and '+' are
    # regex metacharacters, and with regex=True (the pandas default before
    # 2.0) replacing "." would blank out entire column names.
    for s in [".", "-", "(", ")", "+"]:
        dataframe.columns = dataframe.columns.str.replace(s, "", regex=False)
    # Write to database after making sure schema exists
    self.add_schema(schema)
    engine = sqlalchemy.create_engine(self.uri())
    dataframe.to_sql(table_name, engine, if_exists=if_exists, schema=schema)
    engine.dispose()
def import_geodataframe(
    self,
    gdf: gpd.GeoDataFrame,
    table_name: str,
    src_epsg: Union[int, bool] = False,
    if_exists: str = "replace",
    schema: str = None,
    uid_col: str = "uid",
):
    """
    Import an in-memory ``geopandas.GeoDataFrame`` to the SQL database.
    Note: the geodataframe is sanitized in place (columns lower-cased,
    renamed, and dropped) before writing.
    :param gdf: geodataframe with data you want to save
    :type gdf: gpd.GeoDataFrame
    :param table_name: name of the table that will get created
    :type table_name: str
    :param src_epsg: The source EPSG code can be passed as an integer.
                     By default this function will try to read the EPSG
                     code directly, but some spatial data is funky and
                     requires that you explicitly declare its projection.
                     Defaults to False
    :type src_epsg: Union[int, bool], optional
    :param if_exists: pandas argument to handle overwriting data,
                      defaults to "replace"
    :type if_exists: str, optional
    :param schema: schema to import into, defaults to the active schema
    :type schema: str, optional
    :param uid_col: name of the serial primary key column added after
                    import, defaults to "uid"
    :type uid_col: str, optional
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    # Read the geometry type. It's possible there are both MULTIPOLYGONS
    # and POLYGONS; taking the longest name grabs the MULTI variant.
    geom_types = list(gdf.geometry.geom_type.unique())
    geom_typ = max(geom_types, key=len).upper()
    self._print(2, f"Importing {geom_typ} geodataframe to: {schema}.{table_name}")
    # Manually set the EPSG if the user passes one
    if src_epsg:
        gdf.crs = f"epsg:{src_epsg}"
        epsg_code = src_epsg
    # Otherwise, try to get the EPSG value directly from the geodataframe
    else:
        # Older gdfs have CRS stored as a dict: {'init': 'epsg:4326'}
        if isinstance(gdf.crs, dict):
            epsg_code = int(gdf.crs["init"].split(" ")[0].split(":")[1])
        # Now geopandas has a different approach
        else:
            epsg_code = int(str(gdf.crs).split(":")[1])
    # Sanitize the columns before writing to the database:
    # make all column names lower case
    gdf.columns = [x.lower() for x in gdf.columns]
    # Replace the 'geom' column with 'geometry'.
    # BUG FIX: pandas 2.0 removed the positional ``axis`` argument, so
    # ``gdf.drop("geom", 1)`` now raises a TypeError; use columns= instead.
    if "geom" in gdf.columns:
        gdf["geometry"] = gdf["geom"]
        gdf.drop(columns=["geom"], inplace=True)
    # Drop the 'gid' column (it gets rebuilt as the to_sql index below)
    if "gid" in gdf.columns:
        gdf.drop(columns=["gid"], inplace=True)
    # Rename 'uid' to 'old_uid' so the fresh serial PK can take its place
    if uid_col in gdf.columns:
        gdf[f"old_{uid_col}"] = gdf[uid_col]
        gdf.drop(columns=[uid_col], inplace=True)
    # Build a 'geom' column using geoalchemy2
    # and drop the source 'geometry' column
    gdf["geom"] = gdf["geometry"].apply(lambda x: WKTElement(x.wkt, srid=epsg_code))
    gdf.drop(columns=["geometry"], inplace=True)
    # Write geodataframe to SQL database
    self.add_schema(schema)
    engine = sqlalchemy.create_engine(self.uri())
    gdf.to_sql(
        table_name,
        engine,
        if_exists=if_exists,
        index=True,
        index_label="gid",
        schema=schema,
        dtype={"geom": Geometry(geom_typ, srid=epsg_code)},
    )
    engine.dispose()
    self.table_add_uid_column(table_name, schema=schema, uid_col=uid_col)
    self.table_add_spatial_index(table_name, schema=schema)
@timer
def import_csv(
    self,
    table_name: str,
    csv_path: Path,
    if_exists: str = "append",
    schema: str = None,
    **csv_kwargs,
):
    r"""
    Read a CSV from disk into a dataframe, then write it to the database.
    :param table_name: Name of the table you want to create
    :type table_name: str
    :param csv_path: Path to data. Anything accepted by Pandas works here.
    :type csv_path: Path
    :param if_exists: How to handle overwriting existing data,
                      defaults to ``"append"``
    :type if_exists: str, optional
    :param schema: schema to import into, defaults to the active schema
    :type schema: str, optional
    :param \**csv_kwargs: any kwargs for ``pd.read_csv()`` are valid here.
    :return: the dataframe that was imported
    :rtype: pd.DataFrame
    """
    target_schema = schema or self.ACTIVE_SCHEMA
    self._print(2, "Loading CSV to dataframe")
    # Read the CSV with whatever kwargs were passed
    dataframe = pd.read_csv(csv_path, **csv_kwargs)
    self.import_dataframe(dataframe, table_name, if_exists=if_exists, schema=target_schema)
    return dataframe
def import_geodata(
    self,
    table_name: str,
    data_path: Path,
    src_epsg: Union[int, bool] = False,
    if_exists: str = "fail",
    schema: str = None,
):
    """
    Load geographic data into a geodataframe, then save to SQL.
    :param table_name: Name of the table you want to create
    :type table_name: str
    :param data_path: Path to the data. Anything accepted by Geopandas
                      works here.
    :type data_path: Path
    :param src_epsg: Manually declare the source EPSG if needed,
                     defaults to False
    :type src_epsg: Union[int, bool], optional
    :param if_exists: pandas argument to handle overwriting data,
                      defaults to "fail"
    :type if_exists: str, optional
    :param schema: schema to import into, defaults to the active schema
    :type schema: str, optional
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    self._print(2, "Loading spatial data to geodataframe")
    # Read the data into a geodataframe
    gdf = gpd.read_file(data_path)
    # Drop null geometries
    gdf = gdf[gdf["geometry"].notnull()]
    # Explode multipart to singlepart and reset the index,
    # keeping the post-explode index in an 'explode' column
    gdf = gdf.explode()
    gdf["explode"] = gdf.index
    gdf = gdf.reset_index()
    self.import_geodataframe(
        gdf, table_name, src_epsg=src_epsg, if_exists=if_exists, schema=schema
    )
# CREATE data within the database
# -------------------------------
def make_geotable_from_query(
    self,
    query: str,
    new_table_name: str,
    geom_type: str,
    epsg: int,
    schema: str = None,
    uid_col: str = "uid",
) -> None:
    """
    Create a new spatial table from a SELECT query that returns a
    ``geom`` column, then add a uid column and a spatial index, and
    register the geometry in the ``geometry_columns`` metadata table.
    :param query: SQL SELECT statement whose result (including a
                  ``geom`` column) becomes the new table
    :type query: str
    :param new_table_name: name of the table to create
                           (dropped first if it already exists)
    :type new_table_name: str
    :param geom_type: PostGIS geometry type of the result,
                      e.g. "POINT" or "MULTIPOLYGON"
    :type geom_type: str
    :param epsg: EPSG code of the query's geometry
    :type epsg: int
    :param schema: target schema, defaults to the active schema
    :type schema: str, optional
    :param uid_col: name of the serial primary key column to add,
                    defaults to "uid"
    :type uid_col: str, optional
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    self._print(2, f"Making new geotable in DB : {new_table_name}")
    valid_geom_types = [
        "POINT",
        "MULTIPOINT",
        "POLYGON",
        "MULTIPOLYGON",
        "LINESTRING",
        "MULTILINESTRING",
    ]
    # Bail out (without raising) when given an unsupported geometry type
    if geom_type.upper() not in valid_geom_types:
        for msg in [
            f"Geometry type of {geom_type} is not valid.",
            f"Please use one of the following: {valid_geom_types}",
            "Aborting",
        ]:
            self._print(3, msg)
        return
    sql_make_table_from_query = f"""
        DROP TABLE IF EXISTS {schema}.{new_table_name};
        CREATE TABLE {schema}.{new_table_name} AS
        {query}
    """
    self.add_schema(schema)
    self.execute(sql_make_table_from_query)
    self.table_add_uid_column(new_table_name, schema=schema, uid_col=uid_col)
    self.table_add_spatial_index(new_table_name, schema=schema)
    # Reprojecting with the same old/new EPSG refreshes the table's
    # entry in geometry_columns
    self.table_reproject_spatial_data(
        new_table_name, epsg, epsg, geom_type=geom_type.upper(), schema=schema
    )
def make_hexagon_overlay(
    self,
    new_table_name: str,
    table_to_cover: str,
    desired_epsg: int,
    hexagon_size: float,
    schema: str = None,
) -> None:
    """
    Create a new spatial hexagon grid covering another
    spatial table. EPSG must be specified for the hexagons,
    as well as the size in square KM.
    Requires the ``hex_grid()`` SQL function to exist in the database.
    :param new_table_name: Name of the new table to create
    :type new_table_name: str
    :param table_to_cover: Name of the existing table you want to cover
    :type table_to_cover: str
    :param desired_epsg: integer for EPSG you want the hexagons to be in
    :type desired_epsg: int
    :param hexagon_size: Size of the hexagons, 1 = 1 square KM
    :type hexagon_size: float
    :param schema: schema for the new table, defaults to the active schema
    :type schema: str, optional
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    self._print(2, f"Creating hexagon table named: {schema}.{new_table_name}")
    # BUG FIX: dropped the old "WITH (OIDS=FALSE)" clause. It was already
    # the default behavior, and PostgreSQL 12 removed support for the
    # syntax entirely, so CREATE TABLE failed on modern servers.
    sql_create_hex_grid = f"""
        DROP TABLE IF EXISTS {schema}.{new_table_name};
        CREATE TABLE {schema}.{new_table_name} (
            gid SERIAL NOT NULL PRIMARY KEY,
            geom GEOMETRY('POLYGON', {desired_epsg}, 2) NOT NULL
        );
        INSERT INTO {schema}.{new_table_name} (geom)
        SELECT
            hex_grid(
                {hexagon_size},
                (select st_xmin(st_transform(st_collect(geom), 4326))
                 from {schema}.{table_to_cover}),
                (select st_ymin(st_transform(st_collect(geom), 4326))
                 from {schema}.{table_to_cover}),
                (select st_xmax(st_transform(st_collect(geom), 4326))
                 from {schema}.{table_to_cover}),
                (select st_ymax(st_transform(st_collect(geom), 4326))
                 from {schema}.{table_to_cover}),
                4326,
                {desired_epsg},
                {desired_epsg}
            );
    """
    self.add_schema(schema)
    self.execute(sql_create_hex_grid)
    self.table_add_spatial_index(new_table_name, schema=schema)
    # TODO: reproject?
# EXPORT data to file / disk
# --------------------------
@timer
def export_shapefile(
    self,
    table_name: str,
    output_folder: Path,
    where_clause: str = None,
    schema: str = None,
) -> gpd.GeoDataFrame:
    """Save a spatial SQL table to shapefile.
    Add an optional filter with the ``where_clause``:
    ``'WHERE speed_limit <= 35'``
    :param table_name: Name of the table to export
    :type table_name: str
    :param output_folder: Folder path to write to
    :type output_folder: Path
    :param where_clause: Any valid SQL where clause, defaults to None
    :type where_clause: str, optional
    :param schema: schema of the table, defaults to the active schema
    :type schema: str, optional
    :return: the geodataframe that was written to disk
    :rtype: gpd.GeoDataFrame
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    self._print(2, f"Exporting {schema}.{table_name} to shapefile")
    query = f"SELECT * FROM {schema}.{table_name} "
    if where_clause:
        query += where_clause
        self._print(1, f"WHERE clause applied: {where_clause}")
    gdf = self.query_as_geo_df(query)
    # Force any boolean columns into strings
    # (the shapefile format has no native boolean field type)
    for c in gdf.columns:
        datatype = gdf[c].dtype.name
        if datatype == "bool":
            gdf[c] = gdf[c].astype(str)
    output_path = os.path.join(output_folder, f"{table_name}.shp")
    gdf.to_file(output_path)
    self._print(1, f"Saved to {output_path}")
    return gdf
def export_all_shapefiles(self, output_folder: Path) -> None:
    """
    Export every spatial table in the database to shapefile.
    :param output_folder: Folder path to write to
    :type output_folder: Path
    """
    for spatial_table in self.all_spatial_tables_as_dict():
        self.export_shapefile(spatial_table, output_folder)
# IMPORT/EXPORT data with shp2pgsql / pgsql2shp
# ---------------------------------------------
def pgsql2shp(
    self, table_name: str, output_folder: Path = None, extra_args: list = None
) -> Path:
    """
    Dump a spatial table to shapefile with the command-line ``pgsql2shp``
    utility.
    ``extra_args`` is a list of tuples, passed in as
    ``[(flag1, val1), (flag2, val2)]``, for example
    ``extra_args = [("-g", "custom_geom_column"), ("-b", "")]``.
    For more info, see
    http://www.postgis.net/docs/manual-1.3/ch04.html#id436110
    TODO: check if pgsql2shp exists and exit early if not
    TODO: check if schema is supported
    :param table_name: name of the spatial table to dump
    :type table_name: str
    :param output_folder: output folder, defaults to DATA_OUTBOX
    :type output_folder: Path, optional
    :param extra_args: extra (flag, value) pairs for pgsql2shp,
                       defaults to None
    :type extra_args: list, optional
    :return: path to the newly created shapefile
    :rtype: Path
    """
    # Each table gets its own subfolder under the output folder
    target_dir = (output_folder or self.DATA_OUTBOX) / table_name
    if not target_dir.exists():
        target_dir.mkdir(parents=True)
    shp_basename = target_dir / table_name
    # Assemble the shell command: output file, connection arguments,
    # any user-supplied extras, then the database and table names.
    # NOTE(review): the DB password appears on the command line here,
    # which leaks via process listings -- consider PGPASSWORD instead.
    parts = [f'pgsql2shp -f "{shp_basename}"']
    connection_args = [
        ("-h", self.HOST),
        ("-p", self.PORT),
        ("-u", self.USER),
        ("-P", self.PASSWORD),
    ]
    for flag, value in connection_args + list(extra_args or []):
        parts.append(f"{flag} {value}")
    parts.append(f"{self.DATABASE} {table_name}")
    cmd = " ".join(parts)
    subprocess.call(cmd, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
    self._print(2, cmd)
    self._print(2, f"Exported {table_name} to {shp_basename}")
    return target_dir / f"{table_name}.shp"
def shp2pgsql(self, table_name: str, src_shapefile: Path, new_epsg: int = None) -> str:
    """
    Load a shapefile into the database with the command-line ``shp2pgsql``
    utility, optionally reprojecting on the way in.
    TODO: add schema option
    :param table_name: name of the table to create
    :type table_name: str
    :param src_shapefile: path to the source ``.shp`` file
    :type src_shapefile: Path
    :param new_epsg: EPSG to reproject into, defaults to None (keep source)
    :type new_epsg: int, optional
    :return: the shell command that was executed
    :rtype: str
    """
    # BUG FIX: strip only a trailing ".shp" extension. The previous
    # str.replace(".shp", "") also mangled paths that contain ".shp"
    # anywhere else (e.g. a folder named "my.shp_files").
    shapefile_without_extension = str(src_shapefile)
    if shapefile_without_extension.endswith(".shp"):
        shapefile_without_extension = shapefile_without_extension[: -len(".shp")]
    # Default flags: -d drop/recreate, -e no transaction,
    # -I build a spatial index, -S generate simple (non-MULTI) geometries
    cmd = "shp2pgsql -d -e -I -S"
    # Use geopandas to figure out the source EPSG
    src_epsg = gpd.read_file(src_shapefile).crs.to_epsg()
    if new_epsg:
        cmd += f" -s {src_epsg}:{new_epsg}"
    else:
        cmd += f" -s {src_epsg}"
    cmd += f" {shapefile_without_extension} {table_name}"
    # NOTE(review): the URI embeds the DB password in a shell command,
    # which leaks via process listings -- consider PGPASSWORD instead.
    cmd += f" | psql {self.uri()}"
    os.system(cmd)
    return cmd
# TRANSFER data to another database
# ---------------------------------
def transfer_data_to_another_db(
    self, table_name: str, other_postgresql_db, schema: str = None
) -> None:
    """
    Copy one table from this SQL database to another.
    :param table_name: Name of the table to copy
    :type table_name: str
    :param other_postgresql_db: ``PostgreSQL()`` object for target database
    :type other_postgresql_db: PostgreSQL
    :param schema: schema of the source table, defaults to the active schema
    :type schema: str, optional
    """
    if not schema:
        schema = self.ACTIVE_SCHEMA
    select_all = f"SELECT * FROM {schema}.{table_name}"
    # Spatial tables travel as geodataframes, everything else as dataframes
    if table_name in self.all_spatial_tables_as_dict():
        other_postgresql_db.import_geodataframe(self.query_as_geo_df(select_all), table_name)
    else:
        other_postgresql_db.import_dataframe(self.query_as_df(select_all), table_name)
def connect_via_uri(
    uri: str,
    verbosity: str = "full",
    super_db: str = "postgres",
    super_user: str = None,
    super_pw: str = None,
):
    """
    Create a ``PostgreSQL`` object from a URI. Note that
    this process must make assumptions about the super-user
    of the database (falling back to the URI's own credentials).
    Proceed with caution.
    :param uri: database connection string, e.g.
                ``postgresql://user:pw@host:5432/db?sslmode=require``
    :type uri: str
    :param verbosity: level of printout desired, defaults to "full"
    :type verbosity: str, optional
    :param super_db: name of the SQL cluster master DB,
                     defaults to "postgres"
    :type super_db: str, optional
    :param super_user: super-user name, defaults to the URI's username
    :type super_user: str, optional
    :param super_pw: super-user password, defaults to the URI's password
    :type super_pw: str, optional
    :return: ``PostgreSQL()`` object
    :rtype: PostgreSQL
    """
    uri_list = uri.split("?")
    base_uri = uri_list[0]
    # Break off the ?sslmode section
    if len(uri_list) > 1:
        sslmode = uri_list[1]
    else:
        sslmode = False
    # Get rid of postgresql://
    base_uri = base_uri.replace(r"postgresql://", "")
    # Split values up to get component parts
    un_pw, host_port_db = base_uri.split("@")
    username, password = un_pw.split(":")
    host, port_db = host_port_db.split(":")
    port, db_name = port_db.split(r"/")
    # Fall back to the URI's credentials for the super-user
    if not super_pw:
        super_pw = password
    if not super_user:
        super_user = username
    values = {
        "host": host,
        "un": username,
        "pw": password,
        "port": port,
        "sslmode": sslmode,
        # BUG FIX: honor the caller's verbosity (it was hard-coded to
        # "full", silently ignoring the parameter)
        "verbosity": verbosity,
        "super_db": super_db,
        "super_un": super_user,
        "super_pw": super_pw,
    }
    return PostgreSQL(db_name, **values)
| StarcoderdataPython |
1629244 | #!/usr/bin/env python
# encoding: utf-8
from .converter import Csv2Weka # noqa
from .version import __version__ # noqa
__all__ = ['Csv2Weka', '__version__']
| StarcoderdataPython |
3267967 | <filename>gravity/state.py
""" Classes to represent and manipulate gravity's stored configuration and
state data.
"""
import enum
import errno
import yaml
from gravity.util import AttributeDict
class GracefulMethod(enum.Enum):
    """How a service can be gracefully restarted (see Service.graceful_method)."""
    # Default restart handling by the process supervisor
    DEFAULT = 0
    # Service reloads in place when sent SIGHUP
    SIGHUP = 1
class Service(AttributeDict):
    """Base class describing one supervised Galaxy service.
    Behaves like a dict (via AttributeDict); subclasses override the
    class attributes below to identify themselves.
    """
    # Kind of process this service runs (gunicorn, celery, ...)
    service_type = "service"
    # Name of this service instance in the process supervisor
    service_name = "_default_"
    # How the service is restarted gracefully
    graceful_method = GracefulMethod.DEFAULT
    def __init__(self, *args, **kwargs):
        super(Service, self).__init__(*args, **kwargs)
        # Mirror the class-level identifiers into the dict payload unless
        # the caller supplied explicit values
        if "service_type" not in kwargs:
            self["service_type"] = self.__class__.service_type
        if "service_name" not in kwargs:
            self["service_name"] = self.__class__.service_name
    def __eq__(self, other):
        # Equality compares only the identifying keys; other keys are ignored.
        # NOTE(review): assumes "config_type" is present in both dicts
        # (set elsewhere) -- KeyError otherwise.
        return self["config_type"] == other["config_type"] and self["service_type"] == other["service_type"] and self["service_name"] == other["service_name"]
    def full_match(self, other):
        # Strict comparison: same key set, and same values for every
        # non-private ("_"-prefixed) key
        return set(self.keys()) == set(other.keys()) and all([self[k] == other[k] for k in self if not k.startswith("_")])
class GalaxyGunicornService(Service):
    """Galaxy web frontend served directly by gunicorn."""
    service_type = "gunicorn"
    service_name = "gunicorn"
    # gunicorn supports in-place reload via SIGHUP
    graceful_method = GracefulMethod.SIGHUP
    # Formatted later with bind_address/bind_port from the config defaults
    command_template = "gunicorn 'galaxy.webapps.galaxy.fast_factory:factory()' --timeout 300" \
        " --pythonpath lib -k galaxy.webapps.galaxy.workers.Worker -b {bind_address}:{bind_port}"
class GalaxyUnicornHerderService(Service):
    """Galaxy web frontend run under unicornherder, which supervises the
    gunicorn process."""
    service_type = "unicornherder"
    service_name = "unicornherder"
    # unicornherder handles SIGHUP for graceful reloads
    graceful_method = GracefulMethod.SIGHUP
    # Formatted later with supervisor/log/bind settings from the config
    command_template = "unicornherder --pidfile {supervisor_state_dir}/{program_name}.pid --" \
        " 'galaxy.webapps.galaxy.fast_factory:factory()' --timeout 300" \
        " --pythonpath lib -k galaxy.webapps.galaxy.workers.Worker -b {bind_address}:{bind_port}" \
        " --access-logfile {log_dir}/gunicorn.access.log" \
        " --error-logfile {log_dir}/gunicorn.error.log --capture-output"
class GalaxyCeleryService(Service):
    """Celery worker process for Galaxy's background tasks."""
    service_type = "celery"
    service_name = "celery"
    command_template = "celery --app galaxy.celery worker --concurrency 2 -l debug"
class GalaxyCeleryBeatService(Service):
    """Celery beat scheduler for Galaxy's periodic tasks."""
    service_type = "celery-beat"
    service_name = "celery-beat"
    command_template = "celery --app galaxy.celery beat -l debug"
class GalaxyStandaloneService(Service):
    """Standalone Galaxy server process (e.g. job handlers)."""
    service_type = "standalone"
    service_name = "standalone"
    # FIXME: supervisor-specific
    # Formatted later with config/server-name/pool/pid settings
    command_template = "python ./lib/galaxy/main.py -c {galaxy_conf} --server-name={server_name}{attach_to_pool_opt}" \
        " --pid-file={supervisor_state_dir}/{program_name}.pid"
class ConfigFile(AttributeDict):
    """A tracked Galaxy config file plus the services it defines."""
    def __init__(self, *args, **kwargs):
        super(ConfigFile, self).__init__(*args, **kwargs)
        # Rehydrate the plain service dicts into their typed Service
        # subclasses; unknown types fall back to the Service base class
        services = []
        for service in self.get("services", []):
            service_class = SERVICE_CLASS_MAP.get(service["service_type"], Service)
            services.append(service_class(**service))
        self.services = services
    @property
    def defaults(self):
        # Values commonly interpolated into service command templates.
        # NOTE(review): assumes "instance_name" and all "attribs" keys are
        # present -- raises KeyError otherwise.
        return {
            "instance_name": self["instance_name"],
            "galaxy_root": self["attribs"]["galaxy_root"],
            "log_dir": self["attribs"]["log_dir"],
            "bind_address": self["attribs"]["bind_address"],
            "bind_port": self["attribs"]["bind_port"],
        }
class GravityState(AttributeDict):
    """Gravity's persisted state (a YAML file), usable as a context
    manager that writes itself back to disk on exit.
    """
    @classmethod
    def open(cls, name):
        """Load state from ``name``, creating an empty state file if missing."""
        try:
            with open(name) as state_file:
                s = cls.loads(state_file.read())
        except OSError as exc:
            # BUG FIX: previously every OSError was swallowed here, and for
            # errors other than ENOENT (e.g. permission denied) `s` was left
            # unbound, causing a confusing NameError below. Only a missing
            # file is recoverable; re-raise everything else.
            if exc.errno != errno.ENOENT:
                raise
            with open(name, "w") as state_file:
                yaml.dump({}, state_file)
            s = cls()
        s._name = name
        return s
    def __init__(self, *args, **kwargs):
        super(GravityState, self).__init__(*args, **kwargs)
        # Ensure expected top-level keys exist and rehydrate each config
        # entry into a ConfigFile
        for key in ("config_files",):
            if key not in self:
                self[key] = {}
            for config_file, config_dict in self[key].items():
                self[key][config_file] = ConfigFile(config_dict)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Persist the (possibly modified) state on context exit
        with open(self._name, "w") as fh:
            self.dump(fh)
    def set_name(self, name):
        """Set the path this state will be written back to."""
        self._name = name
def service_for_service_type(service_type):
    """Return the Service subclass registered for *service_type*."""
    service_class = SERVICE_CLASS_MAP.get(service_type)
    if service_class is None:
        raise RuntimeError(f"Unknown service type: {service_type}")
    return service_class
# TODO: better to pull this from __class__.service_type
# Registry mapping a config's "service_type" string to the Service
# subclass that represents it (used by service_for_service_type and
# ConfigFile).
SERVICE_CLASS_MAP = {
    "gunicorn": GalaxyGunicornService,
    "unicornherder": GalaxyUnicornHerderService,
    "celery": GalaxyCeleryService,
    "celery-beat": GalaxyCeleryBeatService,
    "standalone": GalaxyStandaloneService,
}
| StarcoderdataPython |
5096805 | <reponame>rdg7739/coronavirus_bot
from .app import CoronaBot
# Entry point: start the bot only when executed as a script
# (e.g. ``python -m package``), not on import.
if __name__ == '__main__':
    # NOTE(review): run() is invoked on the class itself; presumably it is
    # a classmethod/staticmethod -- confirm in .app.
    CoronaBot.run()
| StarcoderdataPython |
8177916 | from django.shortcuts import render,redirect,get_object_or_404
from django.contrib.auth.models import User
from django.http import HttpResponse
from django import template
def tags(request):
    # Render the static tags index page; no extra context data is passed.
    return render(request, 'tags/index.html')
| StarcoderdataPython |
9798543 | # Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import os
import requests
import torch.distributed as dist
import torchvision.utils
from imaginaire.utils.distributed import is_master
def save_pilimage_in_jpeg(fullname, output_img):
    r"""Save PIL Image to JPEG, creating parent directories as needed.
    Args:
        fullname (str): Full save path.
        output_img (PIL Image): Image to be saved.
    """
    parent_dir = os.path.dirname(fullname)
    os.makedirs(parent_dir, exist_ok=True)
    # quality=99 keeps near-lossless output
    output_img.save(fullname, 'JPEG', quality=99)
def save_intermediate_training_results(
        visualization_images, logdir, current_epoch, current_iteration):
    r"""Save intermediate training results for debugging purpose.
    Args:
        visualization_images (tensor): Image where pixel values are in [-1, 1].
        logdir (str): Where to save the image.
        current_epoch (int): Current training epoch.
        current_iteration (int): Current training iteration.
    """
    # Map pixel values from [-1, 1] to [0, 1] before saving
    rescaled = (visualization_images + 1) / 2
    filename = os.path.join(
        logdir, 'images',
        f'epoch_{current_epoch:05}iteration{current_iteration:09}.jpg')
    print(f'Save output images to {filename}')
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    # Stack all images into a single-column grid, then write to disk
    grid = torchvision.utils.make_grid(
        rescaled.data, nrow=1, padding=0, normalize=False)
    torchvision.utils.save_image(grid, filename, nrow=1)
def download_file_from_google_drive(file_id, destination):
    r"""Download a file from the google drive by using the file ID.
    Args:
        file_id: Google drive file ID
        destination: Path to save the file.
    Returns:
        None. The payload is streamed to ``destination``.
    """
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    # First request: small files come straight back; larger files return
    # a warning page plus a confirmation cookie instead of the payload
    response = session.get(URL, params={'id': file_id}, stream=True)
    token = get_confirm_token(response)
    if token:
        # Retry with the confirmation token to get the actual payload
        params = {'id': file_id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    r"""Return the value of Google Drive's download-warning cookie, if set.
    Args:
        response: HTTP response whose cookies are inspected.
    Returns:
        The confirmation token string, or None when no warning cookie exists.
    """
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination):
    r"""Stream an HTTP response body to disk in 32 KiB chunks.
    Args:
        response: Streaming HTTP response.
        destination: Path to save the file.
    Returns:
        None.
    """
    chunk_size = 32768
    with open(destination, "wb") as out_file:
        for chunk in response.iter_content(chunk_size):
            if not chunk:
                # skip keep-alive chunks
                continue
            out_file.write(chunk)
def get_checkpoint(checkpoint_path, url=''):
    r"""Get the checkpoint path. If it does not exist yet, download it from
    the url.
    Args:
        checkpoint_path (str): Checkpoint path.
        url (str): URL to download checkpoint. NOTE(review): this value is
            passed to download_file_from_google_drive as the file ID, so it
            is presumably a Google Drive file ID rather than a full URL.
    Returns:
        (str): Full checkpoint path.
    """
    # NOTE(review): permanently sets TORCH_HOME for this process if it was
    # unset -- a side effect visible to anything else reading that variable.
    if 'TORCH_HOME' not in os.environ:
        os.environ['TORCH_HOME'] = os.getcwd()
    save_dir = os.path.join(os.environ['TORCH_HOME'], 'checkpoints')
    os.makedirs(save_dir, exist_ok=True)
    full_checkpoint_path = os.path.join(save_dir, checkpoint_path)
    if not os.path.exists(full_checkpoint_path):
        os.makedirs(os.path.dirname(full_checkpoint_path), exist_ok=True)
        # Only the master process downloads; other ranks wait at the barrier
        if is_master():
            print('Download {}'.format(url))
            download_file_from_google_drive(url, full_checkpoint_path)
        if dist.is_available() and dist.is_initialized():
            dist.barrier()
    return full_checkpoint_path
| StarcoderdataPython |
6606738 | <gh_stars>100-1000
import redis
import json
from util.utils import login_expire
class RedisUtil(object):
    """Thin wrapper around a local Redis connection for the QA system."""
    def __init__(self):
        self.client = redis.Redis()
    def check_user_registered(self, user):
        """Register *user* and report whether they already existed.
        SADD returns 0 when the member was already in the set, so a 0
        result means the user had registered before. Note the side
        effect: an unknown user is added to the set by this check.
        """
        return self.client.sadd('qa_system:user:duplicate', user) == 0
    def save_session(self, session_id, session_info):
        """Store *session_info* (JSON-encoded) under *session_id*."""
        self.client.hset('qa_system:session', session_id, json.dumps(session_info))
    def delete_session(self, session_id):
        """Remove the stored session, if any."""
        self.client.hdel('qa_system:session', session_id)
    def fetch_session(self, session_id):
        """Return the stored session dict, or {} when absent or expired."""
        if not session_id:
            return {}
        raw = self.client.hget('qa_system:session', session_id)
        if not raw:
            return {}
        session_data = json.loads(raw.decode())
        return {} if login_expire(session_data=session_data) else session_data
    def check_user_answer_question(self, user, question_id):
        """True when *user* has already answered *question_id*."""
        return self.client.hexists('qa_system:answer', user + question_id)
    def set_answer_flag(self, question, user):
        """Record that *user* answered *question*.
        NOTE(review): plain user+question concatenation can collide
        (e.g. "ab"+"c" vs "a"+"bc") -- consider adding a delimiter.
        """
        self.client.hset('qa_system:answer', user + question, 1)
3568869 | ###############################################
#### Written By: <NAME> ####
#### Written On: 04-Apr-2020 ####
#### ####
#### Objective: This script is a config ####
#### file, contains all the keys for ####
#### Azure 2 OCI API. Application will ####
#### process these information & perform ####
#### the call to our newly developed Azure ####
#### API in OCI. ####
###############################################
import os
import platform as pl
class clsConfig(object):
    """Static configuration for the Azure-to-OCI Covid API caller.
    Everything is exposed through the class-level ``conf`` dictionary;
    no instance is required.
    """
    # Folder containing this file; used to anchor the log path
    Curr_Path = os.path.dirname(os.path.realpath(__file__))
    # Pick the path separator that matches the host OS
    os_det = pl.system()
    sep = '\\' if os_det == "Windows" else '/'
    conf = {
        'APP_ID': 1,
        "comp": "ocid1.compartment.oc1..xxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyxxxxxx",
        "URL": "https://xxxxxxxxxx.yyyyyyyyyyyyyyyy.net/api/getDynamicCovidStats",
        "appType": "application/json",
        "conType": "keep-alive",
        "limRec": 10,
        "CACHE": "no-cache",
        "colList": "date, state, positive, negative",
        "typSel": "Cols",
        "LOG_PATH": Curr_Path + sep + 'log' + sep,
        "STREAM_NAME": "Covid19-Stream",
        "PARTITIONS": 1
    }
| StarcoderdataPython |
11222999 | <filename>usage/api/models/skipgram.py
import argparse
import sys
import tensorflow as tf
def read_dictionary():
    """Load the skipgram vocabulary from its TSV file.
    Returns a (word -> id, id -> word) pair of dicts. Words are indexed
    by position in the file; a duplicated word keeps its last position.
    """
    with open('models/skipgram/skipgram.tsv', 'r') as vocab_file:
        tokens = vocab_file.read().split()
    word_to_id = {word: index for index, word in enumerate(tokens)}
    id_to_word = {index: word for word, index in word_to_id.items()}
    return word_to_id, id_to_word
def evaluate(embeddings, word=None, embedding=None):
    """Return the ids of the 10 embeddings nearest to a query.
    Either *word* (looked up in the module-level ``dictionary``) or a
    pre-computed *embedding* row vector must be supplied.
    NOTE(review): assumes ``embeddings`` rows are already L2-normalized so
    the matmul below is cosine similarity -- confirm at the call site.
    """
    if word != None:
        # Unknown words silently fall back to id 0
        word_embedding = tf.nn.embedding_lookup(embeddings, [dictionary.get(word, 0)])
    else:
        word_embedding = embedding
    # Similarity of the query against every vocabulary embedding
    similarity = tf.matmul(word_embedding, embeddings, transpose_b=True)
    sim = similarity.eval()
    # Ids sorted by descending similarity; index 0 (usually the query
    # word itself) is skipped, returning the next 10
    nearest = (-sim).argsort()[0]
    return nearest[1:11]
# Module-level vocabulary, loaded once at import time (requires
# models/skipgram/skipgram.tsv to exist relative to the CWD).
dictionary, reversed_dictionary = read_dictionary()
def get_nearest_skipgram(word):
    """Return the 10 words nearest to *word* in the trained embedding,
    or None when *word* is not in the vocabulary.
    """
    with tf.Session() as sess:
        # Restore the trained graph and weights from the checkpoint
        saver = tf.train.import_meta_graph('models/skipgram/skipgram.ckpt.meta')
        saver.restore(sess, 'models/skipgram/skipgram.ckpt')
        # NOTE(review): assumes the embedding matrix is the first global
        # variable in the restored graph -- fragile if the graph changes.
        embeddings = tf.get_variable_scope().global_variables()[0]
        # L2-normalize rows so evaluate()'s matmul is cosine similarity
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
        normalized_embeddings = embeddings / norm
        if dictionary.get(word, -1) == -1:
            return None
        nearest = evaluate(normalized_embeddings, word=word)
        nearest_words = [reversed_dictionary[id] for id in nearest]
        return nearest_words
| StarcoderdataPython |
8089274 | import os
import sys
from imageio import imread, imwrite
from skimage.transform import resize
# Resize every image under the target directory in place.
# Usage: script.py <target_dir> <dataset>
# ("omniglot" -> 28x28, anything else -> 84x84)
target_dir = sys.argv[1]
if sys.argv[2] == 'omniglot':
    img_size = [28, 28]
else:
    img_size = [84, 84]
_ids = []
# Collect every image file (matched by extension) under target_dir
for root, dirnames, filenames in os.walk(target_dir):
    for filename in filenames:
        if filename.endswith(('.jpg', '.webp', '.JPEG', '.png', 'jpeg')):
            _ids.append(os.path.join(root, filename))
# Overwrite each image with its resized version
for i, path in enumerate(_ids):
    img = imread(path)
    print('{}/{} size: {}'.format(i, len(_ids), img.shape))
    # NOTE(review): skimage.transform.resize returns a float image in
    # [0, 1]; imwrite converts it back on save -- confirm the implied
    # precision/range handling is acceptable here.
    imwrite(path, resize(img, img_size))
| StarcoderdataPython |
370650 | <reponame>awslabs/improving-forecast-accuracy-with-machine-learning
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# #####################################################################################################################
from pathlib import Path
from typing import Dict
import aws_cdk.aws_iam as iam
import aws_cdk.aws_stepfunctions as sfn
import aws_cdk.aws_stepfunctions_tasks as tasks
from aws_cdk.aws_lambda import Runtime, Function, IFunction
from aws_cdk.aws_s3 import IBucket
from aws_cdk.core import (
Construct,
Duration,
Aws,
CfnParameter,
CfnResource,
CfnMapping,
CfnCondition,
)
from solutions.cfn_nag import add_cfn_nag_suppressions, CfnNagSuppression
from stepfunctions.lambda_builder import LambdaBuilder
from stepfunctions.policies import SfnPolicies
class LambdaFunctions(Construct):
    """CDK construct that builds the solution's AWS Lambda functions and the
    Step Functions state machine orchestrating the Amazon Forecast workflow
    (dataset group -> import -> predictor -> forecast -> exports -> ETL ->
    QuickSight -> notification).

    It also exposes helper methods that attach the IAM permissions each
    function needs (Forecast, S3, Glue/Athena/QuickSight, KMS).
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        log_level: CfnParameter,
        forecast_kms: CfnCondition,
        forecast_kms_key_arn: str,
    ):
        """Create all Lambda functions and assemble the state machine.

        :param scope: parent construct
        :param id: logical id of this construct
        :param log_level: template parameter passed to every function as its
            logging verbosity
        :param forecast_kms: condition that holds when a customer-managed KMS
            key protects the Forecast data
        :param forecast_kms_key_arn: ARN of that key (only meaningful when the
            condition holds)
        """
        super().__init__(scope, id)
        self.topic = None  # SNS topic; expected to be attached by the owning stack
        self.subscription = None  # SNS subscription; attached by the owning stack
        # Registry of function name -> Function, filled by create_functions().
        # (Annotation fixed: the mapping is keyed by function name.)
        self.functions: Dict[str, Function] = {}
        self.policies = SfnPolicies(self)
        self.forecast_kms = forecast_kms
        self.forecast_kms_key_arn = forecast_kms_key_arn
        # create any policies that can be created upfront
        self.policies.create_forecast_kms_read_policy(
            forecast_kms, forecast_kms_key_arn
        )
        self.policies.create_forecast_kms_write_policy(
            forecast_kms, forecast_kms_key_arn
        )
        # build functions
        self.lambda_builder = LambdaBuilder(
            self,
            log_level=log_level.value_as_string,
            # Lambda sources live four directory levels above this file.
            source_path=Path(__file__).parent.parent.parent.parent,
        )
        self.create_functions()
        # step function steps
        check_error = sfn.Choice(self, "Check-Error")
        # Failure notification task; chained into a terminal Fail state below.
        notify_failed = tasks.LambdaInvoke(
            self,
            "Notify-Failed",
            lambda_function=self.functions["SNS"],
            payload_response_only=True,
            retry_on_service_exceptions=True,
            result_path=None,
        )
        notify_failed.next(sfn.Fail(self, "FailureState"))
        create_dataset_group = tasks.LambdaInvoke(
            self,
            "Create-DatasetGroup",
            lambda_function=self.functions["CreateDatasetGroup"],
            result_path="$.DatasetGroupNames",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        # "ResourcePending" signals the Forecast resource is still creating;
        # retrying effectively polls until it is ready.
        create_dataset_group.add_retry(
            backoff_rate=1.05,
            interval=Duration.seconds(5),
            errors=["ResourcePending"],
        )
        create_dataset_group.add_catch(
            notify_failed, errors=["ResourceFailed"], result_path="$.serviceError"
        )
        create_dataset_group.add_catch(
            notify_failed, errors=["States.ALL"], result_path="$.statesError"
        )
        create_glue_table_name = tasks.LambdaInvoke(
            self,
            "Create-Glue-Table-Name",
            lambda_function=self.functions["CreateGlueTableName"],
            result_path="$.glue_table_name",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        import_data = tasks.LambdaInvoke(
            self,
            "Import-Data",
            lambda_function=self.functions["CreateDatasetImportJob"],
            result_path="$.DatasetImportJobArn",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        import_data.add_retry(
            backoff_rate=1.05,
            interval=Duration.seconds(5),
            max_attempts=100,
            errors=["ResourcePending"],
        )
        import_data.add_catch(
            notify_failed, errors=["ResourceFailed"], result_path="$.serviceError"
        )
        import_data.add_catch(
            notify_failed, errors=["States.ALL"], result_path="$.statesError"
        )
        # Terminal success state used when a newer update supersedes this one.
        update_not_required = sfn.Succeed(self, "Update-Not-Required")
        notify_success = tasks.LambdaInvoke(
            self,
            "Notify-Success",
            lambda_function=self.functions["SNS"],
            payload_response_only=True,
            retry_on_service_exceptions=True,
            result_path=None,
        )
        notify_prediction_failed = tasks.LambdaInvoke(
            self,
            "Notify-Prediction-Failed",
            lambda_function=self.functions["SNS"],
            payload_response_only=True,
            retry_on_service_exceptions=True,
            result_path=None,
        )
        notify_prediction_failed.next(sfn.Fail(self, "Prediction-Failed"))
        create_predictor = tasks.LambdaInvoke(
            self,
            "Create-Predictor",
            lambda_function=self.functions["CreatePredictor"],
            result_path="$.PredictorArn",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        # Predictor training is slow: poll every ~2 minutes, up to 100 tries.
        create_predictor.add_retry(
            backoff_rate=1.02,
            interval=Duration.seconds(120),
            max_attempts=100,
            errors=["ResourcePending", "DatasetsImporting"],
        )
        create_predictor.add_catch(
            notify_prediction_failed,
            errors=["ResourceFailed"],
            result_path="$.serviceError",
        )
        create_predictor.add_catch(
            notify_prediction_failed, errors=["States.ALL"], result_path="$.statesError"
        )
        create_predictor.add_catch(update_not_required, errors=["NotMostRecentUpdate"])
        create_predictor_backtest_export = tasks.LambdaInvoke(
            self,
            "Create-Predictor-Backtest-Export",
            lambda_function=self.functions["CreatePredictorBacktestExport"],
            # NOTE(review): writing the export result over "$.PredictorArn"
            # looks like a copy/paste leftover; harmless here because the
            # enclosing Parallel discards branch output — confirm.
            result_path="$.PredictorArn",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        create_predictor_backtest_export.add_retry(
            backoff_rate=1.05,
            interval=Duration.seconds(5),
            max_attempts=100,
            errors=["ResourcePending"],
        )
        create_forecast = tasks.LambdaInvoke(
            self,
            "Create-Forecast",
            lambda_function=self.functions["CreateForecast"],
            result_path="$.ForecastArn",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        create_forecast.add_retry(
            backoff_rate=1.05,
            interval=Duration.seconds(5),
            max_attempts=100,
            errors=["ResourcePending"],
        )
        create_forecast.add_catch(
            notify_prediction_failed,
            errors=["ResourceFailed"],
            result_path="$.serviceError",
        )
        create_forecast.add_catch(
            notify_prediction_failed, errors=["States.ALL"], result_path="$.statesError"
        )
        create_forecast_export = tasks.LambdaInvoke(
            self,
            "Create-Forecast-Export",
            lambda_function=self.functions["CreateForecastExport"],
            # NOTE(review): same "$.PredictorArn" result_path as the backtest
            # export — presumably a leftover; output is discarded anyway.
            result_path="$.PredictorArn",
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        create_forecast_export.add_retry(
            backoff_rate=1.05,
            interval=Duration.seconds(5),
            max_attempts=100,
            errors=["ResourcePending"],
        )
        create_quicksight_analysis = tasks.LambdaInvoke(
            self,
            "Create-QuickSight-Analysis",
            lambda_function=self.functions["CreateQuickSightAnalysis"],
            result_path=sfn.JsonPath.DISCARD,
            payload_response_only=True,
            retry_on_service_exceptions=True,
        )
        create_quicksight_analysis.add_catch(
            notify_prediction_failed,
            errors=["ResourceFailed"],
            result_path="$.serviceError",
        )
        create_quicksight_analysis.add_catch(
            notify_prediction_failed, errors=["States.ALL"], result_path="$.statesError"
        )
        # Glue ETL job consolidating forecast output; RUN_JOB waits for
        # completion before the state machine continues.
        forecast_etl = tasks.GlueStartJobRun(
            self,
            "Forecast-ETL",
            glue_job_name=f"{Aws.STACK_NAME}-Forecast-ETL",
            integration_pattern=sfn.IntegrationPattern.RUN_JOB,
            result_path=sfn.JsonPath.DISCARD,
            arguments=sfn.TaskInput.from_object(
                {
                    "--dataset_group": sfn.JsonPath.string_at("$.dataset_group_name"),
                    "--glue_table_name": sfn.JsonPath.string_at("$.glue_table_name"),
                }
            ),
        )
        # Fan out over every dataset group produced by Create-DatasetGroup.
        create_forecasts = sfn.Map(
            self,
            "Create-Forecasts",
            items_path="$.DatasetGroupNames",
            parameters={
                "bucket.$": "$.bucket",
                "dataset_file.$": "$.dataset_file",
                "dataset_group_name.$": "$$.Map.Item.Value",
                "config.$": "$.config",
            },
        )
        # Run both exports concurrently; their outputs are discarded.
        parallel_export = sfn.Parallel(
            self,
            "Export-Predictor-Backtest-And-Forecast",
            result_path=sfn.JsonPath.DISCARD,
        )
        parallel_export.branch(create_forecast_export)
        parallel_export.branch(create_predictor_backtest_export)
        parallel_export.add_catch(
            notify_prediction_failed,
            errors=["ResourceFailed"],
            result_path="$.serviceError",
        )
        parallel_export.add_catch(
            notify_prediction_failed, errors=["States.ALL"], result_path="$.statesError"
        )
        # step function definition
        definition = (
            check_error.when(sfn.Condition.is_present("$.serviceError"), notify_failed)
            .otherwise(create_dataset_group)  # temporary; for testing
            .afterwards()
            .next(import_data)
            .next(
                create_forecasts.iterator(
                    create_predictor.next(create_forecast)
                    .next(parallel_export)
                    .next(create_glue_table_name)
                    .next(forecast_etl)
                    .next(create_quicksight_analysis)
                    .next(notify_success)
                )
            )
        )
        self.state_machine = sfn.StateMachine(
            self,
            "DeployStateMachine",
            definition=definition,
            state_machine_name=f"Improving-Forecast-Accuracy-{Aws.STACK_NAME}",
        )
        # Suppress the cfn-nag warning about the autogenerated (large) role
        # policy backing the state machine.
        add_cfn_nag_suppressions(
            resource=self.state_machine.role.node.children[1].node.default_child,
            suppressions=[
                CfnNagSuppression(
                    "W76",
                    "Large step functions need larger IAM roles to access all managed lambda functions",
                )
            ],
        )

    def set_forecast_permissions(self, name, data_bucket_name_resource: CfnResource):
        """All operations require access to read from S3.

        Grants the named function Forecast read/write, S3 read on the data
        bucket, and KMS read (when the customer key condition holds).
        """
        function = self.functions[name]
        function.role.attach_inline_policy(
            self.policies.forecast_read_write_policy(name)
        )
        function.role.attach_inline_policy(
            self.policies.s3_bucket_read_policy(name, data_bucket_name_resource)
        )
        function.role.attach_inline_policy(self.policies.forecast_kms_read_policy)

    def set_s3_notification_permissions(self, data_bucket_name_resource: CfnResource):
        """Allow the S3 notification function to start the state machine and
        read the data bucket; expose the state machine ARN to it via env var.
        """
        function_name = "S3NotificationLambda"
        function = self.functions[function_name]
        self.state_machine.grant_start_execution(function)
        function.add_environment(
            "STATE_MACHINE_ARN", self.state_machine.state_machine_arn
        )
        function.role.attach_inline_policy(
            self.policies.s3_bucket_read_policy(
                function_name, data_bucket_name_resource
            )
        )
        function.role.attach_inline_policy(self.policies.forecast_kms_read_policy)

    def set_forecast_s3_access_permissions(
        self,
        name,
        function: IFunction,
        data_bucket_name_resource: CfnResource,
        read: bool,
        write: bool,
    ):
        """Create an S3 access role Forecast can assume, let *function* pass
        it to Forecast, and expose its ARN (and the KMS key) via env vars.

        :param read: grant read access on the data bucket
        :param write: grant write access (requires read as well)
        :raises ValueError: for unsupported read/write combinations
        """
        if read and write:
            policy = self.policies.forecast_s3_read_write_role
        elif read:
            policy = self.policies.forecast_s3_read_role
        else:
            raise ValueError("permissions must have read and write or just read access")
        forecast_s3_access_role = policy(
            name=name,
            data_bucket_name_resource=data_bucket_name_resource,
        )
        # The function itself only needs iam:PassRole on that role; Forecast
        # assumes it to touch S3 on the function's behalf.
        function.role.attach_inline_policy(
            iam.Policy(
                self,
                f"{function.node.id}ForecastPassRolePolicy",
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["iam:PassRole"],
                        resources=[forecast_s3_access_role.role_arn],
                    )
                ],
            )
        )
        function.add_environment("FORECAST_ROLE", forecast_s3_access_role.role_arn)
        function.add_environment("FORECAST_KMS", self.forecast_kms_key_arn)

    def set_forecast_etl_permissions(
        self,
        function: IFunction,
        database: CfnResource,
        workgroup: CfnResource,
        quicksight_principal: CfnParameter,
        quicksight_source: CfnMapping,
        athena_bucket: IBucket,
        data_bucket_name_resource: CfnResource,
    ):
        """Grant the ETL/analysis function Athena, Glue and QuickSight access
        and wire the related resource names into its environment.
        """
        function.role.attach_inline_policy(self.policies.athena_access(workgroup.ref))
        function.role.attach_inline_policy(
            self.policies.glue_access(
                catalog=database,
                athena_bucket=athena_bucket,
                data_bucket_name_resource=data_bucket_name_resource,
            )
        )
        function.role.attach_inline_policy(self.policies.quicksight_access())
        function.add_environment("SCHEMA_NAME", database.ref)
        function.add_environment("WORKGROUP_NAME", workgroup.ref)
        function.add_environment(
            "QUICKSIGHT_PRINCIPAL", quicksight_principal.value_as_string
        )
        function.add_environment(
            "QUICKSIGHT_SOURCE",
            quicksight_source.find_in_map("General", "QuickSightSourceTemplateArn"),
        )

    def create_functions(self):
        """
        Create all AWS Lambda functions used by the solution
        :return: None
        """
        # Shared dependency layers (built once, attached to most functions).
        layer_deps = self.lambda_builder.layer_for(
            name="Dependencies",
            base="lambdas/lambda_dependencies",
            runtimes=[Runtime.PYTHON_3_8],
        )
        layer_data = self.lambda_builder.layer_for(
            name="DatasetUtils",
            base="lambdas/lambda_datasetutils",
            runtimes=[Runtime.PYTHON_3_8],
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="S3NotificationLambda",
                base="lambdas/notification",
                handlers="handler.notification",
                libs="shared",
                layers=[layer_deps],
            )
        )
        # Allow S3 (same account) to invoke the notification function.
        self.functions["S3NotificationLambda"].add_permission(
            "S3NotificationLambdaS3BucketPermission",
            action="lambda:InvokeFunction",
            source_account=Aws.ACCOUNT_ID,
            principal=iam.ServicePrincipal("s3.amazonaws.com"),
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CreateDatasetGroup",
                base="lambdas/createdatasetgroup",
                handlers="handler.createdatasetgroup",
                libs="shared",
                layers=[layer_deps],
            )
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CreateDatasetImportJob",
                base="lambdas/createdatasetimportjob",
                handlers="handler.createdatasetimportjob",
                libs="shared",
                layers=[layer_deps],
            )
        )
        # Two handlers from one code base: forecast creation and its export.
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CreateForecast",
                base="lambdas/createforecast",
                handlers=[
                    "create_forecast.handler",
                    "create_forecast_export.handler",
                ],
                libs="shared",
                layers=[layer_deps],
            )
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CreatePredictor",
                base="lambdas/createpredictor",
                handlers=[
                    "create_predictor.handler",
                    "create_predictor_backtest_export.handler",
                ],
                libs="shared",
                layers=[layer_deps],
            )
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CreateQuickSightAnalysis",
                base="lambdas/createquicksightanalysis",
                handlers="handler.createquicksightanalysis",
                libs="shared",
                timeout=Duration.minutes(15),
                layers=[layer_data],
            )
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="SNS",
                base="lambdas/sns",
                handlers="handler.sns",
                libs="shared",
                layers=[layer_deps],
            )
        )
        # CloudFormation custom-resource helpers (no dependency layer needed).
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CfnResource",
                base="lambdas/cloudformation_resources",
                handlers=[
                    "bucket_name.handler",
                    "solution_metrics.handler",
                    "unique_name.handler",
                ],
                timeout=Duration.seconds(10),
            )
        )
        self.functions.update(
            self.lambda_builder.functions_for(
                name="CreateGlueTableName",
                base="lambdas/creategluetablename",
                handlers="handler.creategluetablename",
                libs="shared",
                layers=[layer_deps],
            )
        )
| StarcoderdataPython |
3373285 | import json
import logging
import pika
from message import IncomingMessage
from message_forwarder import MessageForwarder
class AmqpForwarder(MessageForwarder):
    """Forwards incoming messages as JSON to an AMQP fanout exchange."""

    def __init__(self, name: str, host: str, username: str, password: str, exchange: str):
        """Open a blocking AMQP connection and declare the target exchange.

        :param name: logger name for this forwarder
        :param host: AMQP broker host
        :param username: broker username
        :param password: broker password
        :param exchange: fanout exchange to publish to (declared durable)
        :raises Exception: re-raises any failure during channel/exchange
            setup, after closing the already-opened connection
        """
        self.log = logging.getLogger(name)
        self.log.info('Opening AMQP connection')
        # Connect to the AMQP broker
        conn_params = pika.ConnectionParameters(host=host, credentials=pika.PlainCredentials(username, password))
        self.connection = pika.BlockingConnection(conn_params)
        try:
            # Create a channel and declare the target exchange
            self.channel = self.connection.channel()
            self.channel.exchange_declare(exchange=exchange, exchange_type='fanout', durable=True)
            self.exchange = exchange
        except Exception:
            # Make sure the connection is closed on exceptions, then re-raise.
            # (Bare `raise` instead of `raise ex` — the idiomatic re-raise,
            # which keeps the original traceback without rebinding.)
            self.close()
            raise

    def forward_message(self, message: IncomingMessage):
        """Publish *message* to the exchange; log instead of raising on error
        (delivery is best-effort and must not kill the caller)."""
        try:
            body = json.dumps(message.__dict__)
            self.log.debug('Sending message: %s', body)
            self.channel.basic_publish(self.exchange, '', body)
        except Exception as ex:
            self.log.error('Exception on sending message to AMQP: %s', ex)

    def close(self):
        """Close the underlying AMQP connection, swallowing close errors."""
        self.log.info('Closing AMQP forwarder')
        try:
            self.connection.close()
        except Exception as ex:
            self.log.warning('Exception on closing AMQP connection: %s', ex)
| StarcoderdataPython |
3479672 | <filename>backend/apps/csyllabusapi/helper/webscraping/generate-fixtures/generate_fixtures_from_columbia_dump.py
# Scrape Columbia University's COMS course bulletin and emit Django fixtures
# (country, city, university, program, courses and their join tables) as a
# single JSON file loadable with `manage.py loaddata`.
import requests
from lxml import html
import json

# NOTE(review): hard-coded absolute output path — works only on the author's
# machine; consider deriving it from the repository root.
columbia_fixtures_json = open("/Volumes/SSD-Thomas/Documents/GitHub/csyllabus/webapp/backend/apps/csyllabusapi/fixtures"
                              "/columbia_fixtures_json.json", "w")
fixtures = []

# Fixed primary keys chosen to not collide with fixtures generated elsewhere.
country_id = 6
fixtures.append({
    "model": "csyllabusapi.country",
    "pk": country_id,
    "fields": {
        "name": "United States of America",
        "img": "",
        "created": "2017-10-30T15:20:51.049Z",
        "modified": "2017-10-30T15:20:52.235Z"
    }
}
)
city_id = 14
fixtures.append({
    "model": "csyllabusapi.city",
    "pk": city_id,
    "fields": {
        "name": "New York",
        "img": "",
        "created": "2017-10-30T15:20:51.049Z",
        "modified": "2017-10-30T15:20:52.235Z",
        "country": country_id
    }
}
)
university_id = 10
fixtures.append(
    {
        "model": "csyllabusapi.university",
        "pk": university_id,
        "fields": {
            "name": "Columbia University in the City of New York",
            "img": "",
            "created": "2017-10-30T15:05:19.541Z",
            "modified": "2017-10-30T15:05:20.945Z",
            "country": country_id,
            "city": city_id
        }
    }
)
# appending programs fixtures
program_id = 41
fixtures.append(
    {
        "model": "csyllabusapi.program",
        "pk": program_id,
        "fields": {
            "name": "Computer Science and Engineering",
            "study_level": "undergraduate",
            "created": "2017-10-30T15:07:40.122Z",
            "modified": "2017-10-30T15:07:41.673Z"
        }
    }
)
fixtures.append(
    {
        "model": "csyllabusapi.programuniversity",
        "pk": program_id,
        "fields": {
            "university": university_id,
            "program": program_id,
            "created": "2017-10-30T15:07:40.122Z"
        }
    }
)
fixtures.append(
    {
        "model": "csyllabusapi.programcity",
        "pk": program_id,
        "fields": {
            "city": city_id,
            "program": program_id,
            "created": "2017-10-30T15:07:40.122Z"
        }
    }
)
fixtures.append(
    {
        "model": "csyllabusapi.programcountry",
        "pk": program_id,
        "fields": {
            "country": country_id,
            "program": program_id,
            "created": "2017-10-30T15:07:40.122Z"
        }
    }
)
# appending courses fixtures — running counters for the course rows and the
# course<->university / course<->program join rows
course_id = 1416
course_uni_id = 2639
course_program_id = 3147
# requesting data
# request url content
url = "http://www.columbia.edu/cu/bulletin/uwb/sel/COMS_Spring2018.html"
r = requests.get(url)
tree = html.fromstring(r.content)
# Course titles look like "COMS W1004. Introduction to ...": id before the
# first ". ", human-readable name after it.
course_idtree = tree.xpath('//div[@class="courseblock"]//p[@class="courseblocktitle"]//strong/text()')
course_desc = tree.xpath('//div[@class="courseblock"]//p[@class="courseblockdesc"]/text()')
# NOTE(review): this assumes titles and descriptions come back in lockstep
# and in equal number — verify against the page structure.
for i in range(0, len(course_idtree)):
    # BUG FIX: the original assignment ended with a stray trailing comma,
    # which made course_name a 1-tuple; the fixture's "name" field was then
    # serialized as a JSON list instead of a string.
    course_name = course_idtree[i].split('. ')[1].strip()
    course_description = course_desc[i].strip()
    # Courses without a description are skipped entirely.
    if course_description:
        fixtures.append(
            {
                "model": "csyllabusapi.courseprogram",
                "pk": course_program_id,
                "fields": {
                    "course": course_id,
                    "program": program_id,
                    "created": "2017-10-30T15:07:40.122Z"
                }
            }
        )
        course_program_id = course_program_id + 1
        fixtures.append(
            {
                "model": "csyllabusapi.courseuniversity",
                "pk": course_uni_id,
                "fields": {
                    "course": course_id,
                    "university": university_id,
                    "created": "2017-10-30T15:07:40.122Z"
                }
            }
        )
        course_uni_id = course_uni_id + 1
        fixtures.append(
            {
                "model": "csyllabusapi.course",
                "pk": course_id,
                "fields": {
                    "name": course_name,
                    "description": course_description,
                    "ects": None,
                    "semester": None,
                    "created": "2017-10-30T15:07:40.122Z",
                    "modified": "2017-10-30T15:07:41.673Z"
                }
            }
        )
        course_id = course_id + 1
json.dump(fixtures, columbia_fixtures_json)
# Close the output file so the dump is flushed to disk (the original leaked
# the handle and relied on interpreter shutdown).
columbia_fixtures_json.close()
| StarcoderdataPython |
5191600 | import unittest
from .test_object_decoder import CapsuleLayerTestCase, CapsuleLikelihoodTestCase, CapsuleObjectDecoderTestCase
from .test_part_decoder import TemplateBasedImageDecoderTestCase, TemplateGeneratorTestCase
from .test_part_encoder import CapsuleImageEncoderTestCase
from .test_scae import SCAETestCase
from .test_set_transformer import SetTransformerTestCase
def suite():
    """Assemble every test case of the package into a single TestSuite."""
    case_classes = (
        CapsuleImageEncoderTestCase,
        TemplateGeneratorTestCase,
        TemplateBasedImageDecoderTestCase,
        SetTransformerTestCase,
        CapsuleLayerTestCase,
        CapsuleLikelihoodTestCase,
        CapsuleObjectDecoderTestCase,
        SCAETestCase,
    )
    test_suite = unittest.TestSuite()
    for case_class in case_classes:
        test_suite.addTest(unittest.makeSuite(case_class))
    return test_suite
if __name__ == '__main__':
    # Allow running this module directly: execute the full suite with a
    # plain-text test runner.
    runner = unittest.TextTestRunner()
    runner.run(suite())
| StarcoderdataPython |
4800317 | #!/usr/bin/env python
# -*- coding: utf-8; indent-tabs-mode: nil; python-indent: 2 -*-
"""Read bookmarks saved in a "Netscape bookmark" format
as exported by Microsoft Internet Explorer or Delicious.com (and
initially of course by Netscape).
Assumptions:
- The file is a Netscape bookmark file.
See a doc at http://msdn.microsoft.com/en-us/library/aa753582%28v=VS.85%29.aspx
- There is only one record by line.
- If a decription/comment/note is attached to the bookmark, it is on
a line prefixed with <DD> (and nothing else but the note should be
on the same line).
License: 2-clause BSD
Copyright (C) 2013-2019 <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED ²AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import urllib.request, urllib.error, urllib.parse
import sys
import os
import re
# The following prefix is not enough to identify a bookmark line (may
# be a folder with '<H3 FOLDED' for instance), so that the line has to
# be checked against the RE_BOOKMARK_URL too.
RE_BOOKMARK_LINE = re.compile(r"<DT>.*", re.IGNORECASE)
RE_BOOKMARK_NOTE = re.compile(r"<DD>.*", re.IGNORECASE)
RE_DOCTYPE_LINE = re.compile(r"^<!DOCTYPE NETSCAPE-Bookmark-file-1>.*", re.IGNORECASE)
# Regular expressions to extract info about the bookmark.
# All patterns are raw strings: '\w' and '\d' in plain string literals are
# invalid escape sequences (DeprecationWarning, and a SyntaxWarning from
# Python 3.12 on).
RE_BOOKMARK_URL = re.compile(r'HREF="(?P<url>[^"]+)"', re.IGNORECASE)
RE_BOOKMARK_COMPONENTS = {
    "posix_timestamp": re.compile(r'[^\w]ADD_DATE="(?P<posix_timestamp>\d+)"', re.IGNORECASE),
    "tags": re.compile(r'[^\w]TAGS="(?P<tags>[\w,]+)"', re.IGNORECASE),
    "private": re.compile(r'[^\w]PRIVATE="(?P<private>\d)"', re.IGNORECASE),
    "title": re.compile(r'<A[^>]*>(?P<title>[^<]*)<', re.IGNORECASE),
}
def is_netscape_bookmarks_file(candidateFile):
    """Return True if the file looks like a valid Netscape bookmark file.

    :param candidateFile: an iterable of lines (e.g. an open file object)
    :return: True as soon as the expected DOCTYPE line is seen, False if any
        non-blank line precedes it or if it is never found
    """
    correct_doctype_found = False
    for line in candidateFile:
        line = line.lstrip()
        if RE_DOCTYPE_LINE.match(line):
            correct_doctype_found = True
        # BUG FIX: the original tested `not line`, which rejected files with
        # leading *blank* lines while accepting arbitrary content before the
        # DOCTYPE. Mirror parse_netscape_bookmarks(): any non-blank line
        # before the DOCTYPE disqualifies the file; blank lines are allowed.
        if line and not correct_doctype_found:
            return False
        if correct_doctype_found:
            return True
    return False
def parse_netscape_bookmarks(bookmarkHTMFile):
    """Extract bookmarks and return them in a list of dictionaries formatted in the following way:
    [ {"url":"http://...", "title":"the title", "private":"0"/"1", "tags":"tag1,tag2,...", "posix_timestamp"="<unix time>", "note":"description"}]
    Raise a ValueError if the format is wrong.

    :param bookmarkHTMFile: the file *content* as one string (not a file
        object — it is split with splitlines() below)
    """
    bookmark_list = []
    # True while the previous line was a bookmark (<DT>) line, so that a
    # following <DD> line can be attached to it as its note.
    last_line_is_bmk = False
    correct_doctype_found = False
    for line in bookmarkHTMFile.splitlines():
        line = line.lstrip()
        if RE_DOCTYPE_LINE.match(line):
            correct_doctype_found = True
            continue
        # Any non-blank content before the DOCTYPE invalidates the file.
        if line.rstrip() and not correct_doctype_found:
            raise ValueError("Couldn't find a correct DOCTYPE in the bookmark file (wrong format?)")
        if not line.rstrip():
            continue
        if RE_BOOKMARK_LINE.search(line):
            # we will successively apply the various regexes until we get
            # all the bookmark's info
            m = RE_BOOKMARK_URL.search(line)
            if not m:
                # No url => skip this line
                continue
            bmk = {"url":m.group("url")}
            # Optional attributes: timestamp, tags, private flag, title.
            for cpnt_name,cpnt_re in RE_BOOKMARK_COMPONENTS.items():
                m = cpnt_re.search(line)
                if m: bmk[cpnt_name] = m.group(cpnt_name)
            bookmark_list.append(bmk)
            last_line_is_bmk = True
        elif last_line_is_bmk and RE_BOOKMARK_NOTE.search(line):
            # A <DD> line directly after a bookmark is its note/description.
            last_line_is_bmk = False
            bookmark_list[-1]["note"] = line[4:].strip()
        else:
            last_line_is_bmk = False
    return bookmark_list
def expand_url(url):
    """Follow redirections of *url* and return the final ("expanded") URL.

    Shortened urls (bit.ly and friends) answer with redirects; urllib follows
    them and geturl() reports the final location. The loop re-opens the
    resulting url until it stops changing. On any HTTP/URL error the original
    url is returned unchanged (best effort).
    """
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', 'netscape_bookmarks.py')]
    initial_url = url
    new_url = None
    while new_url != url:
        if new_url is not None:
            url = new_url
        try:
            res = opener.open(url)
            # BUG FIX: the original compared the first *character* of the
            # status code against the integers (5, 4) — always False, so
            # 4xx/5xx answers that urllib did not raise were never detected.
            if str(res.getcode())[0] in ("4", "5"):
                # something bad happened, return the url as is
                print("Keeping url %s as is because of an HTTP error %s" % (initial_url, res.getcode()))
                return initial_url
        except urllib.error.HTTPError as e:
            print("Keeping url %s as is because of an HTTP error %s (%s)" % (initial_url, e.code, e.reason))
            return initial_url
        except urllib.error.URLError as e:
            print("Keeping url %s as is because of an URL error %s" % (initial_url, e.reason))
            return initial_url
        except Exception as e:
            # Catch-all boundary: never let a bad url break the whole export.
            print("Keeping url %s as is because of an unexpected error %s" % (initial_url, e))
            return initial_url
        new_url = res.geturl()
    return url
def expand_short_urls(bookmarkHTMFile, outputFile):
    """Filter the bookmark file in such a way that the shortened url are expanded.

    :param bookmarkHTMFile: iterable of lines of a Netscape bookmark file
    :param outputFile: writable file object receiving the filtered content
    :raises ValueError: if non-blank content precedes the DOCTYPE line
    """
    correct_doctype_found = False
    outputLines = []
    for line in bookmarkHTMFile:
        line = line.lstrip()
        if RE_DOCTYPE_LINE.match(line):
            correct_doctype_found = True
        # BUG FIX: the original raised on *blank* lines before the DOCTYPE
        # (`not line`) while letting real content through; reject non-blank
        # content instead, consistently with parse_netscape_bookmarks().
        if line and not correct_doctype_found:
            raise ValueError("Couldn't find a correct DOCTYPE in the bookmark file (wrong format?)")
        if RE_BOOKMARK_LINE.search(line):
            m = RE_BOOKMARK_URL.search(line)
            if m:
                bmk_url = m.group("url")
                expanded_url = expand_url(bmk_url)
                line = line.replace(bmk_url, expanded_url)
                # Delicious exports title twitter-extracted links "None":
                # replace that placeholder title with the expanded url,
                # keeping the closing tag's original case via the backref.
                # BUG FIX: the original passed re.IGNORECASE where the input
                # string goes (a TypeError at runtime) and discarded the
                # result of re.sub().
                if "from twitter" in line:
                    line = re.sub(r">None</(A)>", r">%s</\1>" % expanded_url,
                                  line, flags=re.IGNORECASE)
        outputLines.append(line)
        if len(outputLines) == 1000:
            print("flush %s" % outputLines[0])
            # Lines keep their original trailing newline (only the left side
            # was stripped), so join with "" — the original "\n".join added
            # spurious blank lines and glued lines at flush boundaries.
            outputFile.write("".join(outputLines))
            del outputLines[:]
    outputFile.write("".join(outputLines))
if __name__ == '__main__':
    # Command-line entry point: PRINT lists the bookmarks of a file,
    # EXPAND rewrites a copy of it with shortened urls expanded.
    USAGE = """\
USAGE: netscape_bookmarks.py PRINT bookmarkfilepath.html
or netscape_bookmarks.py EXPAND bookmarkfilepath.html
In the second case a new file is created called bookmarkfilepath_expanded.html
"""
    if len(sys.argv) !=3:
        print(USAGE)
        # Conventional "bad usage" exit status.
        sys.exit(2)
    if sys.argv[1]=="PRINT":
        # parse_netscape_bookmarks() expects the whole content as a string.
        bookmarks = parse_netscape_bookmarks(open(sys.argv[2], 'r+').read())
        print("Found %d bookmarks" % len(bookmarks))
        for b in bookmarks:
            print(" - %s: %s (%s)" % (b.get("title","<no title>"), b["url"], b.get("note","")))
    elif sys.argv[1]=="EXPAND":
        # Write the expanded copy next to the input as <name>_expanded<ext>.
        input_file_path = os.path.abspath(sys.argv[2])
        input_path,input_ext = os.path.splitext(input_file_path)
        new_file_path = input_path+"_expanded"+input_ext
        expand_short_urls(open(input_file_path,"r+"),open(new_file_path,"w"))
        print("Bookmarks file with expanded short urls is at %s" % new_file_path)
| StarcoderdataPython |
1800144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
import datetime
from tqsdk import TqApi, TqSim
from tqsdk.ta import *
# Demo: fetch klines for a SHFE copper future via a simulated tqsdk account
# and print a selection of technical-analysis indicators.
api = TqApi(TqSim())
# Get a reference to the 10-second klines of cu1906, keeping the latest
# 3000 bars. (Original comment: 获得 cu1906 10秒K线的引用)
klines = api.get_kline_serial("SHFE.cu1906", 10, data_length=3000)
# Kline timestamps are in nanoseconds; divide by 1e9 for datetime seconds.
print("K线时间", datetime.datetime.fromtimestamp(klines.iloc[-1]["datetime"] / 1e9))
print(klines)
# Indicators from tqsdk.ta, each computed over the kline series.
print("ATR",ATR(klines, 26))
print("BIAS",BIAS(klines, 6))
print("BOLL",BOLL(klines, 3, 5))
print("DMI",DMI(klines, 14, 6))
print("KDJ",KDJ(klines, 9, 3, 3))
print("MA",MA(klines, 3))
print("MACD",MACD(klines, 20, 35, 10))
print("SAR",SAR(klines, 4, 0.02, 0.2))
# Release the websocket connection held by the API instance.
api.close()
| StarcoderdataPython |
4813043 | <filename>pyknotid/spacecurves/__init__.py
'''.. image:: random_walk_length_30.png
:scale: 50%
:alt: A closed random walks with 30 steps
:align: center
This module contains classes and functions for working with knots and
links as three-dimensional space curves, or calling functions
elsewhere in pyknotid to perform topological analysis. Functionality
includes manipulating knots/links via translation, rotation and
scaling, plotting diagrams, finding crossings and identifying knots.
Different knot classes
----------------------
pyknotid includes the following classes for topological calculation:
- :doc:`spacecurve`: Provides functions for calculations on a single
curve, including plotting, some geometrical properties and finding
crossings in projection.
- :doc:`knot`: Provides functions for topological calculations on a
single curve, such as the Alexander polynomial or Vassiliev
invariants.
- :doc:`openknot`: Provides functions for topological calculations on
an open curve that does not form a closed loop. Open curves are
topologically trivial from a mathematical perspective, but can be
analysed in terms of the topology of different closures.
- :doc:`link`: Provides the same interface to collections of multiple
curves, and can calculate linking invariants.
- :doc:`periodiccell`: Provides some convenience functions for
managing collections of curves in periodic boundaries.
Creating space curves
---------------------
The space curve classes are specified via N by 3 arrays of points in
three dimensions, representing a piecewise linear curve.
For instance, the following code produces and plots a
:class:`~pyknotid.spacecurves.knot.Knot` from a set of manually
specified points::
import numpy as np
from pyknotid.spacecurves import Knot
points = np.array([[9.0, 0.0, 0.0],
[0.781, 4.43, 2.6],
[-4.23, 1.54, -2.6],
[-4.5, -7.79, -7.35e-16],
[3.45, -2.89, 2.6],
[3.45, 2.89, -2.6],
[-4.5, 7.79, 0.0],
[-4.23, -1.54, 2.6],
[0.781, -4.43, -2.6]])
k = Knot(points)
k.plot()
.. image:: trefoil_few_points.png
:align: center
:alt: A trefoil knot specified by vertex points
:scale: 50%
The :doc:`pyknotid.make module<../make/index>` provides functions for
creating many types of example knots, such as torus knots or some
specific knot types::
import numpy as np
from pyknotid.make import torus_knot
k = torus_knot(7, 4)
k.plot()
.. image:: p7_q4__torus_knot.png
:align: center
:scale: 50%
:alt: A p=7 and q=4 knot produced by the above code
'''
from pyknotid.spacecurves.spacecurve import SpaceCurve
from pyknotid.spacecurves.knot import Knot
from pyknotid.spacecurves.link import Link
from pyknotid.spacecurves.openknot import OpenKnot
from pyknotid.spacecurves.periodiccell import Cell
# Public API re-exported at package level; the trailing comma keeps the
# literal a tuple.
__all__ = ('SpaceCurve', 'Knot', 'Link', 'OpenKnot', 'Cell', )
| StarcoderdataPython |
4976557 | <filename>tests/test_models/test_loss_compatibility.py
# Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_loss_compatibility.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
    """Find the predefined detector config directory."""
    try:
        # Normal case: this file lives inside the source mmdetection repo,
        # three levels below the repository root.
        repo_root = dirname(dirname(dirname(__file__)))
    except NameError:
        # IPython development: __file__ may be undefined, so locate the
        # installed mmdet package instead.
        import mmdet
        repo_root = dirname(dirname(mmdet.__file__))
    config_dir = join(repo_root, 'configs')
    if not exists(config_dir):
        raise Exception('Cannot find config path')
    return config_dir
def _get_config_module(fname):
    """Load a configuration as a python module."""
    # Imported lazily so merely importing this test module does not pull
    # in mmcv.
    from mmcv import Config
    return Config.fromfile(join(_get_config_directory(), fname))
def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    These are deep copied to allow for safe modification of parameters
    without influencing other tests.
    """
    return copy.deepcopy(_get_config_module(fname).model)
@pytest.mark.parametrize('loss_bbox', [
    dict(type='L1Loss', loss_weight=1.0),
    dict(type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0),
    dict(type='IoULoss', loss_weight=1.0),
    dict(type='BoundedIoULoss', loss_weight=1.0),
    dict(type='GIoULoss', loss_weight=1.0),
    dict(type='DIoULoss', loss_weight=1.0),
    dict(type='CIoULoss', loss_weight=1.0),
    dict(type='MSELoss', loss_weight=1.0),
    dict(type='SmoothL1Loss', loss_weight=1.0),
    dict(type='BalancedL1Loss', loss_weight=1.0)
])
def test_bbox_loss_compatibility(loss_bbox):
    """Test loss_bbox compatibility.

    Using Faster R-CNN as a sample, modifying the loss function in the config
    file to verify the compatibility of Loss APIS
    """
    # Faster R-CNN config dict
    config_path = '_base_/models/faster_rcnn_r50_fpn.py'
    cfg_model = _get_detector_cfg(config_path)
    # Small synthetic batch: one 256x256 image with 10 ground-truth boxes.
    input_shape = (1, 3, 256, 256)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # IoU-family losses operate on decoded boxes rather than deltas.
    if 'IoULoss' in loss_bbox['type']:
        cfg_model.roi_head.bbox_head.reg_decoded_bbox = True
    # Swap the regression loss under test into the bbox head.
    cfg_model.roi_head.bbox_head.loss_bbox = loss_bbox
    from mmdet.models import build_detector
    detector = build_detector(cfg_model)
    loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(loss, dict)
    loss, _ = detector._parse_losses(loss)
    # A positive total loss shows the swapped-in loss actually computed.
    assert float(loss.item()) > 0
@pytest.mark.parametrize('loss_cls', [
    dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
    dict(
        type='FocalLoss',
        use_sigmoid=True,
        gamma=2.0,
        alpha=0.25,
        loss_weight=1.0),
    dict(
        type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0)
])
def test_cls_loss_compatibility(loss_cls):
    """Test loss_cls compatibility.

    Using Faster R-CNN as a sample, modifying the loss function in the config
    file to verify the compatibility of Loss APIS
    """
    # Faster R-CNN config dict
    config_path = '_base_/models/faster_rcnn_r50_fpn.py'
    cfg_model = _get_detector_cfg(config_path)
    # Small synthetic batch: one 256x256 image with 10 ground-truth boxes.
    input_shape = (1, 3, 256, 256)
    mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
    imgs = mm_inputs.pop('imgs')
    img_metas = mm_inputs.pop('img_metas')
    # verify class loss function compatibility
    # for loss_cls in loss_clses:
    cfg_model.roi_head.bbox_head.loss_cls = loss_cls
    from mmdet.models import build_detector
    detector = build_detector(cfg_model)
    loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
    assert isinstance(loss, dict)
    loss, _ = detector._parse_losses(loss)
    # A positive total loss shows the swapped-in loss actually computed.
    assert float(loss.item()) > 0
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
                    num_items=None, num_classes=10,
                    with_semantic=False):  # yapf: disable
    """Create a superset of inputs needed to run test or train batches.

    Args:
        input_shape (tuple):
            input batch dimensions (N, C, H, W)
        num_items (None | List[int]):
            specifies the number of boxes in each batch item; when None,
            a random count in [1, 10) is drawn per image
        num_classes (int):
            number of different labels a box might have
        with_semantic (bool):
            if True, also emit a 'gt_semantic_seg' map at 1/8 image scale
    """
    from mmdet.core import BitmapMasks
    (N, C, H, W) = input_shape

    # One seeded RNG for *all* random draws so the fixture is fully
    # reproducible.  (Bug fix: masks and semantic maps previously used the
    # unseeded global numpy RNG, so they varied between runs.)
    rng = np.random.RandomState(0)
    imgs = rng.rand(*input_shape)

    img_metas = [{
        'img_shape': (H, W, C),
        'ori_shape': (H, W, C),
        'pad_shape': (H, W, C),
        'filename': '<demo>.png',
        'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
        'flip': False,
        'flip_direction': None,
    } for _ in range(N)]

    gt_bboxes = []
    gt_labels = []
    gt_masks = []
    for batch_idx in range(N):
        if num_items is None:
            num_boxes = rng.randint(1, 10)
        else:
            num_boxes = num_items[batch_idx]
        # Random relative centers/sizes converted to clipped (x1, y1, x2, y2)
        # corner boxes in pixel coordinates.
        cx, cy, bw, bh = rng.rand(num_boxes, 4).T
        tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
        tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
        br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
        br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
        boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
        class_idxs = rng.randint(1, num_classes, size=num_boxes)
        gt_bboxes.append(torch.FloatTensor(boxes))
        gt_labels.append(torch.LongTensor(class_idxs))
        mask = rng.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
        gt_masks.append(BitmapMasks(mask, H, W))

    mm_inputs = {
        'imgs': torch.FloatTensor(imgs).requires_grad_(True),
        'img_metas': img_metas,
        'gt_bboxes': gt_bboxes,
        'gt_labels': gt_labels,
        'gt_bboxes_ignore': None,
        'gt_masks': gt_masks,
    }
    if with_semantic:
        # assume gt_semantic_seg using scale 1/8 of the img
        gt_semantic_seg = rng.randint(
            0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
        mm_inputs.update(
            {'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
    return mm_inputs
| StarcoderdataPython |
def quick_sort(arr):
    """Return a sorted copy of *arr* using randomized quicksort.

    A pivot is drawn uniformly at random; elements are partitioned into
    three buckets (smaller / equal / larger) and the two outer buckets
    are sorted recursively.
    """
    from random import randint

    # A sequence of length 0 or 1 is already sorted.
    if len(arr) <= 1:
        return arr
    pivot = arr[randint(0, len(arr) - 1)]
    smaller = [item for item in arr if item < pivot]
    equal = [item for item in arr if item == pivot]
    larger = [item for item in arr if item > pivot]
    return quick_sort(smaller) + equal + quick_sort(larger)
11377593 | import requests
import re
import json
class Neihan:
    """Scrape joke text from a haha.mx topic listing page and append it to
    a local ``neihan.txt`` file."""

    def __init__(self):
        # {} placeholder is the listing page number.
        self.temp_url = "https://www.haha.mx/topic/13648/new/{}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)\
            AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"}

    def parse_url(self, url):
        """GET *url* with the browser-like headers and return the decoded body."""
        resp = requests.get(url, headers=self.headers)
        return resp.content.decode()

    def get_first_page_content_list(self, html_str):
        """Extract the joke bodies from a listing page's HTML."""
        content_list = re.findall(r"<p class=\"word-wrap joke-main-content-text\">(.*?)</p>", html_str, re.S)
        return content_list

    def save_content_list(self, content_list):
        """Append each extracted item to neihan.txt as a JSON string."""
        with open("neihan.txt", "a", encoding="utf-8") as f:
            for cont in content_list:
                f.write(json.dumps(cont, ensure_ascii=False))
        print("保存成功")

    def run(self):
        """Fetch page 1 of the topic, extract its jokes and persist them.

        Bug fix: the original version then entered a pagination loop that
        called ``self.get_content_list`` -- a method that does not exist on
        this class -- so it always crashed with AttributeError right after
        saving the first page.  The dead loop has been removed; the method
        now cleanly processes a single page.
        """
        num = 1
        # start_url
        url = self.temp_url.format(num)
        # 发送请求 (send the request)
        html_str = self.parse_url(url)
        # 提取数据 (extract the data)
        contetn_list = self.get_first_page_content_list(html_str)
        # 保存 (save)
        self.save_content_list(contetn_list)
if __name__ == "__main__":
    # Script entry point: build the scraper and run one scrape cycle.
    spider = Neihan()
    spider.run()
| StarcoderdataPython |
114962 | from django.apps import apps
from django.forms import inlineformset_factory
from cv.models import Book, BookEdition, \
Chapter, ChapterEditorship, \
Grant, GrantCollaboration, \
Talk, Presentation, \
Course, CourseOffering
def get_authorship_fields():
    """Return the tuple of form fields shared by the authorship formsets.

    (The original docstring described these as "student collaboration"
    fields returned as a list; they are the authorship fields, returned
    as a tuple.)
    """
    authorship_fields = ('collaborator', 'print_middle', 'display_order',
                         'student_colleague')
    return authorship_fields
def authorship_formset_factory(model_name=None, **kwargs):
    """Return authorship formset for model if exists or None otherwise."""
    parent_model = apps.get_model('cv', model_name)
    try:
        # EAFP: the companion "<model>authorship" through-model may not exist.
        through_model = apps.get_model('cv', '%sauthorship' % model_name)
        return inlineformset_factory(
            parent_model, through_model,
            fields=get_authorship_fields(), **kwargs)
    except LookupError:
        return None
def edition_formset_factory(**kwargs):
    """Build an inline formset for managing the editions of a Book."""
    edition_fields = ['edition', 'pub_date', 'submission_date', 'publisher',
                      'place', 'num_pages', 'isbn']
    return inlineformset_factory(
        Book, BookEdition, fields=edition_fields, **kwargs)
def editorship_formset_factory(**kwargs):
    """Build an inline formset for chapter editorships."""
    # Editorships reuse the first three authorship fields
    # (no student_colleague flag).
    editorship_fields = get_authorship_fields()[:3]
    return inlineformset_factory(
        Chapter, ChapterEditorship, fields=editorship_fields, **kwargs)
def grant_collaboration_formset_factory(**kwargs):
    """Create a set of forms representing a grant's collaborations."""
    collaboration_fields = ['collaborator', 'role', 'is_pi', 'display_order']
    return inlineformset_factory(
        Grant, GrantCollaboration, fields=collaboration_fields, **kwargs)
def presentation_formset_factory(**kwargs):
    """Build an inline formset for the presentations attached to a Talk."""
    presentation_fields = ['presentation_date', 'event', 'event_acronym',
                           'city', 'state', 'country', 'type']
    return inlineformset_factory(
        Talk, Presentation, fields=presentation_fields, **kwargs)
def offering_formset_factory(**kwargs):
    """Create a set of forms for the course offerings of a course.

    Bug fix: ``**kwargs`` was accepted but silently dropped instead of being
    forwarded to ``inlineformset_factory``, unlike every sibling factory in
    this module (e.g. ``extra=``/``can_delete=`` passed by callers had no
    effect).
    """
    return inlineformset_factory(
        Course, CourseOffering,
        fields=['term', 'start_date', 'end_date', 'institution',
                'course_number'], **kwargs)
| StarcoderdataPython |
184561 | """
Routine to add Moster et al. 2013 stellar masses
python3 lc_add_Ms_Mo13.py 115 MD10
"""
import sys
ii = int(sys.argv[1])
env = sys.argv[2] # 'MD10'
status = sys.argv[3]
import h5py # HDF5 support
import os
import glob
import numpy as n
h5_dir = os.path.join(os.environ[env], 'cluster_h5/' )
input_list = n.array(glob.glob(os.path.join(h5_dir, "hlist_?.?????.hdf5")))
input_list.sort()
from scipy.stats import lognorm
from scipy.stats import norm
import astropy.units as u
import astropy.constants as cc
from astropy.cosmology import FlatLambdaCDM
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)
from scipy.interpolate import interp1d
"""
definitions
-----------
- Planck flat LCDM cosmology
- :math:`m = ln(M_{500} / (10^{15} M_\odot))`
- :math:`m_{gas} = ln(M_{gas, 500} / (10^{15} M_\odot))` is the gas mass within r500
- :math:`m_{lens} = ln(M_{lens, 500} / (10^{15} M_\odot))` is the spherical mass estimate from lensing corresponding to an
idealized shear profile without statistical noise
- :math:`l = ln(L_{500} / (E(z) 10^{44} erg/s))` where L is defined as the cluster rest-frame luminosity in the 0.1 - 2.4 keV band.
- :math:`t = ln(kT_{500} / keV)` where kT is the emission weighted temperature measured in annulus from 0.15 to 1.0 r500
- :math:`E(z) = H(z)/H_0`
- :math:`\epsilon = ln(E(z))`
Parameters
----------
* normalization for parameter X, :math:`N_X`
* slope for E(z) for parameter X, :math:`slope_E_X`
* slope for M500 for parameter X, :math:`slope_{M500}_X`
Workflow
--------
- Select relaxed clusters from the DM point of view. to be defined how ... with T/U ? Look at the publications from Sembolini, Yepes, Knebe ...
- using M15, add gas density profile and temperature using scaling relations
"""
# DIETRICH 2017 scaling-relation calibration (active).
# N_X: normalization, slope_E_X: E(z) slope, slope_M500_X: mass slope,
# scatter_X: intrinsic log-space scatter for observable X.
N_Mgas = 31.92 # Dietrich 17
N_kT = 2.18
N_L = 103.7
N_Lce = 102.66
slope_E_Mgas = 0.05 # Dietrich 17
slope_E_kT = 0.61
slope_E_L = 1.20
slope_E_Lce = 1.82
slope_M500_Mgas= 1.398 # Dietrich 17
slope_M500_kT = 0.66
slope_M500_L = 1.43 # 1.26*(1.+0.33*0.43)
slope_M500_Lce = 1.36 # 1.06*(1.+0.33*0.88)
scatter_Mgas = 0.106 # Dietrich 17
scatter_kT = 0.18
scatter_L = 0.24
scatter_Lce = 0.17
# MANTZ 2016 calibration, kept for reference (disabled).
#N_Mgas = 31.98
#N_kT = 2.18
#N_L = 103.7
#N_Lce = 102.66
#slope_E_Mgas = -0.11
#slope_E_kT = 0.61
#slope_E_L = 1.20
#slope_E_Lce = 1.82
#slope_M500_Mgas= 1.04
#slope_M500_kT = 0.66
#slope_M500_L = 1.26
#slope_M500_Lce = 1.06
#scatter_Mgas = 0.086
#scatter_kT = 0.18
#scatter_L = 0.24
#scatter_Lce = 0.17

# E(z) at z = 0.35, the pivot redshift of the scaling relations.
E035 = cosmoMD.efunc(0.35)


# The converters below were lambdas assigned to names (PEP 8 E731);
# rewritten as plain functions with the same names and signatures.
def m500_to_qty(logM500, z, slope_efunc, slope_m500, normalization):
    """Convert log10(M500c) at redshift *z* into a cluster observable.

    Power law in E(z) (pivot z = 0.35) and in M500 (pivot 6e14 Msun),
    with overall normalization exp(*normalization*).
    """
    return (n.e**normalization
            * (cosmoMD.efunc(z) / E035)**(slope_efunc)
            * (10**(logM500 - n.log10(6) - 14))**(slope_m500))


def logM500_to_logMgas(logM500, z):
    """Mean gas mass M_gas,500 for a given log10(M500c)."""
    return m500_to_qty(logM500, z, slope_E_Mgas, slope_M500_Mgas, N_Mgas)


def logM500_to_kT(logM500, z):
    """Mean emission-weighted temperature kT_500 [keV]."""
    return m500_to_qty(logM500, z, slope_E_kT, slope_M500_kT, N_kT)


def logM500_to_L(logM500, z):
    """Mean rest-frame X-ray luminosity L_500."""
    return m500_to_qty(logM500, z, slope_E_L, slope_M500_L, N_L)


def logM500_to_Lce(logM500, z):
    """Mean core-excised X-ray luminosity Lce_500."""
    return m500_to_qty(logM500, z, slope_E_Lce, slope_M500_Lce, N_Lce)
# Open the selected snapshot in read/write mode and attach the sampled
# observables to its /cluster_data group.
file_1 = input_list[ii]
print(file_1)
f1 = h5py.File(file_1, "r+")
z = f1.attrs['redshift']
# NOTE(review): Dataset.value was removed in h5py >= 3.0; this line needs
# an older h5py (or `[()]` access) -- confirm the pinned version.
log_m500c = n.log10(f1['/halo_properties/M500c'].value)
nCluster = len(log_m500c)
#rds = (n.random.rand(len(log_m500c))-0.5)*2.
# For each observable: mean scaling relation + Gaussian intrinsic scatter
# added in log space (kT is handled in linear keV -- see note below).
Mean_Mgas = n.log10(logM500_to_logMgas (log_m500c, z))
V_scatter_Mgas = norm.rvs(loc=0,scale=scatter_Mgas,size=nCluster)
VAL_Mgas = Mean_Mgas + V_scatter_Mgas
# NOTE(review): unlike the other observables, kT is not log10-ed before the
# scatter is added -- presumably intentional (keV units), worth confirming.
Mean_kT = logM500_to_kT(log_m500c, z)
V_scatter_kT = norm.rvs(loc=0,scale=scatter_kT,size=nCluster)
VAL_kT = Mean_kT + V_scatter_kT
Mean_L = n.log10(logM500_to_L(log_m500c, z))
V_scatter_L = norm.rvs(loc=0,scale=scatter_L,size=nCluster)
VAL_L = Mean_L + V_scatter_L
Mean_Lce = n.log10(logM500_to_Lce(log_m500c, z))
V_scatter_Lce = norm.rvs(loc=0,scale=scatter_Lce,size=nCluster)
VAL_Lce = Mean_Lce + V_scatter_Lce
# 'create': add brand-new datasets (fails if they already exist).
if status=='create':
    ds = f1['/cluster_data'].create_dataset('log_Mgas', data = VAL_Mgas )
    ds.attrs['units'] = 'log10(Msun)'
    ds = f1['/cluster_data'].create_dataset('kT', data = VAL_kT )
    ds.attrs['units'] = 'keV'
    ds = f1['/cluster_data'].create_dataset('log_LX_05_24', data = VAL_L )
    ds.attrs['units'] = 'log10(L 0.5-2.4 keV/[erg/s])'
    ds = f1['/cluster_data'].create_dataset('log_LceX_05_24', data = VAL_Lce )
    ds.attrs['units'] = 'log10(Lce 0.5-2.4 keV/[erg/s])'
# 'update': overwrite the existing datasets in place.
# NOTE(review): chained assignment -- each line slice-assigns the new values
# into the HDF5 dataset AND binds `ds` to the plain numpy array; only the
# slice assignment matters here.
if status=='update':
    ds = f1['/cluster_data/log_Mgas'][:] = VAL_Mgas
    ds = f1['/cluster_data/kT'][:] = VAL_kT
    ds = f1['/cluster_data/log_LX_05_24'][:] = VAL_L
    ds = f1['/cluster_data/log_LceX_05_24'][:] = VAL_Lce
f1.close()
| StarcoderdataPython |
11241644 | from DavesLogger import Color
from DavesLogger import Logs
class Log:
    """A reusable console message template.

    Stores a default message plus prefix/suffix decorations; calling the
    instance prints either an override message or the stored default,
    optionally inserting ``Color.Reset`` between the segments so styling
    cannot bleed from one part into the next.
    """

    def __init__(self, Message='', Prefix='', Suffix=''):
        self.Message = Message   # default text printed when no override is given
        self.IPrefix = Prefix    # internal prefix, prepended on every print
        self.ISuffix = Suffix    # internal suffix, appended on every print

    def __call__(self, _Message='', _AutoReset=True):
        """Print the log line; a non-empty *_Message* overrides the default.

        Bug fix: ``!= None`` replaced by ``is not None`` (identity is the
        correct test for None); the duplicated four-way print branch is
        collapsed into message selection + one reset decision.
        """
        if _Message != '' and _Message is not None:
            text = _Message
        else:
            text = self.Message
        if _AutoReset:
            # Reset terminal color around every segment.
            print(self.IPrefix + Color.Reset + text + Color.Reset +
                  self.ISuffix + Color.Reset)
        else:
            print(self.IPrefix + text + self.ISuffix)

    def Template(self, _Template):
        """Return a fresh Log copied from *_Template*.

        NOTE(review): this ignores ``self`` entirely and behaves like an
        alternate constructor; it may belong as a @staticmethod.  Kept as an
        instance method so the external interface is unchanged.
        """
        if not isinstance(_Template, Log):
            Logs.Error('No template found!')
            return
        return Log(_Template.Message, _Template.IPrefix, _Template.ISuffix)

    def Prefix(self, _Prefix):
        """Return a new Log with *_Prefix* appended to the current prefix."""
        return Log(self.Message, self.IPrefix + _Prefix, self.ISuffix)

    def Suffix(self, _Suffix):
        """Return a new Log with *_Suffix* appended to the current suffix."""
        return Log(self.Message, self.IPrefix, self.ISuffix + _Suffix)
| StarcoderdataPython |
6663258 | import os
import numpy
from numpy import *
import math
from scipy import integrate, linalg
from matplotlib import pyplot
from pylab import *
def build_freestream_rhs(panels, freestream):
    """
    Builds the right-hand side of the system
    arising from the freestream contribution.

    Fixes: removed a garbled token from the docstring ("panels.MMTBMS4N6")
    and a leftover debug ``print(b)`` before the return.

    Parameters
    ----------
    panels: 1D array of Panel objects
        List of panels.
    freestream: Freestream object
        Freestream conditions.

    Returns
    -------
    b: 1D Numpy array of floats
        Freestream contribution on each panel and on the Kutta condition.
    """
    b = numpy.empty(panels.size + 1, dtype=float)
    # freestream contribution on each panel
    for i, panel in enumerate(panels):
        b[i] = -freestream.u_inf * numpy.cos(freestream.alpha - panel.beta)
    # freestream contribution on the Kutta condition (first and last panels)
    b[-1] = -freestream.u_inf * (numpy.sin(freestream.alpha - panels[0].beta)
                                 + numpy.sin(freestream.alpha - panels[-1].beta))
    return b
| StarcoderdataPython |
3235494 | <filename>morepath/tests/test_internal.py
import morepath
from webtest import TestApp as Client
def test_internal():
    """A view registered with internal=True is callable server-side via
    request.view() but is not exposed as an HTTP route (404)."""
    class app(morepath.App):
        pass
    @app.path(path='')
    class Root(object):
        pass
    @app.json(model=Root)
    def root_default(self, request):
        # Public view delegates to the internal view in-process.
        return {'internal': request.view(self, name='internal')}
    @app.json(model=Root, name='internal', internal=True)
    def root_internal(self, request):
        return 'Internal!'
    c = Client(app())
    response = c.get('/')
    assert response.body == b'{"internal":"Internal!"}'
    # The internal view must not be reachable over HTTP.
    c.get('/internal', status=404)
| StarcoderdataPython |
8046781 | import torch.nn as nn
import pandas as pd
import torch
def create_loss():
    """Build and return the cross-entropy classification criterion."""
    criterion = nn.CrossEntropyLoss()
    return criterion
| StarcoderdataPython |
4990364 | import sys
import py
from pypy.translator.llvm.test.runtest import *
def setup_module(mod):
    # py.test module-level hook: unconditionally skip every test below.
    # The tests are kept for reference but considered futile upstream.
    py.test.skip('skipping somewhat futile tests')
def test_GC_malloc():
    """Compile a tuple-allocating loop with the Boehm GC and check that
    repeated calls do not grow the heap unboundedly (i.e. the GC actually
    collects the per-iteration tuples)."""
    def tuple_getitem(n):
        x = 666
        i = 0
        while i < n:
            # A fresh 11-tuple per iteration: this is the garbage the GC
            # must reclaim.
            l = (1,2,i,4,5,6,7,8,9,10,11)
            x += l[2]
            i += 1
        return x
    mod, f = compile_test(tuple_getitem, [int], gcpolicy="boehm")
    n = 5000
    result = tuple_getitem(n)
    # Compiled function must agree with the interpreted one.
    assert f(n) == result
    get_heap_size = mod.GC_get_heap_size_wrapper
    heap_size_start = get_heap_size()
    for i in range(0,25):
        assert f(n) == result
    heap_size_inc = get_heap_size() - heap_size_start
    # Some growth is allowed, but far less than 25 runs' worth of garbage.
    assert heap_size_inc < 1000000
def test_nogc():
    """Same workload as test_GC_malloc but compiled with gcpolicy="none":
    only functional correctness is checked (no heap accounting)."""
    def tuple_getitem(n):
        x = 666
        i = 0
        while i < n:
            l = (1,2,i,4,5,6,7,8,9,10,11)
            x += l[2]
            i += 1
        return x
    mod, f = compile_test(tuple_getitem, [int], gcpolicy="none")
    assert f(5000) == tuple_getitem(5000)
def test_ref():
    """Same workload again, compiled with the reference-counting GC policy
    ("ref"); checks functional correctness only."""
    def tuple_getitem(n):
        x = 666
        i = 0
        while i < n:
            l = (1,2,i,4,5,6,7,8,9,10,11)
            x += l[2]
            i += 1
        return x
    mod, f = compile_test(tuple_getitem, [int], gcpolicy="ref")
    assert f(5000) == tuple_getitem(5000)
| StarcoderdataPython |
1856910 | import pytest
from graphene.test import Client
from blapp.api.schema import schema
@pytest.fixture
def schema_client():
    """Graphene test client bound to the project's GraphQL schema."""
    return Client(schema)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.