text string | size int64 | token_count int64 |
|---|---|---|
from haystack.nodes.question_generator.question_generator import QuestionGenerator | 82 | 17 |
# -*- coding: utf-8 -*-
import uuid
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.contrib.postgres.fields import JSONField
class Factory(models.Model):
    """A factory that is potentially operating illegally.

    The location is stored both as the raw ``lat``/``lng`` floats entered by
    the reporter and as a PostGIS ``point`` that is recomputed from them on
    every save (see ``save()``).
    """

    # Choices for factory_type: (code, human-readable label in zh-TW).
    factory_type_list = [
        ("1","金屬"),
        ("2-1","沖床、銑床、車床、鏜孔"),
        ("2-2", "焊接、鑄造、熱處理"),
        ("2-3", "金屬表面處理、噴漆"),
        ("3", "塑膠加工、射出"),
        ("4", "橡膠加工"),
        ("5", "非金屬礦物(石材)"),
        ("6", "食品"),
        ("7", "皮革"),
        ("8", "紡織"),
        ("9", "其他")
    ]
    # Choices for status: D = reported, F = incomplete data, A = pending review.
    status_list = [
        ("D","已舉報"),
        ("F","資料不齊"),
        ("A","待審核")
    ]

    # Random UUID primary key instead of an auto-increment integer.
    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
        verbose_name="ID",
    )
    # Raw coordinates as reported (treated as WGS84 / EPSG:4326 in save()).
    lat = models.FloatField()
    lng = models.FloatField()
    # Derived PostGIS point, stored in the project's configured SRID.
    point = models.PointField(srid=settings.POSTGIS_SRID)
    # Cadastral land code, if known.
    landcode = models.CharField(max_length=50, blank=True, null=True)
    name = models.CharField(max_length=50, blank=True, null=True)
    factory_type = models.CharField(max_length=3, choices=factory_type_list, default="9")
    status = models.CharField(max_length=1, choices=status_list, default="A")
    # NOTE(review): auto_now_add sets this only at creation; if it is meant to
    # track the time of the latest status change, callers must update it
    # explicitly — confirm intended semantics.
    status_time = models.DateTimeField(auto_now_add=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def save(self, *args, **kwargs):
        """Recompute ``point`` from lat/lng, then persist.

        The raw coordinates are interpreted as EPSG:4326 and transformed into
        the SRID configured in settings before the row is written.
        """
        self.point = Point(self.lng, self.lat, srid=4326)
        self.point.transform(settings.POSTGIS_SRID)
        super(Factory, self).save(*args, **kwargs)
class ReportRecord(models.Model):
    """A report action submitted by a user about a factory.

    ``ReportRecord`` rows are queried in advance by admins from
    Citizen of the Earth, Taiwan. They filter the most recent
    records out every few weeks to catch the bad guys.
    """

    id = models.AutoField(primary_key=True)
    # PROTECT: a factory with report records cannot be deleted.
    factory = models.ForeignKey("Factory", on_delete=models.PROTECT)
    user_ip = models.GenericIPAddressField(default="192.168.0.1", blank=True, null=True)
    # HTTP verb of the reporting request (e.g. PUT, POST).
    action_type = models.CharField(max_length=10)
    # Raw request body, kept for auditing.
    action_body = JSONField()
    created_at = models.DateTimeField(auto_now_add=True)
    # Optional contact information left by the reporter.
    contact = models.CharField(max_length=64, blank=True, null=True)
    # Free-form additional remarks.
    others = models.CharField(max_length=1024, blank=True)
class Image(models.Model):
    """An image of a factory uploaded by a user (hosted externally)."""

    id = models.UUIDField(
        primary_key=True,
        default=uuid.uuid4,
        editable=False,
    )
    # Nullable: an image may be uploaded before it is linked to a factory.
    factory = models.ForeignKey(
        "Factory",
        on_delete=models.PROTECT,
        related_name="images",
        blank=True,
        null=True,
    )
    report_record = models.ForeignKey(
        "ReportRecord",
        on_delete=models.PROTECT,
        blank=True,
        null=True,
    )
    # URL of the hosted image (obtained from Imgur).
    image_path = models.URLField(max_length=256)
    # The DB saving time.
    created_at = models.DateTimeField(auto_now_add=True)
    # The actual time the photo was taken (from EXIF, if available).
    orig_time = models.DateTimeField(blank=True, null=True)
| 3,178 | 1,152 |
import json as _json
import datetime as _datetime
def parse_timestamp(dataset, time_format="%Y-%m-%dT%H:%M:%S.000Z"):
    """Convert each record's ``timestamp`` string into a datetime, in place.

    :param dataset: iterable of dicts, each with a ``timestamp`` string
    :param time_format: strptime format of the timestamp strings
    :return: the same ``dataset`` object, for call chaining
    """
    strptime = _datetime.datetime.strptime
    for record in dataset:
        record["timestamp"] = strptime(record["timestamp"], time_format)
    return dataset
def load_json(filename, time_format="%Y-%m-%dT%H:%M:%S.000Z"):
    """Load a JSON file of records and parse their ``timestamp`` fields.

    :param filename: path of the JSON file (a list of record objects)
    :param time_format: strptime format forwarded to ``parse_timestamp``
    :return: the loaded records with ``timestamp`` converted to datetime
    """
    # The original pre-initialized a throwaway ``dict()`` that json.load
    # immediately replaced; the local is also a list of records, not a dict.
    with open(filename) as f:
        dataset = _json.load(f)
    return parse_timestamp(dataset, time_format)
def generate_config(dataset):
    """Build a test-window config from the first and last record timestamps.

    :param dataset: non-empty sequence of records with a ``timestamp`` key
    :return: dict with ``test_start`` and ``test_end`` timestamps
    """
    first_record, last_record = dataset[0], dataset[-1]
    return {
        "test_start": first_record["timestamp"],
        "test_end": last_record["timestamp"]
    }
| 656 | 225 |
# -*- coding: utf-8 -*-
import copy
class Solution(object):
    """Two-sum solvers.

    Example:
        Given nums = [2, 7, 11, 15], target = 9,
        since nums[0] + nums[1] == 2 + 7 == 9, (0, 1) is returned.
    """

    def twoSum(self, nums, target):
        """Brute-force O(n^2) search over index pairs.

        :type nums: List[int]
        :type target: int
        :rtype: tuple of the two indices, or None if no pair sums to target

        Iterates over index pairs directly: the original removed nums[i]
        from a copy and located the partner with nums.index(), which
        returned the wrong index whenever the list contained duplicate
        values (e.g. [3, 3] with target 6 yielded (0, 0)).
        """
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                if nums[i] + nums[j] == target:
                    return i, j
        return None

    def two_sum(self, nums, target):
        """Single-pass O(n) hash-map solution.

        :type nums: List[int]
        :type target: int
        :rtype: tuple of the two indices, or None if no pair sums to target

        Records each value's index as it is seen and looks up the current
        value's complement among *previously seen* values only, so an
        element can never be paired with itself (the original's
        ``val in nums`` check could return the same index twice).
        """
        seen = {}
        for i, num in enumerate(nums):
            complement = target - num
            if complement in seen:
                return seen[complement], i
            seen[num] = i
        return None
if __name__ == '__main__':
    # Quick manual check: both solvers should report the same index pair.
    nums = [3, 4, 10, 2, 7]
    target = 9
    solver = Solution()
    print(solver.twoSum(nums, target))
    print(solver.two_sum(nums, target))
| 959 | 362 |
import numpy as np
from .logger import log
from .array_grid import get_next_grid_dims
from .act_on_image import ActOnImage
from .array_message import write_conjugated_message_grids
from .bpcs_steg import arr_bpcs_complexity
def remove_message_from_vessel(arr, alpha, grid_size):
    """Extract candidate message grids from a vessel image array.

    Walks every ``grid_size`` region of ``arr`` and keeps the grids whose
    BPCS complexity is at least ``alpha``; less complex grids are assumed
    to be ordinary image data and are skipped.

    :param arr: bit-plane image array to scan
    :param alpha: complexity threshold (BPCS convention: a value in [0, 1])
    :param grid_size: (rows, cols) of each grid, e.g. (8, 8)
    :return: list of kept grids (slices of ``arr``)
    """
    messages = []
    # Counters: grids seen / kept as message / left as plain image data.
    nfound, nkept, nleft = 0, 0, 0
    complexities = []
    for dims in get_next_grid_dims(arr, grid_size):
        nfound += 1
        grid = arr[tuple(dims)]
        cmplx = arr_bpcs_complexity(grid)
        if cmplx < alpha:
            nleft += 1
            continue
        complexities.append(cmplx)
        nkept += 1
        messages.append(grid)
    # Sanity check: every grid was classified exactly once.
    assert nfound == nkept + nleft
    # NOTE(review): logged at CRITICAL although the message is informational —
    # presumably to keep it visible at restrictive log levels; confirm intent.
    log.critical('Found {0} out of {1} grids with complexity above {2}'.format(nkept, nfound, alpha))
    return messages
class BPCSDecodeImage(ActOnImage):
    """ActOnImage action that extracts embedded message grids from an image."""

    def modify(self, alpha):
        # Scan the image's bit-plane array in 8x8 grids and return the ones
        # complex enough (>= alpha) to be carrying message data.
        return remove_message_from_vessel(self.arr, alpha, (8,8))
def decode(infile, outfile, alpha=0.45):
    """Decode a BPCS-embedded message from ``infile`` and write it to ``outfile``.

    :param infile: path of the vessel image
    :param outfile: destination path for the recovered message
    :param alpha: complexity threshold; 0.45 is the customary BPCS default
    """
    x = BPCSDecodeImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8)
    grids = x.modify(alpha)
    write_conjugated_message_grids(outfile, grids, alpha)
| 1,162 | 414 |
# # Exploration of the crash severity information in CAS data
#
# In this notebook, we will explore the severity of crashes, as it will be the
# target of our predictive models.

from pathlib import Path

import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sb

from crash_prediction import cas_data

# set seaborn default style
sb.set()

# But first, we ensure we have the data or download it if needed
dset_path = Path("..") / "data" / "cas_dataset.csv"
if not dset_path.exists():
    dset_path.parent.mkdir(parents=True, exist_ok=True)
    cas_data.download(dset_path)

# and load it.
dset = pd.read_csv(dset_path)
dset.head()

# The CAS dataset has 4 features that can be associated with the crash severity:
#
# - `crashSeverity`, severity of a crash, determined by the worst injury
#   sustained in the crash at time of entry,
# - `fatalCount`, count of the number of fatal casualties associated with this
#   crash,
# - `minorInjuryCount`, count of the number of minor injuries associated with
#   this crash,
# - `seriousInjuryCount`, count of the number of serious injuries associated
#   with this crash.

severity_features = [
    "fatalCount",
    "seriousInjuryCount",
    "minorInjuryCount",
    "crashSeverity",
]

# Bar-plot the value counts of each severity feature; a log y-scale is used
# because the distributions are heavily dominated by non-injury crashes.
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
for ax, feat in zip(axes.flat, severity_features):
    counts = dset[feat].value_counts(dropna=False)
    counts.plot.bar(ylabel="# crashes", title=feat, ax=ax)
    ax.set(yscale="log")
fig.tight_layout()

# To check the geographical distribution, we will focus on Auckland and replace
# discrete levels of `crashSeverity` with numbers to ease plotting.
dset_auckland = dset[dset["X"].between(174.7, 174.9) & dset["Y"].between(-37, -36.8)]
mapping = {
    "Non-Injury Crash": 1,
    "Minor Crash": 2,
    "Serious Crash": 3,
    "Fatal Crash": 4,
}
dset_auckland = dset_auckland.replace({"crashSeverity": mapping})

# Given the data set imbalance, we plot the local maxima to better see the
# location of more severe car crashes.
fig, axes = plt.subplots(2, 2, figsize=(15, 15))
for ax, feat in zip(axes.flat, severity_features):
    dset_auckland.plot.hexbin(
        "X",
        "Y",
        feat,
        gridsize=500,
        reduce_C_function=np.max,
        cmap="BuPu",
        title=feat,
        ax=ax,
        sharex=False,
    )
    ax.set_xticklabels([])
    ax.set_yticklabels([])
fig.tight_layout()

# A few remarks coming from these plots:
#
# - fatal counts are (hopefully) very low,
# - crashes with serious injuries are also very sparse,
# - crashes with minor injuries are denser and seem to follow major axes,
# - the crash severity feature looks like the most homogeneous feature, yet
#   highlighting some roads more than others.
#
# The crash severity is probably a good go-to target, as it's quite
# interpretable and actionable. The corresponding ML problem is a supervised
# multi-class prediction problem.

# To simplify the problem, we can also just try to predict if a crash is going
# to involve an injury (minor, severe or fatal) or none. Here is how it would
# look in Auckland.
dset_auckland["injuryCrash"] = (dset_auckland["crashSeverity"] > 1) * 1.0
dset_auckland.plot.hexbin(
    "X",
    "Y",
    "injuryCrash",
    gridsize=500,
    cmap="BuPu",
    title="Crash with injury",
    sharex=False,
    figsize=(10, 10),
)

# Interestingly, the major axes do not pop up as saliently here, as we are
# averaging instead of taking the local maxima.

# This brings us to another question: is the fraction of crashes with
# injuries a constant fraction of the number of crashes in an area? This would
# imply that a simple binomial model can model locally binned data.

# We first discretize space into 0.01° wide cells and count the total number of
# crashes in each cell as well as the number of crashes with injuries.

# +
dset["X_bin"] = pd.cut(
    dset["X"], pd.interval_range(dset.X.min(), dset.X.max(), freq=0.01)
)
dset["Y_bin"] = pd.cut(
    dset["Y"], pd.interval_range(dset.Y.min(), dset.Y.max(), freq=0.01)
)
counts = (
    dset.groupby(["X_bin", "Y_bin"], observed=True).size().reset_index(name="crash")
)
injury_counts = (
    dset.groupby(["X_bin", "Y_bin"], observed=True)
    .apply(lambda x: (x["crashSeverity"] != "Non-Injury Crash").sum())
    .reset_index(name="injury")
)
counts = counts.merge(injury_counts)
# -

# For each number of crashes in cells, we can check the fraction of crashes with
# injuries. Here we see that cells with 1 or few crashes have a nearly 50/50
# chance of injuries, compared to cells with a larger number of accidents, where
# it goes down to about 20%.
injury_fraction = counts.groupby("crash").apply(
    lambda x: x["injury"].sum() / x["crash"].sum()
)
ax = injury_fraction.plot(style=".", ylabel="fraction of injuries", figsize=(10, 7))
ax.set_xscale("log")

# Then we can also check how good a binomial distribution is at modeling binned
# data, using it to derive a 95% predictive interval.
ratio = counts["injury"].sum() / counts["crash"].sum()
xs = np.arange(1, counts["crash"].max() + 1)
pred_intervals = st.binom(xs, ratio).ppf([[0.025], [0.975]])

# +
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[0])
axes[0].fill_between(
    xs,
    pred_intervals[0],
    pred_intervals[1],
    alpha=0.3,
    color="r",
    label="95% equal-tail interval for binomial",
)
axes[0].legend()
counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[1])
axes[1].fill_between(
    xs,
    pred_intervals[0],
    pred_intervals[1],
    alpha=0.3,
    color="r",
    label="95% equal-tail interval for binomial",
)
axes[1].legend()
axes[1].set_xscale("log")
axes[1].set_yscale("log")
# -

# The predictive interval seems to have a poor coverage, overshooting the high
# counts regions and being too narrow for the regions with hundreds of crashes.
# We can compute the empirical coverage of these intervals to check this.
counts["covered"] = counts["injury"].between(
    pred_intervals[0, counts["crash"] - 1], pred_intervals[1, counts["crash"] - 1]
)
print(f"95% predictive interval has {counts['covered'].mean() * 100:.2f}%.")
print("95% predictive interval coverage per quartile of crash counts:")
mask = counts["crash"] > 1
counts[mask].groupby(pd.qcut(counts.loc[mask, "crash"], 4))["covered"].mean()

# So it turns out that on a macro scale, the coverage of this simple model is
# quite good, but if we split by number of crashes, the coverage isn't so good
# anymore for the cells with higher number of crashes.
#
# Hence, including the number of crashes in a vicinity could be a relevant
# predictor for the probability of crash with injury.

# ---
# ## Original computing environment

# !date -R
# !uname -a
# !pip freeze
| 6,859 | 2,405 |
import gym
def cartpole():
    """Run one CartPole-v1 episode with random actions, logging every step."""
    environment = gym.make('CartPole-v1')
    environment.reset()
    step = 0
    done = False
    # At most 1000 random steps; stop early once the episode terminates.
    while step < 1000 and not done:
        # environment.render()
        action = environment.action_space.sample()
        observation, reward, done, info = environment.step(action)
        print("Step {}:".format(step))
        print("action: {}:".format(action))
        print('observation: {}'.format(observation))
        print('reward: {}'.format(reward))
        print('done: {}'.format(done))
        print('info: {}'.format(info))
        step += 1
if __name__ == '__main__':
    # Script entry point: run the random-policy demo episode.
    cartpole()
| 601 | 182 |
# ------------------------------------------------------------ Imports ----------------------------------------------------------- #
# System
from typing import Optional
# Pip
from kw3 import WrappedContract, Web3
from kw3.constants import Constants as KW3Constants
# Local
from ._abi import pancakeswap_factory_abi
from ...liquidity_pool import PancakeswapLiquidityPool, PancakeswapBusdLiquidityPool, PancakeswapWbnbLiquidityPool
from ...constants import Constants
# -------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------- class: PancakeswapFactory -------------------------------------------------- #
# --------------------------------------------------- class: PancakeswapFactory -------------------------------------------------- #

class PancakeswapFactory(WrappedContract):
    """Wrapper around the on-chain PancakeSwap factory contract.

    Forwards the factory's pair-enumeration and pair-lookup calls and wraps
    the returned addresses in the project's liquidity-pool classes.
    """

    # --------------------------------------------------------- Init --------------------------------------------------------- #

    def __init__(
        self,
        web3: Web3
    ):
        # Bind the wrapper to the canonical factory address with its ABI.
        super().__init__(
            web3=web3,
            address=Constants.ADDRESS_PANCAKESWAP_FACTORY,
            abi=pancakeswap_factory_abi
        )

    # ---------------------------------------------------- Public methods ---------------------------------------------------- #
    # Forwarders

    def liquidityPoolAddressesLength(self) -> int:
        """Total number of pairs registered in the factory (allPairsLength)."""
        return self.functions.allPairsLength().call()

    def liquidityPoolAddressAtIndex(
        self,
        index: int
    ) -> str:
        """Address of the pair at ``index`` in the factory's registry."""
        return self.functions.allPairs(index).call()

    def liquidityPoolAtIndex(
        self,
        index: int
    ) -> PancakeswapLiquidityPool:
        """Wrap the pair at ``index`` in a liquidity-pool object.

        NOTE(review): this instantiates PancakeswapBusdLiquidityPool even
        though the annotation (and the generic method name) suggest the base
        PancakeswapLiquidityPool — an arbitrary pair is not necessarily a
        BUSD pair. Possibly a copy-paste slip; confirm before relying on it.
        """
        return PancakeswapBusdLiquidityPool(
            web3=self._web3,
            address=self.liquidityPoolAddressAtIndex(
                index=index
            )
        )

    # Custom

    def getPairAddress(
        self,
        address0: str,
        address1: str
    ) -> Optional[str]:
        """Pair address for the two tokens, via the factory's getPair call."""
        return self.functions.getPair(
            Web3.toChecksumAddress(address0),
            Web3.toChecksumAddress(address1)
        ).call()

    def getPair(
        self,
        address0: str,
        address1: str
    ) -> Optional[PancakeswapLiquidityPool]:
        """Generic pool wrapper for the two tokens, or None if no pair exists."""
        return self.__getPair(
            PancakeswapLiquidityPool,
            address0=address0,
            address1=address1
        )

    def getWbnbPair(
        self,
        token_address: str
    ) -> Optional[PancakeswapWbnbLiquidityPool]:
        """WBNB/token pool wrapper, or None if no pair exists."""
        return self.__getPair(
            PancakeswapWbnbLiquidityPool,
            address0=KW3Constants.WBNB.ADDRESS,
            address1=token_address
        )

    def getBusdPair(
        self,
        token_address: str
    ) -> Optional[PancakeswapBusdLiquidityPool]:
        """BUSD/token pool wrapper, or None if no pair exists."""
        return self.__getPair(
            PancakeswapBusdLiquidityPool,
            address0=KW3Constants.BUSD.ADDRESS,
            address1=token_address
        )

    # ---------------------------------------------------- Private methods --------------------------------------------------- #

    def __getPair(
        self,
        _type,
        address0: str,
        address1: str
    ) -> Optional[PancakeswapLiquidityPool]:
        """Look up the pair address and wrap it in ``_type``; None if absent."""
        pair_address = self.getPairAddress(address0, address1)

        return _type(
            self._web3,
            pair_address
        ) if pair_address else None


# -------------------------------------------------------------------------------------------------------------------------------- #
# The collection of functions for the Boston AirBnB dataset
# import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar #To check holidays in the U.S
import time
import copy
def load_bnb_files():
    """Load the Boston AirBnB listings and calendar CSV files.

    :return: (df_listing, df_calendar) tuple of dataframes
    """
    return (
        pd.read_csv('./data/listings.csv'),
        pd.read_csv('./data/calendar.csv'),
    )
# Modify df_calendar for future work
# Special event : marathon, new academic season
def modify_calendar(df_calendar):
    '''
    This function creates 'year', 'month', 'day', 'weekday', and 'week_number' columns from the 'date' column of df_calendar
    and removes the '$' and ',' characters from the 'price' column before casting it to float.
    It also adds boolean 'us_holiday' and 'weekend' columns.
    Input : a Pandas dataframe having 'date' and 'price' columns
    Output : the same dataframe, modified in place and returned
    '''
    # Split date column into year, month, day, weekday columns.
    # The day of the week with Monday=0, Sunday=6 (codes replaced by names below).
    dates = pd.DatetimeIndex(df_calendar['date'])
    df_calendar['year'] = dates.year
    df_calendar['month'] = dates.month
    df_calendar['day'] = dates.day
    df_calendar['weekday'] = dates.weekday
    # DatetimeIndex.week was removed in pandas 2.0; isocalendar() is the
    # supported replacement (.to_numpy() avoids index misalignment).
    df_calendar['week_number'] = dates.isocalendar().week.to_numpy()
    # regex=False so '$' is removed literally: with the regex default, '$'
    # matches end-of-string and the dollar sign would never be stripped.
    df_calendar['price'] = df_calendar['price'].str.replace('$', '', regex=False)
    df_calendar['price'] = df_calendar['price'].str.replace(',', '', regex=False)
    df_calendar['price'] = df_calendar['price'].astype(float)
    # Add us_holiday column (U.S. federal holidays within the data's range).
    cal = calendar()
    holidays = cal.holidays(start=df_calendar.date.min(), end=df_calendar.date.max())
    df_calendar['us_holiday'] = pd.to_datetime(df_calendar.date).isin(holidays)
    # Add weekend column: Friday (4) and Saturday (5), per the original convention.
    weekend = [4, 5]
    df_calendar['weekend'] = df_calendar.weekday.isin(weekend)
    # Replace numeric weekday codes with readable names.
    df_calendar['weekday'].replace({0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}, inplace=True)
    return df_calendar
def add_availabledays_price(df_listing, df_cal_modified):
    '''
    This function creates the columns of 'unavail_days', 'avail_days_weekends',
    'avail_days_weekdays', 'price_weekend', and 'price_weekday' where calculated from df_cal_modified on df_listing.
    Input :
    - A Pandas dataframe made from 'listings.csv' : df_listing
    - A pandas dataframe modified by modify_calendar() : df_cal_modified
    Output :
    - The modified df_listing dataframe with new 'unavail_days', 'avail_days_weekends',
      'avail_days_weekdays', 'price_weekend', and 'price_weekday' columns
    '''
    # Accumulate in plain Python lists: the original grew numpy arrays with
    # np.append, which copies the whole array on every call (quadratic).
    unavailable_days_list = []
    avail_days_weekends_list = []
    avail_days_weekdays_list = []
    price_weekend_list = []
    price_weekday_list = []
    for listing_id in df_listing.id:
        # Calendar rows belonging to this listing.
        tmp = df_cal_modified[df_cal_modified.listing_id == listing_id]
        available = tmp.available == 't'
        # Days explicitly marked unavailable ('f').
        unavailable_days_list.append((tmp.available == 'f').sum())
        if available.any():
            weekend = tmp.weekend == True
            avail_days_weekends_list.append((available & weekend).sum())
            avail_days_weekdays_list.append((available & ~weekend).sum())
            # Mean nightly price on available weekend / weekday days
            # (NaN when a listing has no available days in that category).
            price_weekend_list.append(tmp[available & weekend].price.astype(float).mean())
            price_weekday_list.append(tmp[available & ~weekend].price.astype(float).mean())
        else:
            avail_days_weekends_list.append(0)
            avail_days_weekdays_list.append(0)
            price_weekend_list.append(np.nan)
            price_weekday_list.append(np.nan)
    # float dtype matches the original np.append-built arrays.
    # NOTE(review): pd.Series(...) carries a fresh RangeIndex, so this assumes
    # df_listing also has a default RangeIndex (as the original did) — confirm.
    df_listing['unavail_days'] = pd.Series(np.array(unavailable_days_list, dtype=float))
    df_listing['avail_days_weekends'] = pd.Series(np.array(avail_days_weekends_list, dtype=float))
    df_listing['avail_days_weekdays'] = pd.Series(np.array(avail_days_weekdays_list, dtype=float))
    df_listing['price_weekend'] = pd.Series(np.array(price_weekend_list, dtype=float))
    df_listing['price_weekday'] = pd.Series(np.array(price_weekday_list, dtype=float))
    return df_listing
def clean_listing_df(df_listing):
    '''
    This function aims to make the df_listing dataframe ready for data analysis by
    - removing irrelevant columns
    - changing object type columns to numeric columns or manipulating them using one hot encoding
    - filling NaN values
    - creating an integrated_score_log column by the natural log of the result from 'review_scores_rating' times 'number_of_reviews' +1
    Input :
    - A Pandas dataframe made from 'listings.csv' : df_listing
    Output :
    - Cleaned df_listing
    '''
    # Drop columns having 50% of nan value. There were reasons that I decided 50% the threshold for dropping columns.
    # 1. Easy to see the dataframe and to check the meaning of the columns.
    # 2. Decide which ones have to be dropped.
    # The candidate columns to be dropped are 'notes', 'neighbourhood_group_cleansed', 'square_feet', 'weekly_price', 'monthly_price', 'security_deposit', 'has_availability', 'license', 'jurisdiction_names'. Most of them are duplicated to other columns or irrelevant except 'security_deposit'. I didn't impute by the mean or mode of the column because it can distort the real shape. I didn't do one-hot-encoding to keep the dataframe straightforward. 'security_deposit' has 55 unique values.
    df_missing = df_listing.isna().mean()
    df_listing_modi1 = df_listing.drop(df_missing[df_missing>0.5].index.to_list(), axis=1)
    # Drop columns related with urls and other irrelevant columns.
    # url and other columns are all unique or useless.
    remove_list1 = ['listing_url', 'scrape_id', 'last_scraped', 'thumbnail_url', 'medium_url', 'picture_url', 'xl_picture_url', 'host_url',
                    'host_thumbnail_url', 'host_picture_url', 'country_code', 'country']
    df_listing_modi1.drop(remove_list1, axis=1, inplace=True)
    # Drop the columns because of data overlap [city, smart_location], Only one value [state],
    # Drop the wrong data [market, calendar_last_scraped]
    remove_list2 = ['smart_location', 'state', 'name', 'summary', 'space', 'description','neighborhood_overview',
                    'transit','access','market','calendar_last_scraped']
    df_listing_modi1.drop(remove_list2, axis=1, inplace=True)
    # Modify 'house_rules' column to 'house_rules_exist_tf' having True value if there is a rule.
    # False value, if there is no rule.
    # House rules are different for every host, so it is not practical to use one-hot-encoding. Instead,
    # the column is reduced to a binary flag: True if the listing declares any rule, otherwise False.
    # This keeps some information, which is better than just dropping.
    df_listing_modi1['house_rules_exist_tf']= pd.notna(df_listing_modi1.house_rules)
    df_listing_modi1.drop(['house_rules'], axis=1, inplace=True)
    # Remove columns having ~1000 unique string values and irrelevant data
    remove_list3 = ['interaction', 'host_name', 'host_since', 'host_about', 'street','first_review','experiences_offered','requires_license',
                    'last_review','host_location','neighbourhood_cleansed','experiences_offered','requires_license']
    df_listing_modi2 = df_listing_modi1.drop(remove_list3, axis=1)
    # Change the columns 'host_response_rate', 'host_acceptance_rate' to float type
    # NOTE(review): with pandas' historical regex=True default, single-character
    # patterns such as '$' were still treated literally; in pandas >= 1.4/2.0
    # these calls need regex=False (or escaping) to keep stripping '$' — confirm
    # the pinned pandas version.
    columns_change_type = ['host_response_rate','host_acceptance_rate', 'price', 'cleaning_fee']
    for i in columns_change_type:
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('%','')
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('$','')
        df_listing_modi2[i] = df_listing_modi2[i].str.replace(',','')
        df_listing_modi2[i] = df_listing_modi2[i].astype(float)
    # Modify and Split values in 'amenities' column
    # Amenities can be one of the reasons that a potential candidate might consider.
    # NOTE(review): "[{}]" is a regex character class (strips '{' and '}') only
    # when regex=True is in effect; under pandas 2.0's regex=False default it
    # matches the literal string "[{}]" — verify against the pandas version used.
    df_listing_modi2.amenities = df_listing_modi2.amenities.str.replace("[{}]", "")
    df_amenities = df_listing_modi2.amenities.str.get_dummies(sep = ",")
    df_amenities = df_amenities.add_prefix('amenities_')
    df_listing_modi2 = pd.concat([df_listing_modi2, df_amenities], axis=1)
    df_listing_modi2 = df_listing_modi2.drop('amenities', axis=1)
    # Use get_dummies for columns having unique values less than 10
    # It is reasonable to use one-hot-encoding if the number of unique values is less than 10.
    # It doesn't lose information, and keeps the dataframe simple.
    columns_of_object_less10 =[]
    for i,j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):
        if j == object and len(df_listing_modi2[i].value_counts()) < 10 :
            columns_of_object_less10.append(i)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_less10, prefix=columns_of_object_less10,
                                      dummy_na=True)
    # Modify 'extra_people' column to get boolean type of 'extra_people_fee_tf'
    # Instead of dropping, I decided to change the 'extra_people' column to a binary flag to keep some information
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(str)
    df_listing_modi2['extra_people']= df_listing_modi2['extra_people'].str.replace('$','')
    df_listing_modi2['extra_people']=df_listing_modi2['extra_people'].str.replace(',','')
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(float)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].replace(to_replace=0, value=np.nan)
    df_listing_modi2['extra_people_fee_tf']= pd.notna(df_listing_modi2.extra_people)
    df_listing_modi2 = df_listing_modi2.drop('extra_people', axis=1)
    # Modify and Split values in 'host_verifications' column
    df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("[", "")
    df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("]", "")
    df_host_verifications = df_listing_modi2.host_verifications.str.get_dummies(sep = ",")
    df_host_verifications = df_host_verifications.add_prefix('host_verification_')
    df_listing_modi2 = pd.concat([df_listing_modi2, df_host_verifications], axis=1)
    df_listing_modi2 = df_listing_modi2.drop(['host_verifications'], axis=1)
    df_listing_modi2 = df_listing_modi2.drop(['host_neighbourhood'], axis=1)
    # Modify 'calendar_updated' column
    # Instead of dropping, I decided to change the 'calendar_updated' column to a binary flag (updated within a week or not)
    # to keep some information
    df_listing_modi2["calendar_updated_1weekago"] = np.where(df_listing_modi2['calendar_updated'].str.contains(
        "days|yesterday|today|a week ago")==True, 'yes', 'more_than_1week')
    df_listing_modi2 = df_listing_modi2.drop(['calendar_updated'], axis=1)
    # Use get_dummies for the columns 'neighbourhood', 'city', 'zipcode', 'property_type'
    tmp = df_listing_modi2.columns.to_list()
    tmp1 = df_listing_modi2.dtypes.to_list()
    columns_of_object_over10 =[]
    for i,j in zip(tmp,tmp1):
        if j == object and len(df_listing_modi2[i].value_counts()) > 10 :
            columns_of_object_over10.append(i)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_over10,
                                      prefix=columns_of_object_over10, dummy_na=True)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'],
                                      prefix=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'], dummy_na=True)
    # Binary flags for hosts with perfect response/acceptance rates.
    df_listing_modi2["host_response_rate_100"] = np.where(df_listing_modi2['host_response_rate'] ==100, True, False)
    df_listing_modi2["host_acceptance_rate_100"] = np.where(df_listing_modi2['host_acceptance_rate'] ==100, True, False)
    df_listing_modi2 = df_listing_modi2.drop(['host_response_rate','host_acceptance_rate','reviews_per_month'], axis=1)
    # bathrooms, bedrooms, beds, cleaning_fee, review_scores_rating, review_... : fillna with mean value
    # The empty cells are filled with mean values of the corresponding columns. Because these are numerical,
    # I thought imputing with mean values is better than dropping or one-hot-encoding
    columns1 = ['bathrooms','bedrooms','beds','cleaning_fee','review_scores_rating','review_scores_accuracy','review_scores_cleanliness','review_scores_checkin',
                'review_scores_communication','review_scores_location','review_scores_value']
    df_listing_modi2[columns1] = df_listing_modi2[columns1].fillna(df_listing_modi2.mean())
    # Missing weekend/weekday prices fall back to the listing's base price.
    df_listing_modi2.price_weekend.fillna(df_listing_modi2.price, inplace=True)
    df_listing_modi2.price_weekday.fillna(df_listing_modi2.price, inplace=True)
    # Popularity score: log(rating * review count + 1); +1 keeps log defined at 0.
    df_listing_modi2['integrated_score_log'] = np.log(df_listing_modi2['review_scores_rating']*df_listing_modi2['number_of_reviews']+1)
    df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100','host_acceptance_rate_100'],
                                      prefix=['host_response_rate_100','host_acceptance_rate_100'])
    df_listing_modi2 = df_listing_modi2.drop(['id', 'host_id', 'latitude', 'longitude','price','host_listings_count','host_total_listings_count','maximum_nights'], axis=1)
    return df_listing_modi2
def conditioning_listing_df(df_listing_modi2):
    '''
    This function is for conditioning a dataframe returned by the function 'clean_listing_df(df_listing)'.
    Rows with integrated_score_log == 0 are labelled 'poor', rows at or above
    the 80th percentile 'high', and everything else 'normal'. Columns that
    directly feed the score are then dropped so models cannot trivially
    recover the label.
    Input :
    - A Pandas dataframe came from the function 'clean_listing_df(df_listing)'
    Output :
    - Cleaned df_listing_modi2 : df_listing_modi3
    '''
    score = df_listing_modi2['integrated_score_log']
    high_cutoff = score.quantile(0.8)
    df_listing_modi2['y_label'] = np.select(
        [score == 0, score >= high_cutoff],
        ['poor', 'high'],
        default='normal',
    )
    # Drop columns related to 'y_label': without dropping, the remaining
    # columns would let a model trivially reconstruct the label.
    leaking_columns = [
        'integrated_score_log', 'number_of_reviews', 'review_scores_rating',
        'review_scores_value', 'review_scores_communication',
        'review_scores_accuracy', 'review_scores_checkin',
        'review_scores_cleanliness', 'review_scores_location',
        'availability_30', 'availability_60', 'availability_90',
        'availability_365', 'calculated_host_listings_count',
    ]
    df_listing_modi3 = df_listing_modi2.drop(leaking_columns, axis=1)
    return df_listing_modi3
def investigate(df_listing_scaled, pca, i):
    '''
    This function checks which original features are strongly related to a given pca component
    Input :
    - Dataframe : df_listing_scaled, a dataframe scaled by StandardScaler()
    - pca instance (fitted; exposes components_)
    - i : The index of the pca component to inspect
    Output :
    - pos_list : up to 10 original feature names with the largest positive
      weights on the component, sorted in order of importance
    - neg_list : up to 10 original feature names with the most negative
      weights on the component, most negative first
    '''
    feature_names = list(df_listing_scaled.columns)
    weights_pca = copy.deepcopy(pca.components_[i])
    combined_sorted = sorted(zip(feature_names, weights_pca), key=lambda tup: tup[1], reverse=True)
    tmp_list = [(name, "{0:.3f}".format(weight)) for name, weight in combined_sorted]
    print("positive to pca{}:".format(i), tmp_list[0:10])
    print()
    print("negative to pca{}:".format(i), tmp_list[-1:-11:-1])
    print()
    # Cap at the number of available features: the original hard-coded 10 and
    # raised IndexError for frames with fewer than 10 columns.
    top_n = min(10, len(tmp_list))
    pos_list = [tmp_list[j][0] for j in range(top_n)]
    neg_list = [tmp_list[-k][0] for k in range(1, top_n + 1)]
    return pos_list, neg_list
def check_difference(pos_list, neg_list, df_listing_poor, df_listing_high):
    '''
    Compare group means of the PCA-selected features between the 'high' and
    'poor' listing groups, sorted by absolute difference (largest first).
    '''
    features = list(pos_list) + list(neg_list)
    group_means = [
        [df_listing_high[feature].mean(), df_listing_poor[feature].mean()]
        for feature in features
    ]
    comparison = pd.DataFrame(data=group_means, index=features, columns=['high', 'poor'])
    comparison["difference"] = (comparison.high - comparison.poor).abs()
    return comparison.sort_values(by=['difference'], ascending=False)
| 17,648 | 6,009 |
from typing import List, Any
from markdown import Markdown
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor
import re
import xml.etree.ElementTree as etree
class InfoPanelExtension(Extension):
    """Markdown extension for rendering the Confluence info panel macro. Only supports
    the "original" info panels AKA info (blue), success (green), warning (yellow), and error (red).

    A paragraph-level block starting with one of the prefixes "Info:",
    "Success:", "Warning:" or "Error:" is converted into the corresponding
    ``ac:structured-macro`` element.

    Example:
    ```
    Normal, introductory paragraph.

    Warning: info panels like this must be isolated into their own blocks with surrounding blank lines.

    This will be a plain old paragraph, and not included in the warning above.
    ```
    """

    def extendMarkdown(self, md: Markdown) -> None:
        """Register one block processor per panel flavor.

        Each processor maps a text prefix to a Confluence macro name and a
        fixed macro id. All are registered at priority 25 — NOTE(review):
        presumably so they run before the built-in paragraph processor;
        confirm against python-markdown's registry priorities.
        """
        md.registerExtension(self)
        md.parser.blockprocessors.register(
            InfoPanelBlockProcessor(
                "Info:", "info", "42afc5c4-fb53-4483-9f1a-a87a7ad033e6", md.parser
            ),
            "info-panel",
            25,
        )
        md.parser.blockprocessors.register(
            InfoPanelBlockProcessor(
                "Success:", "tip", "d60a142d-bc62-4f37-a091-7254c4472bdf", md.parser
            ),
            "success-panel",
            25,
        )
        md.parser.blockprocessors.register(
            InfoPanelBlockProcessor(
                "Warning:", "note", "9e14a573-943e-4691-919b-a9f6a389da71", md.parser
            ),
            "warning-panel",
            25,
        )
        md.parser.blockprocessors.register(
            InfoPanelBlockProcessor(
                "Error:", "warning", "2e759c9c-11f1-4959-82e7-901a2dc737d7", md.parser
            ),
            "error-panel",
            25,
        )
class InfoPanelBlockProcessor(BlockProcessor):
    """Turns a block starting with a prefix such as "Info:" into the matching
    Confluence ``ac:structured-macro`` element, parsing the remaining text as
    markdown inside the panel body."""

    def __init__(
        self, prefix: str, name: str, macro_id: str, *args: Any, **kwargs: Any
    ):
        self._prefix = prefix
        # re.escape so a prefix containing regex metacharacters can never
        # break or loosen the match.
        self._block_re = re.compile(
            r"\s*{}.*".format(re.escape(prefix)), re.MULTILINE | re.DOTALL | re.VERBOSE
        )
        self._name = name
        self._macro_id = macro_id
        super().__init__(*args, **kwargs)

    def test(self, parent: etree.Element, block: str) -> bool:
        """Return True when the block starts (after optional whitespace) with the prefix."""
        return bool(self._block_re.match(block))

    def run(self, parent: etree.Element, blocks: List[str]) -> None:
        block = blocks.pop(0).lstrip()
        # Bug fix: the previous code used str.lstrip(self._prefix), which strips
        # any leading characters from the *set* of characters in the prefix, not
        # the literal prefix itself — e.g. "Info:Information about X" lost the
        # start of its content, and a prefix preceded by whitespace (allowed by
        # the \s* in the regex) was never removed at all.
        if block.startswith(self._prefix):
            block = block[len(self._prefix):]
        raw_content = block.lstrip()
        info_panel = etree.SubElement(
            parent,
            "ac:structured-macro",
            {
                "ac:name": self._name,
                "ac:schema-version": "1",
                "ac:macro-id": self._macro_id,
            },
        )
        rich_text_body = etree.SubElement(info_panel, "ac:rich-text-body")
        # Recursively parse the panel content so nested markdown still renders.
        self.parser.parseChunk(rich_text_body, raw_content)
        info_panel.tail = "\n"
def makeExtension(**kwargs: Any) -> InfoPanelExtension:
    """Standard Python-Markdown entry point for loading this extension."""
    extension = InfoPanelExtension(**kwargs)
    return extension
| 2,898 | 913 |
#!/usr/bin/python3
# Copyright 2019 Abe Leite
# Based on "Proximal Policy Optimization Algorithms", Schulman et al 2017
# For the benefit of my fellow CSCI-B 659 students
# While I hope that this code is helpful I will not vouch for its total accuracy;
# my primary aim here is to elucidate the ideas from the paper.
import sys
import tensorflow as tf
import gym
# PPO hyperparameters — presumably chosen to follow Schulman et al. 2017
# (referenced in the file header); confirm against the paper's defaults.
ACTORS = 8  # number of parallel gym environments stepped each cycle
N_CYCLES = 10000  # training cycles; each collects CYCLE_LENGTH steps per actor
LEARNING_RATE = 0.00025  # Adam step size shared by value and policy updates
CYCLE_LENGTH = 128  # environment steps collected per actor per cycle
BATCH_SIZE = CYCLE_LENGTH*ACTORS  # total transitions gathered per cycle
CYCLE_EPOCHS = 3  # optimization epochs over each collected batch
MINIBATCH = 32*ACTORS  # transitions per SGD minibatch
GAMMA = 0.99  # discount factor
EPSILON = 0.1  # PPO probability-ratio clipping parameter
class DiscretePPO:
    """Clipped-surrogate PPO agent for discrete action spaces (TF2/Keras)."""

    def __init__(self, V, pi):
        ''' V and pi are both keras (Sequential)s.
        V maps state to single scalar value;
        pi maps state to discrete probability distribution on actions. '''
        self.V = V
        self.pi = pi
        # Frozen snapshot of the policy: refreshed once per train() call and
        # used as the denominator of the PPO probability ratio.
        self.old_pi = tf.keras.models.clone_model(self.pi)
        # A single optimizer instance is shared by value and policy updates.
        self.optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)

    @tf.function
    def pick_action(self, S):
        # Sample one action for a single, unbatched state S.
        # NOTE(review): tf.random.categorical interprets its input as *logits*;
        # the docstring above says pi outputs a probability distribution, in
        # which case the sampling distribution is skewed — confirm whether pi
        # should feed log-probabilities here.
        return tf.random.categorical(self.pi(tf.expand_dims(S,axis=0)), 1)[0,0]

    @tf.function
    def train_minibatch(self, SARTS_minibatch):
        # One gradient step on V and pi from a (S, A, R, T, S2) minibatch.
        S, A, R, T, S2 = SARTS_minibatch
        # Bootstrap value is zeroed on terminal transitions (T true).
        next_V = tf.where(T, tf.zeros((MINIBATCH,)), self.V(S2))
        # Value target is treated as a constant for the advantage.
        next_V = tf.stop_gradient(next_V)
        # One-step TD residual used as the advantage estimate.
        advantage = R + GAMMA * next_V - self.V(S)
        V_loss = tf.reduce_sum(advantage ** 2)
        V_gradient = tf.gradients(V_loss, self.V.weights)
        self.optimizer.apply_gradients(zip(V_gradient, self.V.weights))
        # Probability ratio pi(a|s) / old_pi(a|s).
        # NOTE(review): tf.gather(..., A, axis=1) without batch_dims selects
        # whole columns for every row (a batch x batch matrix) rather than one
        # probability per example — verify whether batch_dims=1 was intended.
        ratio = tf.gather(self.pi(S), A, axis=1) / tf.gather(self.old_pi(S), A, axis=1)
        confident_ratio = tf.clip_by_value(ratio, 1-EPSILON, 1+EPSILON)
        current_objective = ratio * advantage
        confident_objective = confident_ratio * advantage
        # Pessimistic (clipped) surrogate: elementwise minimum of the two.
        PPO_objective = tf.where(current_objective < confident_objective, current_objective, confident_objective)
        PPO_objective = tf.reduce_mean(PPO_objective)
        # Ascent on the objective == descent on its negation.
        pi_gradient = tf.gradients(-PPO_objective, self.pi.weights)
        self.optimizer.apply_gradients(zip(pi_gradient, self.pi.weights))

    @tf.function
    def train(self, SARTS_batch):
        # Run CYCLE_EPOCHS of shuffled minibatch updates, then sync old_pi.
        S, A, R, T, S2 = SARTS_batch
        for _ in range(CYCLE_EPOCHS):
            # shuffle and split into minibatches!
            shuffled_indices = tf.random.shuffle(tf.range(BATCH_SIZE))
            num_mb = BATCH_SIZE // MINIBATCH
            for minibatch_indices in tf.split(shuffled_indices, num_mb):
                mb_SARTS = (tf.gather(S, minibatch_indices),
                            tf.gather(A, minibatch_indices),
                            tf.gather(R, minibatch_indices),
                            tf.gather(T, minibatch_indices),
                            tf.gather(S2, minibatch_indices))
                self.train_minibatch(mb_SARTS)
        # Refresh the frozen policy to match the freshly optimized one.
        for old_pi_w, pi_w in zip(self.old_pi.weights, self.pi.weights):
            old_pi_w.assign(pi_w)
def train_PPO(agent, envs, render=False):
    """Run the PPO data-collection/training loop over parallel environments.

    Each cycle steps every env CYCLE_LENGTH times with the current policy,
    accumulating (S, A, R, T, S2) transitions, then hands the stacked batch
    to ``agent.train``. Per-env state and running episode returns persist
    across cycles; finished episodes are logged to stdout.
    """
    episode_returns = []
    running_returns = [0 for _ in envs]  # in-progress return per env
    states = [env.reset() for env in envs]
    for _ in range(N_CYCLES):
        samples = []
        for idx, env in enumerate(envs):
            state = states[idx]
            episode_return = running_returns[idx]
            for _ in range(CYCLE_LENGTH):
                action = agent.pick_action(state).numpy()
                next_state, reward, done, _ = env.step(action)
                if render:
                    env.render()
                episode_return += reward
                samples.append((state, action, reward, done, next_state))
                if done:
                    episode_returns.append(episode_return)
                    print(f'Episode {len(episode_returns):3d}: {episode_return}')
                    episode_return = 0
                    state = env.reset()
                else:
                    state = next_state
            # Carry this env's state and partial return into the next cycle.
            states[idx] = state
            running_returns[idx] = episode_return
        batch = [tf.stack(column, axis=0) for column in zip(*samples)]
        agent.train(batch)
def make_agent(env):
    """Build a DiscretePPO agent sized to the given gym environment.

    The value net maps observations to a single scalar; the policy net maps
    observations to a softmax distribution over the env's discrete actions.
    """
    obs_shape = env.observation_space.shape
    n_actions = env.action_space.n
    value_layers = [
        tf.keras.layers.InputLayer(input_shape=obs_shape),
        tf.keras.layers.Dense(400, activation='relu'),
        tf.keras.layers.Dense(300, activation='relu'),
        tf.keras.layers.Dense(1),
    ]
    policy_layers = [
        tf.keras.layers.InputLayer(input_shape=obs_shape),
        tf.keras.layers.Dense(400, activation='relu'),
        tf.keras.layers.Dense(300, activation='sigmoid'),
        tf.keras.layers.Dense(n_actions, activation='softmax'),
    ]
    return DiscretePPO(tf.keras.Sequential(value_layers),
                       tf.keras.Sequential(policy_layers))
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Usage: python ppo.py <Env-V*> (--render)')
        # Bug fix: previously execution fell through after printing usage and
        # crashed with IndexError on sys.argv[1].
        sys.exit(1)
    envs = [gym.make(sys.argv[1]) for _ in range(ACTORS)]
    agent = make_agent(envs[0])
    train_PPO(agent, envs, '--render' in sys.argv)
| 5,096 | 1,803 |
from pytube import YouTube
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import YoutubeDLSerializer
from .utils import make_time, make_size
class YoutubeDL(APIView):
    """Resolve a YouTube URL into downloadable video/audio stream metadata."""

    serializer_class = YoutubeDLSerializer

    def post(self, request):
        """Return title, duration, thumbnail and per-resolution stream data for `url`.

        Responds 404 when pytube cannot resolve the URL and 400 when the
        request payload fails serializer validation.
        """
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            url = serializer.validated_data.get("url")
            try:
                file = YouTube(url)
            except Exception:  # was a bare except: — never swallow SystemExit/KeyboardInterrupt
                return Response({
                    "status": "failed",
                    "message": "Invalid url",
                }, status=status.HTTP_404_NOT_FOUND)
            videos = file.streams
            thumbnail = file.thumbnail_url
            title = file.title
            duration = make_time(file.length)
            # First stream found for each resolution wins.
            video_res = {
                "1080p": None,
                "720p": None,
                "480p": None,
                "360p": None,
                "240p": None,
                "144p": None
            }
            aud_size = 0
            audio = None
            for video in videos:
                if video.resolution in video_res and video_res[video.resolution] is None:
                    video_res[video.resolution] = {"resolution": video.resolution, "video_type": video.subtype,
                                                   "size": make_size(video.filesize),
                                                   "url": video.url}
                # Track the largest audio-only stream.
                if video.type == "audio":
                    if video.filesize > aud_size:
                        audio = video
                        aud_size = video.filesize
            video_data = [value for value in video_res.values() if value is not None]
            audio_data = None
            if audio is not None:
                audio_data = {"audio_type": audio.subtype,
                              "size": make_size(audio.filesize),
                              "url": audio.url}
            return Response({
                "status": "success",
                "message": "Got some data.",
                "title": title,
                "duration": duration,
                "thumbnail": thumbnail,
                "video_data": video_data,
                # Bug fix: audio_data was computed above but never returned.
                "audio_data": audio_data,
            }, status=status.HTTP_200_OK)
        return Response({"status": "failed",
                         "message": "Something went wrong.",
                         "error": serializer.errors},
                        status=status.HTTP_400_BAD_REQUEST)
| 2,620 | 672 |
# -*- coding: utf-8 -*-
# tomolab
# Michele Scipioni
# Harvard University, Martinos Center for Biomedical Imaging
# University of Pisa
__all__ = ['convert_listmode_dicom_to_interfile',
'import_interfile_projection', 'export_interfile_projection', 'import_h5f_projection',
'import_interfile_volume', 'export_interfile_volume']
from .PET_listmode import convert_listmode_dicom_to_interfile
from .PET_sinogram import import_interfile_projection, export_interfile_projection, import_h5f_projection
from .PET_volume import import_interfile_volume, export_interfile_volume
| 593 | 193 |
import json
import pandas as pd
import numpy as np
from typing import Union, List
from pathlib import Path
from timeit import default_timer as timer
from nntrainer import data as nn_data
def _time_to_seconds(time_column):
return pd.to_timedelta(time_column).dt.total_seconds()
class HT100MBaseDataset:
    """
    Dataloader for HowTo100M dataset.

    Based on the index csv file of the HT100M dataset this builds a wrapper
    around the file structure to return individual files.
    """

    def __init__(self, dataset_root: Union[str, Path], metadata_name: str,
                 split=None):
        """
        Setup the dataset

        Args:
            dataset_root: path to the dataset folder
            metadata_name: identifier of the metadata to use. Will select the files we want to use.
            split: identifier of the split to use or "ALL"/None to use all data

        Raises:
            RuntimeError: if metadata.json is missing or caption files for
                some video ids are absent (integrity check).
        """
        dataset_root = Path(dataset_root)
        # Read the CSV file containing information about the videos
        # Format is:
        # video_id, category_1, category_2, rank, task_id
        # This is used as lookup table of the existing videos
        csv = dataset_root.joinpath(f"meta_{metadata_name}.csv")
        self._metadata_csv = pd.read_csv(csv, usecols=["video_id", "split"], index_col="video_id")
        # Restrict to one split unless the caller asked for everything.
        if split is not None and split != nn_data.DataSplitConst.ALL:
            self._metadata_csv = self._metadata_csv[self._metadata_csv["split"] == split]
        metadata_path = dataset_root.joinpath("metadata.json")
        if not metadata_path.exists():
            raise RuntimeError(f"metadata.json for HT100M dataset not found! Path: {dataset_root}")
        self._metadata = json.load(metadata_path.open("rt", encoding="utf8"))
        # Frames-per-second value shared by all videos, taken from metadata.json.
        self._fps = self._metadata["fps"]
        self._caption_root = dataset_root.joinpath("captions")
        # Get all available caption files
        self._keys = self._metadata_csv.index.to_list()
        # Check the dataset integrity. I.e. if all caption csv files for every index are available
        if not self.check_integrity():
            raise RuntimeError("HT100MDataset: There are data_keys for which the features are not available!")

    def check_integrity(self) -> bool:
        """
        Checks if caption files for all keys exist. This is crucial for the integrity of the dataset.

        Returns: True if dataset integrity is correct.
        """
        timer_start = timer()
        # Caption files are named <video_id>.csv, so the stem is the key.
        available_keys = set([x.stem for x in self._caption_root.glob("*.csv")])
        print(f"Took {timer() - timer_start:.1f} seconds for scanning caption directory. "
              f"Found {len(self._keys)} videos.")
        missing_keys = set(self._keys).difference(available_keys)
        keys_are_missing = len(missing_keys) != 0
        if keys_are_missing:
            print(f"There are {len(missing_keys)} missing keys. First 10: {list(missing_keys)[:10]}")
        return not keys_are_missing

    def _read_caption_csv(self, video_id: str) -> (List[str], List[float], List[float]):
        # Load one caption file and return parallel lists of
        # (texts, start seconds, end seconds), dropping empty captions.
        cap_csv = pd.read_csv(self._caption_root.joinpath(video_id + ".csv"),
                              usecols=["start", "end", "text"],
                              keep_default_na=False)
        cap_csv = cap_csv[
            # Drop clips that have no subtitles/captions
            (cap_csv["text"].str.len() > 0)
        ]
        return (cap_csv['text'].tolist(),
                _time_to_seconds(cap_csv["start"]).tolist(),
                _time_to_seconds(cap_csv["end"]).tolist())

    def __getitem__(self, video_id: str) -> List[str]:
        # Subclasses decide what a sample looks like.
        raise NotImplementedError("GetItem cannot be called on BaseDataset")

    def __len__(self):
        """
        Returns len of dataset. I.e. number of videos.
        """
        return len(self._keys)

    def keys(self):
        # All video ids in the selected split.
        return self._keys

    def data_keys(self):
        # Alias of keys(); kept for API compatibility with other datasets.
        return self._keys
class HT100MCaptionDataset(HT100MBaseDataset):
    """Dataset variant that yields only the caption sentences of a video."""

    def __getitem__(self, video_id: str) -> List[str]:
        texts, _starts, _stops = self._read_caption_csv(video_id)
        return texts
class HT100MDataset(HT100MBaseDataset):
    """Dataset variant that yields per-video caption segments with timings."""

    def __init__(self, dataset_root: Union[str, Path], metadata_name: str, split: str, max_datapoints: int = -1):
        super().__init__(dataset_root, metadata_name, split=split)
        # reduce dataset size if request
        if max_datapoints > -1:
            self._keys = self._keys[:max_datapoints]
            print(f"Reduced number of datapoints to {len(self._keys)}")

    def __getitem__(self, key: str):
        texts, start_times, stop_times = self._read_caption_csv(key)
        # Build one dict per caption segment (empty captions were already
        # filtered inside _read_caption_csv).
        segments = []
        for text, start_sec, stop_sec in zip(texts, start_times, stop_times):
            segments.append({
                "text": text,
                "start_sec": start_sec,
                "stop_sec": stop_sec,
            })
        return {
            "fps": self._fps,
            "data_key": key,
            "segments": segments,
        }
| 5,043 | 1,492 |
import sys
import re
import pandas as pd
def combine_otu_tables(path_to_files):
    """Merge per-sample OTU tables into one taxa-by-sample abundance DataFrame.

    Args:
        path_to_files: path to a text file listing one OTU-table path per line.
            Each listed filename must contain an ``ERR<digits>_`` accession,
            which becomes the sample's column name. Each table is tab-separated
            with the count in column 0 and the taxon name in column 1.

    Returns:
        DataFrame indexed by taxon with one column per sample, missing taxa
        filled with 0, rows sorted by descending mean abundance.

    Raises:
        ValueError: if a listed filename has no ``ERR<digits>_`` accession
            (previously this crashed with an opaque AttributeError).
    """
    with open(path_to_files) as handle:
        filenames = handle.read().splitlines()
    separated = {}
    for fname in filenames:
        match = re.search(r'ERR\d+?(?=_)', fname)
        if match is None:
            raise ValueError("No ERR accession found in filename: {0}".format(fname))
        separated[match.group(0)] = pd.read_table(
            fname, sep='\t', index_col=1, header=None, engine='python')
    # Union of all taxa seen in any sample.
    all_taxa = list(set(sum([list(x.index) for x in separated.values()], [])))
    altogether = pd.DataFrame(None, columns=list(separated.keys()), index=all_taxa)
    for sample in separated:
        altogether[sample] = separated[sample][0]
    altogether = altogether.fillna(0)
    altogether['Mean'] = altogether.mean(axis=1)
    # sort_values exists since pandas 0.17 (2015); the old version-sniffing
    # fallback to the removed DataFrame.sort is gone.
    altogether = altogether.sort_values('Mean', axis=0, ascending=False)
    # .ix was removed from pandas; drop the helper Mean column positionally.
    return altogether.iloc[:, :-1]
def main():
    """CLI entry point: ``script <file-list> <output-path>``."""
    input_listing = sys.argv[1]
    destination = sys.argv[2]
    table = combine_otu_tables(input_listing)
    print('Combining all OTU-tables')
    table.to_csv(destination, sep = '\t')


if __name__ == "__main__":
    main()
| 1,244 | 446 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the boolean ``foi_importante`` flag to
    ``website.Galeria``. Auto-generated; the ``models`` dict below is a frozen
    snapshot of the app's schema at migration time — do not edit by hand."""

    def forwards(self, orm):
        """Apply: add the column with a non-null default of False."""
        # Adding field 'Galeria.foi_importante'
        db.add_column(u'website_galeria', 'foi_importante',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the column added by forwards()."""
        # Deleting field 'Galeria.foi_importante'
        db.delete_column(u'website_galeria', 'foi_importante')

    # Frozen ORM state used by South while running this migration.
    models = {
        u'website.calendario': {
            'Meta': {'object_name': 'Calendario'},
            'data_agendamento': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 4, 0, 0)'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'tipo': ('django.db.models.fields.CharField', [], {'default': "u'E'", 'max_length': '1'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.cardapio': {
            'Meta': {'object_name': 'Cardapio'},
            'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}),
            'cardapio_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mes': ('django.db.models.fields.CharField', [], {'default': "'12'", 'max_length': '2'}),
            'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'})
        },
        u'website.conteudodownload': {
            'Meta': {'object_name': 'ConteudoDownload'},
            'conteudo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.depoimento': {
            'Meta': {'object_name': 'Depoimento'},
            'autor': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'conteudo': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'website.galeria': {
            'Meta': {'object_name': 'Galeria'},
            'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}),
            'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'foi_importante': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mes': ('django.db.models.fields.CharField', [], {'default': "'12'", 'max_length': '2'}),
            'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'tipo': ('django.db.models.fields.CharField', [], {'default': "u'F'", 'max_length': '1'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.galeriaresource': {
            'Meta': {'object_name': 'GaleriaResource'},
            'action_resource': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'galeria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Galeria']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'upload_resource': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'url_resource': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        u'website.materialescolar': {
            'Meta': {'object_name': 'MaterialEscolar'},
            'anexo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'servico': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Servico']"})
        },
        u'website.menu': {
            'Meta': {'object_name': 'Menu'},
            'endereco': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'menu_pai': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'Menu Pai'", 'null': 'True', 'to': u"orm['website.Menu']"}),
            'nivel': ('django.db.models.fields.IntegerField', [], {}),
            'ordem': ('django.db.models.fields.IntegerField', [], {}),
            'pagina': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Pagina']", 'null': 'True'}),
            'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.pagina': {
            'Meta': {'object_name': 'Pagina'},
            'conteudo': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.parametro': {
            'Meta': {'object_name': 'Parametro'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'valor': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.professor': {
            'Meta': {'object_name': 'Professor'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'nome': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'})
        },
        u'website.publicacao': {
            'Meta': {'object_name': 'Publicacao'},
            'completa': ('django.db.models.fields.TextField', [], {}),
            'data_hora': ('django.db.models.fields.DateTimeField', [], {}),
            'data_publicacao': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'galeria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Galeria']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'introducao': ('django.db.models.fields.TextField', [], {}),
            'miniatura_publicacao': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'tipos': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'website.recomendacao': {
            'Meta': {'object_name': 'Recomendacao'},
            'acao_link': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'})
        },
        u'website.servico': {
            'Meta': {'object_name': 'Servico'},
            'atividades_extras': ('django.db.models.fields.TextField', [], {}),
            'atividades_incluidas': ('django.db.models.fields.TextField', [], {}),
            'conteudo_programatico': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'observacoes': ('django.db.models.fields.TextField', [], {}),
            'professor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Professor']"}),
            'rotina_diaria': ('django.db.models.fields.TextField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
            'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    # App(s) whose models are fully described by this migration.
    complete_apps = ['website']
import math
import numpy as np
from vector import Vector
import segment as segment_lib
class Point(Vector):
    """A 2-D point with orientation tests relative to segments and polygons.

    Inherits its coordinate storage (``x``, ``y``) from ``Vector``.
    """

    def direction(self, segment):
        """Return which side of `segment` this point lies on.

        Uses the sign of the 2x2 determinant (cross product) of the segment
        vector and the vector from the segment start to this point.
        """
        det = np.linalg.det([
            segment.as_vector().as_array(),
            segment_lib.Segment(segment.p1, self).as_vector().as_array()
        ])
        # NOTE(review): `det > 0` is checked before the isclose test, and
        # math.isclose(det, 0) with the default abs_tol=0.0 is only true for an
        # exact 0.0 — tiny positive determinants therefore classify as "left",
        # never "on". Confirm whether an abs_tol-based tolerance was intended.
        return 1 if det > 0 else 0 if math.isclose(det, 0) else -1  # 1 left, -1 right, 0 on

    def inside_segment(self, segment):
        # TODO: not implemented yet — currently always returns None.
        pass

    def tolist(self):
        # NOTE(review): despite the name this returns a tuple, not a list.
        return (self.x, self.y)

    def within_polygon(self, polygon):
        """Delegate point-in-polygon testing to the polygon object."""
        return polygon.contains(self)
| 580 | 186 |
import os
from cloud.aws_service import AwsService
def main():
    """Create the configured S3 bucket and report the outcome.

    Reads the target region and bucket name from the REGION and S3_BUCKET
    environment variables (with defaults) and delegates to AwsService.
    """
    region = os.environ.get('REGION', 'us-east-1')
    s3_bucket = os.environ.get('S3_BUCKET', 'costmgmtacct1234')
    aws = AwsService()
    result = aws.create_bucket(s3_bucket, region)
    if result:
        print(f'S3 bucket {s3_bucket} was created.')
    else:
        print(f'Failed creating S3 bucket {s3_bucket}.')


# Guard added: main() was previously called unconditionally, so merely
# importing this module created an S3 bucket.
if __name__ == '__main__':
    main()
| 424 | 157 |
import sys
sys.path.append("..")
import time
from charge_controller_tcp_driver.charge_controller_tcp_client_helper import *
# Manual hardware smoke test: drives the charge controller's PWM over TCP and
# reads the value back after each change. Requires the controller to be
# reachable at the link-local address below; the sleeps give the device time
# to apply each setting. Commented-out lines are alternative probes kept for
# bench debugging.
if __name__ == '__main__':
    # presumably the controller's fixed link-local address/port — confirm per test rig
    helper = ChargeControllerTCPClientHelper("169.254.43.3", 12500)
    time.sleep(3)
    helper.set_pwm(100)
    print("PWM:", helper.get_pwm())
    #time.sleep(10)
    #helper.set_ev_state("A")
    #print("EV State: ", helper.get_ev_state())
    time.sleep(10)
    helper.set_pwm(50)
    time.sleep(2)
    print("PWM:", helper.get_pwm())
    #print("EV State: ", helper.get_ev_state())
    time.sleep(1)
    #helper.set_pwm(50)
    #print("PWM:", helper.get_pwm())
    time.sleep(10)
    helper.set_pwm(30)
    time.sleep(2)
    print("PWM:", helper.get_pwm())
    # print("EV State: ", helper.get_ev_state())
| 773 | 331 |
import os
from typing import Text
import torch
import unittest
import torch.nn as nn
import torch.optim as optim
from allennlp.models import Model
from allennlp.data.vocabulary import Vocabulary
from zsl_kg.class_encoders.auto_gnn import AutoGNN
from zsl_kg.example_encoders.text_encoder import TextEncoder
from zsl_kg.data.snips import SnipsDataset
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from zsl_kg.common.graph import NeighSampler
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from allennlp.common.tqdm import Tqdm
class BiLinearModel(Model):
    """Bilinear compatibility model: projects example and class encodings into
    a shared joint space and scores every (example, class) pair by dot product."""

    def __init__(
        self,
        vocab: Vocabulary,
        example_encoder: object,
        class_encoder: object,
        joint_dim: int,
        bias: bool = False,
    ):
        super().__init__(vocab)
        self.example_encoder = example_encoder
        self.class_encoder = class_encoder
        # Linear projections from each encoder's output space into the
        # shared joint space.
        self.text_joint = nn.Linear(
            example_encoder.output_dim, joint_dim, bias=bias
        )
        self.class_joint = nn.Linear(
            class_encoder.output_dim, joint_dim, bias=bias
        )

    def forward(self, batch, node_idx, kg):
        """Return (batch, n_classes) compatibility logits."""
        text_rep = self.text_joint(self.example_encoder(batch))
        class_rep = self.class_joint(self.class_encoder(node_idx, kg))
        return torch.matmul(text_rep, class_rep.t())
class TestIntentClassification(unittest.TestCase):
    """End-to-end smoke test: train the bilinear zero-shot intent classifier
    for one epoch on the bundled SNIPS fixture data and check the loss stays
    in a sane range."""

    def setUp(
        self,
    ):
        # Disjoint label sets per split — this is a zero-shot setup, so test
        # intents never appear in training.
        label_maps = {
            "train": ["weather", "music", "restaurant"],
            "dev": ["search", "movie"],
            "test": ["book", "playlist"],
        }

        data_path = "tests/test_data/datasets/snips/"
        datasets = []
        for split in ["train", "dev", "test"]:
            labels = label_maps[split]
            label_to_idx = dict(
                [(label, idx) for idx, label in enumerate(labels)]
            )

            reader = SnipsDataset(label_to_idx)
            path = os.path.join(data_path, f"{split}.txt")
            _dataset = reader.read(path)
            datasets.append(_dataset)

        self.train_dataset, self.dev_dataset, self.test_dataset = datasets

        # Vocabulary is built over all splits so dev/test tokens are known.
        vocab = Vocabulary.from_instances(
            self.train_dataset + self.dev_dataset + self.test_dataset
        )

        # create the iterator
        self.iterator = BasicIterator(batch_size=32)
        self.iterator.index_with(vocab)

        print("Loading GloVe...")
        # token embed
        token_embed_path = os.path.join(data_path, "word_emb.pt")
        token_embedding = torch.load(token_embed_path)
        print("word embeddings created...")
        word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})

        # create the text encoder
        print("Loading the text encoder...")
        self.example_encoder = TextEncoder(word_embeddings, 300, 32, 20)

        # Two-layer graph encoder config consumed by AutoGNN; 300-d inputs
        # (word-embedding size), 64-d node outputs.
        trgcn = {
            "input_dim": 300,
            "output_dim": 64,
            "type": "trgcn",
            "gnn": [
                {
                    "input_dim": 300,
                    "output_dim": 64,
                    "activation": nn.ReLU(),
                    "normalize": True,
                    "sampler": NeighSampler(100, mode="topk"),
                    "fh": 100,
                },
                {
                    "input_dim": 64,
                    "output_dim": 64,
                    "activation": nn.ReLU(),
                    "normalize": True,
                    "sampler": NeighSampler(50, mode="topk"),
                },
            ],
        }

        self.class_encoder = AutoGNN(trgcn)

        self.train_graph = ConceptNetKG.load_from_disk(
            "tests/test_data/subgraphs/snips/train_graph"
        )
        node_to_idx = dict(
            [(node, idx) for idx, node in enumerate(self.train_graph.nodes)]
        )
        # Graph-node indices of the ConceptNet concepts matching the train labels.
        self.train_nodes = torch.tensor(
            [
                node_to_idx[node]
                for node in [
                    "/c/en/weather",
                    "/c/en/music",
                    "/c/en/restaurant",
                ]
            ]
        )

        self.model = BiLinearModel(
            vocab, self.example_encoder, self.class_encoder, joint_dim=20
        )

        self.optimizer = optim.Adam(
            self.model.parameters(), lr=1e-03, weight_decay=5e-04
        )
        self.loss_function = nn.CrossEntropyLoss()

    def test_intent_classification_train(self):
        """One epoch of training should complete and keep total loss bounded."""
        self.model.train()
        total_batch_loss = 0.0
        generator_tqdm = Tqdm.tqdm(
            self.iterator(self.train_dataset, num_epochs=1, shuffle=False),
            total=self.iterator.get_num_batches(self.train_dataset),
        )
        for batch in generator_tqdm:
            self.optimizer.zero_grad()
            logits = self.model(
                batch["sentence"], self.train_nodes, self.train_graph
            )
            loss = self.loss_function(logits, batch["labels"])
            total_batch_loss += loss.item()
            loss.backward()
            self.optimizer.step()

        # Loose sanity bound on the summed batch losses for the tiny fixture set.
        self.assertLessEqual(total_batch_loss, 100.0)
| 5,312 | 1,676 |
import csv
import json
import logging
import math
import random as ran
def distance(point1, point2):
    """Return the Euclidean distance between two 2-D points.

    Args:
        point1: indexable pair (x, y).
        point2: indexable pair (x, y); a warning is logged when the two
            arguments are of different container types.
    """
    logging.debug("Args: {0}".format(locals()))
    if type(point1) != type(point2):
        logging.warning("Types of given arguments are different: {0} != {1}".format(point1, point2))
    # Compute once and reuse — previously the expression was evaluated twice
    # (once for the debug log, once for the return).
    result = ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5
    logging.debug("Returns: {0}".format(result))
    return result
class Animal:
    """A point-like agent on a 2-D plane that moves in fixed-length steps.

    Used for both wolves and sheep in the simulation; an eaten animal keeps
    its id but has its coordinates set to None.
    """

    def __init__(self, id, x, y, move_dist):
        logging.info("{0}:[{1}, {2}]".format(id, x, y))
        self.id = id
        self.x = x
        self.y = y
        self.move_dist = move_dist  # step length for every move

    def __lt__(self, other):
        # Order animals by identifier (used when sorting alive + eaten lists).
        return self.id < other.id

    def move(self, x, y):
        """Translate the animal by the offset (x, y)."""
        logging.info("{0}:[{1}, {2}] => [{3}, {4}]".format(self.id, self.x, self.y, self.x+x, self.y+y))
        self.x += x
        self.y += y

    def move_in_direction(self, direction):
        """Move one step: 0=up, 1=down, 2=right, 3=left, or toward an Animal.

        Unknown direction values are silently ignored.
        """
        if direction == 0:
            self.move(0, self.move_dist)
        elif direction == 1:
            self.move(0, -self.move_dist)
        elif direction == 2:
            self.move(self.move_dist, 0)
        elif direction == 3:
            self.move(-self.move_dist, 0)
        elif isinstance(direction, Animal):  # was: type(direction) == Animal
            angle = math.atan2(direction.y - self.y, direction.x - self.x)
            self.move(
                self.move_dist * math.cos(angle),
                self.move_dist * math.sin(angle)
            )

    def move_in_random_direction(self):
        """Take one step in a uniformly random axis direction."""
        self.move_in_direction(ran.randint(0, 3))

    def distance(self, animal):
        """Euclidean distance to another animal."""
        return distance([self.x, self.y], [animal.x, animal.y])

    def find_the_closest_animal(self, animals):
        """Return the nearest animal from a non-empty list.

        Raises IndexError when `animals` is empty.
        """
        closest = animals[0]
        dist = self.distance(closest)
        for animal in animals:
            # Consistency fix: reuse self.distance instead of duplicating the
            # module-level distance() call with hand-built coordinate lists.
            new_dist = self.distance(animal)
            if new_dist < dist:
                dist = new_dist
                closest = animal
        return closest

    def eaten(self):
        """Mark this animal as eaten by clearing its coordinates."""
        logging.info("Eaten: {0}:[{1}, {2}]".format(self.id, self.x, self.y))
        self.x = None
        self.y = None

    def get_pos(self):
        """Return the position as a [x, y] list ([None, None] if eaten)."""
        return [self.x, self.y]

    @staticmethod
    def generate_animals(animals_number,
                         move_range,
                         spawn_range=10.0):
        """Create `animals_number` animals with ids 1..n at uniformly random
        positions in [-spawn_range, spawn_range]^2."""
        logging.debug("Args: {0}".format(locals()))
        new_animals = []
        for s in range(animals_number):
            new_animals.append(Animal(
                s + 1,
                ran.random() * spawn_range * 2 - spawn_range,
                ran.random() * spawn_range * 2 - spawn_range,
                move_range))
        logging.debug("Returns: {0}".format(new_animals))
        return new_animals
def save_json(json_data, filename='pos.json', save_dir='.'):
    """Serialize `json_data` as JSON to save_dir/filename (overwriting)."""
    logging.debug("Args: {0}".format(locals()))
    target = save_dir + "/" + filename
    with open(target, 'w') as out_file:
        json.dump(json_data, out_file)
def save_csv(csv_data=None, filename='alive.csv', opening_parameter='a', save_dir='.'):
logging.debug("Args: {0}".format(locals()))
with open(save_dir+"/"+filename, opening_parameter, newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
if csv_data is not None:
writer.writerow(csv_data)
def simulate(wolves_sim, sheep_sim, turns_number=50, save_dir='.', wait=False):
    """Run the wolves-and-sheep simulation for up to `turns_number` turns.

    Each turn every sheep moves randomly, then every wolf chases (or eats)
    its nearest sheep. Positions are dumped to pos.json and the alive count
    appended to alive.csv after every turn. `sheep_sim` is mutated in place
    (eaten sheep are removed); the list of eaten sheep is returned.

    NOTE(review): the printed "Eaten sheep" id and wolf position reflect only
    the last wolf in `wolves_sim` — confirm intended for multi-wolf runs.
    """
    logging.debug("Args: {0}".format(locals()))
    sheep_eaten = []
    save_csv(None, 'alive.csv', 'w', save_dir)  # overwrite the file from any previous run
    for t in range(turns_number):
        for s in sheep_sim:
            s.move_in_random_direction()
        for w in wolves_sim:
            closest = w.find_the_closest_animal(sheep_sim)
            if w.distance(closest) <= w.move_dist:
                # Within reach: jump onto the sheep and eat it.
                w.x = closest.x
                w.y = closest.y
                closest.eaten()
                sheep_index = closest.id
                sheep_eaten.append(closest)
                sheep_sim.remove(closest)
            else:
                # Otherwise take one step toward it.
                w.move_in_direction(closest)
                sheep_index = None
        print("Turn: {0}\n"
              "Wolf position: {1}\n"
              "Sheep alive: {2}\n"
              "Eaten sheep: {3}".format(t + 1, wolves_sim[0].get_pos(), len(sheep_sim), sheep_index))
        # save json and csv snapshots for this turn
        pos = {
            'round_no': t + 1,
            'wolf_pos': wolves_sim[0].get_pos(),
            # Eaten sheep stay in the dump with [None, None] positions.
            'sheep_pos': list(map(Animal.get_pos, sorted(sheep_sim+sheep_eaten)))
        }
        save_json(pos, 'pos.json', save_dir)
        save_csv([t+1, len(sheep_sim)], 'alive.csv', 'a', save_dir)
        # optionally wait for a keypress between turns
        if wait:
            input("Press Enter to continue...")
        # sheep population down to 0 => end of simulation
        if len(sheep_sim) == 0:
            logging.info("Wolf ate every sheep. End of simulation.")
            break
    logging.debug("Returns: {0}".format(sheep_eaten))
    return sheep_eaten
| 5,077 | 1,722 |
import json
import subprocess
import asyncio
from solana.rpc.async_api import AsyncClient
from solana.publickey import PublicKey
from anchorpy import Program, Provider, Wallet
class bcolors:
    """ANSI escape sequences for colored terminal output.

    Prefix a message with one of the color codes and always terminate it
    with ``ENDC`` to reset the terminal attributes.
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def build_and_start_server(project_name, prd_mode):
    """Build the generated Apollo GraphQL project and start its server.

    Runs ``npm run prod`` in the current directory, then ``npm start`` in
    ``./src/server``; both are blocking subprocess calls.

    Args:
        project_name: project name, used only in log messages.
        prd_mode: NOTE(review) currently unused — the build always runs
            "npm run prod"; confirm whether a non-prod path was intended.

    Returns:
        True when both the build and the server start exit with code 0,
        False otherwise.
    """
    # Bug fix: terminate the colored prefix with ENDC so the cyan color does
    # not bleed into all subsequent terminal output.
    print(f'{bcolors.OKCYAN}INFO: Starting test for {project_name}{bcolors.ENDC}')
    completed_process_result = subprocess.run(
        "npm run prod", shell=True)
    if completed_process_result.returncode != 0:
        print(
            f'{bcolors.FAIL}ERROR: Failed to generate Apollo GraphQL project for project: {project_name}{bcolors.ENDC}')
        return False
    print(f'{bcolors.OKGREEN}DONE: Project creation successful for project: {project_name}{bcolors.ENDC}')
    server_directory = "./src/server"
    new_process = subprocess.run(
        "npm start", cwd=server_directory, shell=True)
    if new_process.returncode != 0:
        print(
            f'{bcolors.FAIL}ERROR: Failed to start newly generated Apollo GraphQL server for project: {project_name}{bcolors.ENDC}')
        return False
    print(f'{bcolors.OKGREEN}DONE: Project startup successful for project: {project_name}{bcolors.ENDC}')
    return True
def create_project_config(path, content):
    """Serialize *content* to *path* as a JSON config file."""
    serialized = json.dumps(content)
    with open(path, 'w') as config_file:
        config_file.write(serialized)
async def check_and_replace_with_new_idl(program_id, idl_path, anchor_provider_url):
    """Fetch the on-chain IDL for *program_id* and overwrite *idl_path* with it.

    Best-effort: if the RPC fetch fails for any reason the local IDL file is
    left untouched.  The RPC client is always closed.

    Bug fixes vs. the original: the bare ``except:`` referenced ``client``
    even when ``AsyncClient(...)`` itself had raised (NameError), and the
    close call was duplicated on the two paths — now a single ``finally``.
    """
    client = AsyncClient(anchor_provider_url)
    try:
        provider = Provider(client, Wallet.local())
        idl = await Program.fetch_raw_idl(
            PublicKey(program_id), provider
        )
    except Exception:
        # Keep the existing local IDL when the cluster is unreachable.
        idl = None
    finally:
        await client.close()
    if idl is not None:
        with open(idl_path, 'w') as file:
            json.dump(idl, file)
def main():
    """Run the end-to-end generator test for every channel in channels.json.

    For each channel: refresh its IDL from the cluster, write
    ./src/config.json, build and start the generated server, then print a
    pass/fail summary for all channels.
    """
    # On Windows, if an error happens where the channels file isn't found, you probably opened the project
    # from the wrong directory. Either try reopening the project from the correct directory or play with the
    # line below.
    # os.chdir('./anchorgql')
    # Bug fix: open the config through a context manager so the file handle
    # is closed instead of leaked (was json.load(open(...))).
    with open('channels.json') as channels_file:
        config = json.load(channels_file)
    channels_config = config['channels']
    results = []
    for channel in channels_config:
        project_name = channel['PROJECT_NAME']
        program_id = channel['PROGRAM_ID']
        anchor_provider_url = channel['ANCHOR_PROVIDER_URL']
        idl_path = channel['IDL_PATH']
        # Refresh the local IDL from the cluster before generating the project.
        asyncio.run(check_and_replace_with_new_idl(
            program_id, idl_path, anchor_provider_url))
        content = {
            "projectName": project_name,
            "protocol": channel["PROTOCOL"],
            "network": channel["NETWORK"],
            "programID": program_id,
            "anchorProviderURL": anchor_provider_url,
            "idlPath": idl_path,
            "anchorVersion": config['anchorVersion'],
            "idl": config['idl'],
            "port": config['port'],
            "packageJsonTemplateFile": config['packageJsonTemplateFile'],
            "indexTemplateFile": config['indexTemplateFile'],
            "typeDefTemplateFile": config['typeDefTemplateFile'],
            "configFile": config['configFile'],
            "testMode": config["testMode"],
            "prdMode": config["prdMode"]
        }
        create_project_config('./src/config.json', content)
        passed = build_and_start_server(project_name, config["prdMode"])
        results.append({
            "projectName": project_name,
            "passed": passed
        })
    print()
    print("===================================================")
    print("===================================================")
    print("===================================================")
    print()
    print(f'{bcolors.OKBLUE}INFO: Test results:{bcolors.ENDC}')
    for result in results:
        if result['passed']:
            print(
                f'{bcolors.OKGREEN}{result["projectName"]}: Passed{bcolors.ENDC}')
        else:
            print(
                f'{bcolors.FAIL}{result["projectName"]}: Failed{bcolors.ENDC}')
    print()
    print("===================================================")
    print("=================== End of Run ====================")
    print("===================================================")
# Run the test suite when executed directly (not on import).
if __name__ == '__main__':
    main()
| 4,518 | 1,360 |
""" an image, nothing fancy """
from dataclasses import dataclass
from .base_activity import ActivityObject
@dataclass(init=False)
class Document(ActivityObject):
    """a document: a remote URL with an optional display name"""
    # Location of the document's content.
    url: str
    # Optional human-readable name; empty by default.
    name: str = ""
    # ActivityStreams object type.
    type: str = "Document"
    # NOTE(review): annotated str but defaults to None — presumably filled in
    # during (de)serialization by ActivityObject; confirm.
    id: str = None
@dataclass(init=False)
class Image(Document):
    """an image: identical to Document except for its ActivityStreams type"""
    # Overrides Document's type tag.
    type: str = "Image"
| 357 | 113 |
from .catch_errors import check_for_period_error
from .exponential_moving_average import exponential_moving_average as ema
def moving_average_convergence_divergence(data, short_period, long_period):
    """
    Moving Average Convergence Divergence.

    Computed as the difference of two exponential moving averages:

    EMA(DATA, P1) - EMA(DATA, P2)
    """
    # Both periods must be valid for the data before computing anything.
    for period in (short_period, long_period):
        check_for_period_error(data, period)
    fast_ema = ema(data, short_period)
    slow_ema = ema(data, long_period)
    return fast_ema - slow_ema
| 477 | 169 |
import os
import glob
import shutil
import zipfile
from functions.game_name_functions import *
# When launched from the scripts/ directory, move to the repository root so
# relative paths like 'tosec/...' resolve correctly.
if (os.getcwd().endswith('scripts')):
    os.chdir('..')
from classes.scraper import *
def scrape_csscgc():
    """Scrape the CSSCGC archive index (1996-2016) and file games under TOSEC names.

    For each yearly index page, every game table row is parsed for name,
    author, candidate filenames and format.  Files previously downloaded into
    'tosec/csscgc scrape/CSSCGC<year>' are copied (or extracted from zips)
    into 'tosec/CSSCGC Games/<year>' under a TOSEC-style filename.  Files no
    table row claimed end up in an 'unsorted' subfolder.
    """
    # if os.path.exists('tosec\\CSSCGC Games'):
    #     shutil.rmtree('tosec\\CSSCGC Games')
    s = Scraper()
    template = 'https://www.yoursinclair.co.uk/csscgc/csscgc.cgi?year='
    for year in range(1996, 2017):
        files_extracted = []
        page = template + str(year)
        selector = s.loadUrl(page)
        games_tables = selector.xpath('//table[@border="1"]').extract_all()
        for game_table in games_tables:
            cells = Selector(game_table).xpath('//td//text()').extract_all()
            game_name = cells[0]
            author = cells[2]
            if not author.startswith('Mr'):
                author = putInitialsToEnd(author)
            # cells[4] may hold several space-separated filenames; also try it whole.
            filenames = list(set(cells[4].split(' ')+[cells[4]]))
            format = cells[10]  # NOTE: shadows the builtin 'format' locally
            game_represented = False
            for filename in filenames:
                if not filename:
                    continue
                filename = os.path.basename(filename)
                ext = os.path.splitext(filename)[-1].lower()
                # Build the TOSEC-style name and normalize common tags.
                tosec_name = '{} ({})({})({})[CSSCGC]{}'.format(game_name, str(year), author, format, ext)
                tosec_name = tosec_name.replace('(Spectrum)', '').replace('ZX Spectrum ', '').replace('(48K)', '')
                tosec_name = tosec_name.replace('(128K Spectrum)', '(128K)')
                tosec_name = tosec_name.replace('(128K-+2)', '(+2)')
                tosec_name =tosec_name.replace('(unknown)', '(-)')
                tosec_name = getFileSystemFriendlyName(tosec_name)
                src = os.path.join('tosec', 'csscgc scrape', 'CSSCGC' + str(year), filename)
                dest = os.path.join('tosec', 'CSSCGC Games', str(year), tosec_name)
                # print(src, dest)
                if not os.path.exists(src):
                    # print('File does not exist:', filename, 'Year:', year)
                    continue
                if os.path.exists(dest):
                    print('Conflict:', tosec_name, filename, 'Year:', year)
                    continue
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                if ext == '.zip':
                    with zipfile.ZipFile(src, 'r') as zf:
                        files_to_extract = []
                        conflict = False
                        for zfname in zf.namelist():
                            zfname_ext = zfname.split('.')[-1].lower()
                            if zfname_ext in GAME_EXTENSIONS:
                                files_to_extract.append(zfname)
                        # Two archive members with the same extension would
                        # collide on the destination name — treat as conflict.
                        for each in GAME_EXTENSIONS:
                            if len([x for x in files_to_extract if x.endswith(each)])>1:
                                print('Conflict:', tosec_name, src, files_to_extract, 'Year:', year)
                                conflict = True
                                break
                        if not conflict and files_to_extract:
                            for file in files_to_extract:
                                # NOTE(review): this loop always reads
                                # files_to_extract[0], never `file` — it looks
                                # like each member was meant to be extracted;
                                # confirm intent before changing.
                                data = zf.read(files_to_extract[0])
                                ext = os.path.splitext(files_to_extract[0])[-1].lower()
                                dest = dest.replace('.zip', ext)
                                with open(dest, 'wb+') as output:
                                    output.write(data)
                                game_represented = True
                                files_extracted.append(src)
                else:
                    shutil.copy(src, dest)
                    files_extracted.append(src)
                    game_represented = True
            if not game_represented:
                print('Game not represented:', tosec_name, cells[4], 'Year:', year)
        # Anything left in the scrape folder that no table row claimed goes to
        # an 'unsorted' folder with a best-guess title-cased name.
        for src in glob.glob(os.path.join('tosec', 'csscgc scrape', 'CSSCGC'+str(year), '*')):
            filename, ext = os.path.splitext(os.path.basename(src))
            if ext[1:] not in GAME_EXTENSIONS+['zip']:
                continue
            if src in files_extracted:
                continue
            else:
                tosec_name = '{} ({})(-)[CSSCGC]{}'.format(filename.title() , str(year), ext)
                dest = os.path.join('tosec', 'CSSCGC Games', str(year), 'unsorted', tosec_name)
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                shutil.copy(src, dest)
                print('Copied: ', src, 'to:', dest, 'Year:', year)
# Run the scrape when executed directly (not on import).
if __name__=='__main__':
    scrape_csscgc()
from django.forms import ModelForm
from ..models import Pit
class PitForm(ModelForm):
    """Form for creating/editing a Pit, exposing only its location field."""

    class Meta:
        model = Pit
        fields = ['location']

    def __init__(self, *args, **kwargs):
        # Bug fix: the original called super(ModelForm, self).__init__, which
        # starts MRO lookup *after* ModelForm and skips it entirely; the
        # zero-argument form resolves from PitForm's own MRO.
        super().__init__(*args, **kwargs)
        # Style the location widget for Bootstrap.
        self.fields['location'].widget.attrs['class'] = 'form-control'
| 328 | 97 |
import pathlib
# Root of the bundled scaffolding templates, resolved relative to this file
# (strict=True raises FileNotFoundError if the path does not exist).
TEMPLATES_DIR = pathlib.Path(__file__).resolve(strict=True).parent / 'conf'
# Template tree used when generating a new app.
APP_TEMPLATES_DIR = TEMPLATES_DIR / 'app_template'
# Template tree used when generating a new project.
PROJECT_TEMPLATES_DIR = TEMPLATES_DIR / 'project_template'
| 203 | 85 |
# std
from typing import Any, Dict, List, Optional, Union
# external
import pkg_resources
import sqlalchemy
from sqlalchemy.orm import aliased, Session
# molar
from molar.backend import schemas
from molar.backend.database.utils import sqlalchemy_to_dict
INFORMATION_QUERY = open(
pkg_resources.resource_filename("molar", "sql/information_query.sql"), "r"
).read()
def resolve_type(type: str, models, alias_registry=None):
    """Resolve a dotted type path (e.g. "molecule.smiles") against *models*.

    Walks one segment at a time through model attributes and registered
    aliases; a JSON column (InstrumentedAttribute) is dereferenced via
    ``.astext``.

    Raises:
        ValueError: when a segment cannot be resolved.
    """
    if alias_registry is None:
        alias_registry = {}
    head, _, rest = type.partition(".")
    if not rest:
        # Leaf segment.
        if isinstance(models, sqlalchemy.orm.attributes.InstrumentedAttribute):
            return models[head].astext
        attribute = getattr(models, head, None)
        if attribute is not None:
            return attribute
        if head in alias_registry:
            return alias_registry[head]
        raise ValueError(f"Type {type} not found in database!")
    # Intermediate segment: descend into the sub-model (or its alias).
    submodel = getattr(models, head, None)
    if submodel is None and head in alias_registry:
        submodel = alias_registry[head]
    if submodel is None:
        raise ValueError(f"Type {type} not found in database!")
    return resolve_type(rest, submodel, alias_registry)
def query_builder(
    db: Session,
    models,
    types: schemas.QueryTypes,
    limit: int,
    offset: int,
    joins: Optional[schemas.QueryJoins] = None,
    filters: Optional[schemas.QueryFilters] = None,
    order_by: Optional[schemas.QueryOrderBys] = None,
    aliases: Optional[schemas.QueryAliases] = None,
):
    """Build a SQLAlchemy query from declarative schema objects.

    Args:
        db: active SQLAlchemy session.
        models: root models namespace consumed by resolve_type.
        types: one or more dotted type paths naming the selected entities.
        limit / offset: pagination, applied last.
        joins: optional join specs; join_type "outer"/"full" select the
            corresponding join flavor, anything else is an inner join.
        filters: optional filter tree handed to expand_filters.
        order_by: optional ordering specs; order "asc" ascends, anything
            else descends.
        aliases: optional alias definitions, registered before any type
            resolution so later specs can refer to them.

    Returns:
        (query, db_objs, types): the composed query plus the resolved
        objects and the normalized list of type paths, in the shape
        process_query_output expects.
    """
    alias_registry: Dict[str, Any] = {}
    # Resolving aliases
    if aliases is not None:
        if not isinstance(aliases, list):
            aliases = [aliases]
        for alias in aliases:
            alias_registry[alias.alias] = aliased(
                resolve_type(alias.type, models), name=alias.alias
            )
    # Resolving main types
    if not isinstance(types, list):
        types = [types]
    db_objs = []
    for type_ in types:
        db_obj = resolve_type(type_, models, alias_registry)
        db_objs.append(db_obj)
    query = db.query(*db_objs)
    if joins is not None:
        if not isinstance(joins, list):
            joins = [joins]
        for join in joins:
            joined_table = resolve_type(
                join.type,
                models,
                alias_registry,
            )
            # Explicit ON clause, built from the two named columns.
            onclause = None
            if join.on is not None:
                onclause = resolve_type(
                    join.on.column1, models, alias_registry
                ) == resolve_type(join.on.column2, models, alias_registry)
            query = query.join(
                joined_table,
                onclause,
                isouter=True if join.join_type == "outer" else False,
                full=True if join.join_type == "full" else False,
            )
    if filters is not None:
        filters = expand_filters(filters, models, alias_registry)
        query = query.filter(filters)
    if order_by is not None:
        if not isinstance(order_by, list):
            order_by = [order_by]
        order_bys = []
        for ob in order_by:
            t = resolve_type(ob.type, models, alias_registry)
            if ob.order == "asc":
                order_bys.append(t.asc())
            else:
                order_bys.append(t.desc())
        query = query.order_by(*order_bys)
    query = query.offset(offset).limit(limit)
    return query, db_objs, types
def process_query_output(db_objs, query_results, types):
    """Convert raw query rows into plain dictionaries.

    With a single selected entity, each row maps directly; with several,
    the per-entity dicts are merged (table names included to disambiguate).
    """
    if len(db_objs) == 1:
        only_obj = db_objs[0]
        return [sqlalchemy_to_dict(only_obj, row, types[0]) for row in query_results]
    processed = []
    for row in query_results:
        merged = {}
        for value, obj, type_ in zip(row, db_objs, types):
            merged.update(sqlalchemy_to_dict(obj, value, type_, add_table_name=True))
        processed.append(merged)
    return processed
def expand_filters(filters, models, alias_registry):
    """Recursively translate schema filter objects into SQLAlchemy clauses.

    A QueryFilterList combines its children with and/or; a QueryFilter
    compares a resolved column against a value (or another column, when the
    value string resolves to one).

    Raises:
        ValueError: on an unsupported boolean operator.
    """
    if isinstance(filters, schemas.QueryFilterList):
        # Bug fixes: the recursive call was missing the models/alias_registry
        # arguments (TypeError), and `filters` was rebound to a plain list
        # before `.op` was read (AttributeError).
        op = filters.op
        clauses = [expand_filters(f, models, alias_registry) for f in filters.filters]
        if op == "and":
            return sqlalchemy.and_(*clauses)
        elif op == "or":
            return sqlalchemy.or_(*clauses)
        else:
            raise ValueError(f"Filter operator not supported: {op}")
    elif isinstance(filters, schemas.QueryFilter):
        type = resolve_type(filters.type, models, alias_registry)
        # Map comparison symbols onto the corresponding dunder methods;
        # any other operator string is used verbatim (e.g. "like").
        operator = filters.op
        if filters.op == "==":
            operator = "__eq__"
        elif filters.op == "!=":
            operator = "__ne__"
        elif filters.op == ">":
            operator = "__gt__"
        elif filters.op == "<":
            operator = "__lt__"
        elif filters.op == ">=":
            operator = "__ge__"
        elif filters.op == "<=":
            operator = "__le__"
        # If value is another column
        value = filters.value
        if isinstance(value, str):
            try:
                value_type = resolve_type(value, models, alias_registry)
            except ValueError:
                pass
            else:
                value = value_type
        return getattr(type, operator)(value)
| 5,249 | 1,587 |
import time
import os
def all_fields_present(passport):
    """Return True when every required field is present ('cid' is optional)."""
    required = ('byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid')
    return all(field in passport for field in required)
def is_valid(passport):
    """Validate every field of *passport* against the part-2 rules."""
    if not all_fields_present(passport):
        return False
    # Year fields: (key, first valid, last valid) — inclusive bounds.
    for key, first, last in (('byr', 1920, 2002),
                             ('iyr', 2010, 2020),
                             ('eyr', 2020, 2030)):
        candidate = passport[key]
        if not (is_year(candidate) and first <= int(candidate) <= last):
            return False
    if not valid_height(passport['hgt']):
        return False
    # Hair color: '#' followed by exactly six lowercase hex digits.
    hcl = passport['hcl']
    hex_color_ok = (hcl[0] == '#' and len(hcl) == 7 and
                    all(is_digit(ch) or ch in 'abcdef' for ch in hcl[1:]))
    if not hex_color_ok:
        return False
    if passport['ecl'] not in ('amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'):
        return False
    # Passport id: exactly nine digits (leading zeros allowed).
    pid = passport['pid']
    return len(pid) == 9 and all(is_digit(ch) for ch in pid)
def is_year(y):
    """True when *y* is a 4-character string made of digits."""
    return len(y) == 4 and all(map(is_digit, y))
def is_digit(x):
    """Return True when *x* parses as a single decimal digit 0-9.

    Anything int() rejects returns False instead of raising.  The bare
    ``except:`` was narrowed to (TypeError, ValueError) so real problems
    (e.g. KeyboardInterrupt) are no longer swallowed.
    """
    try:
        return 0 <= int(x) <= 9
    except (TypeError, ValueError):
        return False
def valid_height(hgt):
    """Validate a height string: 59-76 inches or 150-193 cm (inclusive).

    Malformed input simply fails validation.  The bare ``except:`` was
    narrowed to (TypeError, ValueError) — those cover non-string input and
    non-numeric prefixes — so real problems are no longer swallowed.
    """
    try:
        if hgt[2:] == 'in' and int(hgt[:2]) in range(59, 77):
            return True
        if hgt[3:] == 'cm' and int(hgt[:3]) in range(150, 194):
            return True
        return False
    except (TypeError, ValueError):
        return False
def str_to_passport(s):
    """Parse a 'key:value key:value ...' record into a dict."""
    fields = (chunk.split(':') for chunk in s.strip().split(' '))
    return {parts[0]: parts[1] for parts in fields}
def part_one(passports):
    """Count passports whose required fields are all present."""
    return sum(1 for p in passports if all_fields_present(p))
def part_two(passports):
    """Count passports that pass full field validation."""
    return sum(1 for p in passports if is_valid(p))
def main():
    """Read input.txt, parse blank-line-separated passports, print both answers."""
    start_time = time.time()
    with open(os.path.dirname(__file__) + '/input.txt', 'r') as data:
        passports = []
        s = ''
        for line in data.readlines():
            if line == '\n':
                # A blank line terminates the current passport record.
                passports.append(str_to_passport(s))
                s = ''
            else:
                s += line.strip()+' '
        # The last record has no trailing blank line — flush it explicitly.
        passports.append(str_to_passport(s))
        part_one_ans = part_one(passports)
        part_two_ans = part_two(passports)
        print('Day 4 ({:,.3f}s)'.format(time.time() - start_time))
        print('  Part 1: {}'.format(part_one_ans))
        print('  Part 2: {}'.format(part_two_ans))
if __name__ == "__main__":
main() | 2,377 | 1,067 |
# -*- coding: utf-8 -*-
"""Role models."""
from dataclasses import dataclass
from array import array
from .database import Column, Model, SurrogatePK, db, reference_col, relationship
from sqlalchemy.dialects.postgresql import ARRAY
@dataclass
class Role(SurrogatePK, Model):
    """User role information table."""
    __tablename__ = "roles"
    # Fields included in the JSON representation (dataclass annotations).
    name: str
    id: str
    remarks: str
    web_menus: array
    update_date: str
    # Role data-access scope (data_scope):
    # 0 default; 1 sees only own data; 2 sees data of own office; 3 sees all data
    DATA_SCOPE_DEFAULT = 0
    DATA_SCOPE_SELF = 1
    DATA_SCOPE_OFFICE = 2
    DATA_SCOPE_ALL = 3
    # Database column definitions.  NOTE: `name` and `web_menus` re-bind the
    # annotated attributes above to actual columns.
    name = Column(db.String(80), unique=True, nullable=False)
    users = relationship("UserRole", back_populates="role")
    data_scope = Column(db.SmallInteger, nullable=False)
    web_menus = Column(ARRAY(db.String))
    def __init__(self, **kwargs):
        """Create instance."""
        db.Model.__init__(self, **kwargs)
    def __repr__(self):
        """Represent instance as a unique string."""
        return "<Role({name})>".format(name=self.name)
| 1,089 | 439 |
import requests, datetime as dt, numpy as np, pandas as pd, pytz
from dateutil.relativedelta import relativedelta
# Call for raw data (NASDAQ)
def nsdq_data(ticker):
    """Fetch ~5 years of daily closes for *ticker* from the NASDAQ API.

    Appends today's price (from current_price) as the newest row, then adds
    3- and 10-day EMA columns and a per-day slope of the close.

    Returns:
        pandas.DataFrame with 'close', 'ema3', 'ema10', 'slope' columns
        (plus whatever the API returned), or None on any failure.
    """
    try:
        today = dt.datetime.now(pytz.timezone('US/Eastern')).date()
        past = today - relativedelta(years= 5)
        price = current_price(ticker.upper())
        new_data = {"date" : today.strftime("%m/%d/%Y"), "close" : price}
        headers = {'user-agent' : "-"}
        url = "https://api.nasdaq.com/api"
        post = f"/quote/{ticker.upper()}/historical"
        params = {
            "assetclass" : "stocks",
            "fromdate" : past,
            "limit" : '100000',
        }
        r = requests.get(url + post, headers=headers, params=params).json()
        # Remove unnecessary wrapping and reverse into chronological order.
        data = pd.DataFrame(r["data"]["tradesTable"]["rows"][::-1])
        # Strip '$' and thousands separators; raw string avoids the invalid
        # '\$' escape-sequence warning.
        data[['close']] = data[['close']].replace(r'\$|,', '', regex=True).astype(float)
        # Bug fix: DataFrame.append was deprecated and removed in pandas 2.0;
        # concatenate a one-row frame instead (same ignore_index semantics).
        data = pd.concat([data, pd.DataFrame([new_data])], ignore_index=True)
        # Derived indicators on the close series.
        data['ema3'] = data['close'].ewm(span=3, adjust=False).mean()
        data['ema10'] = data['close'].ewm(span=10, adjust=False).mean()
        data['slope'] = np.gradient(data['close'])
        return data
    except Exception as e:
        print("NSDQ Data Error: ", e)
        pass
# Call for current price
def current_price(ticker):
    """Return the latest sale price for *ticker*, rounded to cents (or None on failure)."""
    try:
        endpoint = f"https://api.nasdaq.com/api/quote/{ticker}/info?assetclass=stocks"
        payload = requests.get(endpoint, headers={'user-agent' : "-"}).json()['data']
        last_sale = payload['primaryData']['lastSalePrice'].strip('$')
        return round(float(last_sale), 2)
    except Exception as e:
        print("Current Price Error:", e)
        pass
# Call for order
def order(ticker, qty, order, api):
    """Submit a market day order via the Alpaca paper-trading API.

    A truthy *order* buys, a falsy one sells; the HTTP status is printed.
    """
    try:
        side = "buy" if order else "sell"
        endpoint = "https://paper-api.alpaca.markets" + "/v2/orders"
        auth_headers = {
            "APCA-API-KEY-ID" : api.alpaca_api,
            "APCA-API-SECRET-KEY" : api.alpaca_secret,
        }
        order_body = {
            "symbol" : ticker.upper(),
            "qty" : str(qty),
            "side" : side,
            "type" : "market",
            "time_in_force" : "day"
        }
        response = requests.post(endpoint, headers=auth_headers, json=order_body)
        print("Status Code:", response.status_code)
    except Exception as e:
        print("Order Error:", e)
        pass
# Call to list bought stocks
def stock_list(api):
    """Return all open positions from the Alpaca paper-trading account (or None on failure)."""
    try:
        auth_headers = {
            "APCA-API-KEY-ID" : api.alpaca_api,
            "APCA-API-SECRET-KEY" : api.alpaca_secret,
        }
        endpoint = "https://paper-api.alpaca.markets" + "/v2/positions"
        response = requests.get(endpoint, headers=auth_headers)
        return response.json()
    except Exception as e:
        print("Stock List Error:", e)
        pass
# Call for stock quantity bought
def qty(ticker, api):
    """Return the held quantity of *ticker*, or None if there is no position."""
    try:
        endpoint = "https://paper-api.alpaca.markets" + "/v2/positions/" + ticker.upper()
        auth_headers = {
            "APCA-API-KEY-ID" : api.alpaca_api,
            "APCA-API-SECRET-KEY" : api.alpaca_secret,
        }
        response = requests.get(endpoint, headers=auth_headers)
        if response.status_code == 200:
            return response.json()["qty"]
        return None
    except Exception as e:
        print("Quantity Error:", e)
        pass
# Call for buying power
def money(api):
    """Return the account's buying power, rounded to cents (or None on failure)."""
    try:
        endpoint = "https://paper-api.alpaca.markets" + "/v2/account"
        auth_headers = {
            "APCA-API-KEY-ID" : api.alpaca_api,
            "APCA-API-SECRET-KEY" : api.alpaca_secret,
        }
        buying_power = requests.get(endpoint, headers=auth_headers).json()["buying_power"]
        return round(float(buying_power), 2)
    except Exception as e:
        print("Buying Power Error:", e)
        pass
# Call for calendar (check if holiday)
def calendar(date, api):
    """Return the trading-calendar entry date for *date*.

    Presumably the returned date differs from *date* on holidays — verify
    against the caller.  (Note: this shadows the stdlib ``calendar`` module
    within this file.)
    """
    try:
        endpoint = "https://paper-api.alpaca.markets" + "/v2/calendar"
        auth_headers = {
            "APCA-API-KEY-ID" : api.alpaca_api,
            "APCA-API-SECRET-KEY" : api.alpaca_secret,
        }
        window = {
            "start" : date,
            "end" : date,
        }
        entries = requests.get(endpoint, headers=auth_headers, params=window).json()
        return entries[0]["date"]
    except Exception as e:
        print("Calendar Error:", e)
        pass
# Call for open/close time (params: "Open" or "Clos" only, case senstive and no 'e' for "Clos")
def market_hour(market_time):
    """Return today's market open/close time as a naive datetime (or None on failure).

    *market_time* must be "Open" or "Clos" (no trailing 'e'): it is spliced
    into the NASDAQ field name 'market{Open,Clos}ingTime'.
    """
    try:
        url = "https://api.nasdaq.com/api/market-info"
        headers = {'user-agent' : "-"}
        r = requests.get(url, headers=headers).json()['data']
        raw = r[f'market{market_time}ingTime']
        # Bug fix: str.strip(' ET') removes the *characters* ' ', 'E', 'T'
        # from both ends, not the suffix ' ET' — drop the suffix explicitly.
        if raw.endswith(' ET'):
            raw = raw[:-3]
        hour = dt.datetime.strptime(raw, "%b %d, %Y %I:%M %p")
        return hour
    except Exception as e:
        print("Market time Error:", e)
        pass
# Call for next open time
def next_open_time(api):
    """Return the next market-open moment as a naive datetime (or None on failure).

    The clock endpoint returns e.g. '2021-03-04T09:30:00-05:00'; the
    trailing UTC offset (last six characters) is discarded before parsing.
    """
    try:
        endpoint = "https://paper-api.alpaca.markets" + "/v2/clock"
        auth_headers = {
            "APCA-API-KEY-ID" : api.alpaca_api,
            "APCA-API-SECRET-KEY" : api.alpaca_secret,
        }
        clock = requests.get(endpoint, headers=auth_headers).json()
        stamp = clock['next_open'][:-6]
        return dt.datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%S")
    except Exception as e:
        print("Next open time Error:", e)
        pass
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def draw_parabola(steps=50):
    """Plot y = x^2 over [-4, 4] with a dashed vertical line at x = 0."""
    xs = np.linspace(-4, 4, steps)
    plt.plot(xs, xs ** 2)
    plt.axvline(x=0, color='b', linestyle='dashed')
def draw_paraboloid(steps=50):
    """Surface plot of z = x^2 + y^2 over [-1, 1] x [-1, 1]."""
    fig = plt.figure(figsize=(10, 10))
    # Bug fix: Figure.gca(projection=...) was deprecated in Matplotlib 3.4
    # and removed in 3.6; add_subplot is the supported way to get 3-D axes.
    ax = fig.add_subplot(projection='3d')
    x = np.linspace(-1, 1, steps)
    y = np.linspace(-1, 1, steps)
    X, Y = np.meshgrid(x, y)
    Z = X ** 2 + Y ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
def draw_mishra_bird():
    """Surface plot of the Mishra Bird function over its usual domain."""
    fig = plt.figure(figsize=(14, 10))
    x = np.arange(-10, 1, 0.1)
    y = np.arange(-6, 0.5, 0.1)
    X, Y = np.meshgrid(x, y)
    # Bug fix: pyplot.gca(projection=...) was deprecated in Matplotlib 3.4
    # and removed in 3.6; create the 3-D axes on the figure instead.
    ax = fig.add_subplot(projection='3d')
    Z = np.sin(Y) * np.exp((1 - np.cos(X)) ** 2) + np.cos(X) * np.cos(X) * np.exp((1 - np.sin(Y)) ** 2) + (X - Y) ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
    ax.view_init(20, -60)
def draw_hyperbolic_paraboloid():
    """Surface plot of the saddle z = x^2 - y^2 over [-1, 1] x [-1, 1]."""
    fig = plt.figure(figsize=(10, 10))
    # Bug fix: Figure.gca(projection=...) was deprecated in Matplotlib 3.4
    # and removed in 3.6; add_subplot is the supported way to get 3-D axes.
    ax = fig.add_subplot(projection='3d')
    x = np.linspace(-1, 1, 50)
    y = np.linspace(-1, 1, 50)
    X, Y = np.meshgrid(x, y)
    Z = X ** 2 - Y ** 2
    ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
from urllib import request
import random
import json
# Target URL: a Baidu hot-search result page (query is URL-encoded UTF-8).
url = r'https://www.baidu.com/s?cl=3&tn=baidutop10&fr=top1000&wd=%E7%9F%B3%E7%94%B0%E7%BA%AF%E4%B8%80%E6%84%9F%E6%9F%93%E6%96%B0%E5%86%A0&rsv_idx=2&rsv_dl=fyb_n_homepage&hisfilter=1'
# Candidate User-Agent strings (the original comment called this a "proxy list").
agent_list = [
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Mobile/15E148 Safari/604.1',
]
# Base request headers; User-Agent is filled in below.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    # 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
}
# Pick a random User-Agent for this run.
agent = random.choice(agent_list)
headers['User-Agent'] = agent
# Helper functions
def print_url(url, header):
    """GET *url* with headers *header* and print the decoded response body.

    Any failure prints '超时' ("timeout") — note this reports every error,
    not only timeouts.  urllib's temporary-file cache is always cleared.
    """
    # Request timeout in seconds.
    time_out = 1
    req_str = request.Request(url=url, headers=header)
    try:
        resp = request.urlopen(req_str, timeout=time_out)
        data = resp.read().decode()
        print(data)
    except Exception:
        # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        print("超时")
    finally:
        request.urlcleanup()
def print_url_http(url, header):
    '''
    GET
    POST
    PUT
    DELETE
    UPDATE
    HEAD
    OPTIONS
    '''
    # Unimplemented stub.  Bug fix: the original body called json.loads()
    # with no arguments, which raised TypeError unconditionally; calling the
    # stub is now a harmless no-op.
    pass
def get_json_val():
    """Decode the sample payload and print its 'sodar_query_id' field."""
    parsed = json.loads(data_json_str())
    print(parsed['sodar_query_id'])
def data_json():
data = {"sodar_query_id":"YcqaXvPrIMSW2QTPjZeQAQ","injector_basename":"sodar2","bg_hash_basename":"r_kJ4x66L0q9ptqPN1EZdQZJVGt7LCWecB4z-4tOz0Y","bg_binary":"ALzbBj814lyYEaftZLAVu8KNpcS+Et40flMgUba+katdDRF9kHyC5ekeOn+SnF/oOv/75lAHEFYOblxjV5F4SQhJh/HX5oNaB6yQEscwY+2xY7zf1AOQAdXlwstcQsfcf91ydo9bJs3/nAnh41iqmA3KkV9TfstrgriG5sc8NSoUWQywuHf7ZDZeun3Y92u01kXYPGO8rYRMrwOmuOuo1G4VKz01yCxYiTBspPgxnf7FUa45yXGKR151XIRz4IxwZBgy/9IfJW7j0hUjlY/0miYrdQDTKGXvXdhU+YZvQF9FqLDIrYhg5FTB7SlWwIxZrImc8w8pALEU2idJLMue130yPHz7GfnNs6cIoIb8v+Y5v78QUCPflrJP6GxBEej+a3Fmb2hm7pk2iK4hbMb3guNpMSIou8PIP4nd5KQrpDzuG/WOiaSZIuMfkYYifAhSdi6nam3SMto07vPYW4L1XOy4QCvmkbrMwE8A8FLNrC6IzhIPi3cURKXSE6sI/UFoo8jBYaD/961bsfjDRip/stsq5XCf+P2EhgLW9Yl95ddjtReaObOpV5Di5pMhexp0DaCjfmXZyOrZ+LA3UYcOarlSsAIEJZ85HTn7EiJl+DVPSXPmQSy8LAywMyAVuPtKwanswYNiqlYtayDAlPJI26Om2TOeZzO0lRASIyxK6zkms+YajVYJ1z2wNvnv81D1PzH5N9YbWjImivcqNOHZxF/88olXY6oHG+zBqOVTOLyFahFjD7ftMXKFncA9mnEKC/UNXEkdClNu8B63x/aUHyb4u398Eru3PAupW6gnasf404viputMyvkrGgr7AhTRVJNK4Zt5GoQ8znxJCJZ0TRrGH4XgKFIkcgYopx4fmYGc5hP4q4mqFDouvH/Q0NGjx2YpICYE5CSfG1iIV76XO6nTrZ7Fn4zfE+mkgmm7LU/yAGXu2mjeTL0K2nEyOtgcuxq5POsRRtyN3BpNFRZDG06NxTEVZPbbRnm6aEaL4dntcmYsrLu2bFw2nMywczkpyV3ld+jeItdjeLaeRMjEqxhfR21xsMg3AenilDzpPaYlBCosMK3h/MA1nCwLxGENmjHp4lFYPHJohRnMj2Bbs4ROeG7uZoVg/NTmNiagecZC3+xy7+e+hNSS1Dmdq/lSpYLwJPsgrRRutCBRY/Ie2rfToKEt5juHeg9ExyWA8QJpHOPmIwgvoTXlTjWnQoObJuvlwVlJiT3fFDhmox/tAtiy4HzzQeIXekN8mZu1Lee6qlJ0HFE5jP6FVfDZsdn1VPKe8l01YpktU107evEA8rzrdoTnpPAj+d0IRwTh0HylyKHuulw6RD1MOJxPHTY06aGf5IRjpsz+YOKLR/+UPGiTZq4fc12OXYI/rHZTEfcSQu+lkh2zi2q8NAcRBrexYG6WN9UQ7+q5bPxAOEKxtB265eA1JQVd13LIPlBEJEbNCcvBiQiAzA2wDEqR793VpC0EuCDXuCuHwYGuF23YaKqhOaapZS9xVT8aDwKpdo005BdGvyu5Bux2q23npsv3xDE++5F/ny3z57M1cbpfLJQ4YzMVFyNisvqR5rdY71Ms2mTXy/DyoS022LI21D1RMsc16qKD7oCm00M/ggQVC1X7tJDwl0oe/3iisPHUJRiI79FkGbazm9AbQQKUH2LnMPjZ6GEMLkVpQGhglE/yYwVVpsP/PRdK1Cdftg7OADzPty8G1Q5uFyvdmWmIuR5nbW9bebKvhYFCJZHm2DcWgu8tN5NG5/5lrGpqxoNqxaxPwzAocDdU0xwMajHidsg0nkMruMNd997EUOEIdHPvZZFbBG+4ZDZgaYLGRuxGF2lOYNNxMG7
qZfoXV5Vw4h/G0Iy7hy6DXRnZCQWOXuGM6wGqwdG3yy085+gqnOyEclnbgsaVo7Ohz4P1u34rFRoSd+yoHs5Cy4iqCBZtu1o71jKxP+/yVbb+UGMNOOnSnrTO1Qs6MHYnQ+7yrN1AVKKwaNFFtsVKp4dW5vv0+6CJ1TmiEuVekSTR6pQ7FYjjvdAXwob0OZDFoxXY7kAFxrIuHXqgzJ0cG1DjxFJtV1JGCAU2vPtS6iYoNbpQX2GRMQx31yWVG4CO0IYJWjraUwvswrtIFbxkJMP2H8GF1AaV4gLV10ZbNsX8V1m0SwPsburH/3ECRLu3IpU6VLdP53WrtBxF4cidDtgaBin9NuQp0bP9wC3TIR0nZ2OD5yDRPw//pGAzZqIMLhvB2AbrLt4qCFvOWKDxJ39Thy9HOyqJh2DEZ/oWUr496RdSvmYqH5yn/pmYFN+gAqgB33wIsbYJxQtGfT2NsIS8yVka1031cP0azO43smM9dXbkU6HVaxOS5Y1U5PR9pjxAilePqS+PUVOIegsGpLfR4rfjXFQt72kpCTNKG+y8/XWH6Brb2THTzGEF1UrNUZfc6+jJ5fflgGAOuECRgzJwr9x0bToMdomF5vrbaLcGbX+Rqw7+ob7GQ5/E9UmFaAOOeDIGd0eX0hwLP1ZEKnkW+4LHFY5h1L51tUIZVPFnsJ1dxEeGXU7zp2SIJ8nbdcXO3WP6o9Q38Hrrw6udiFNZT9lhKujoBYgUZ/d0EDZCS0JuB/vR4u9uHKic0PBVeZpiUtjlaPJbrdHJK5J+JycwsifHqXKeMUDPOkNdPptuif8vsrXnpTgIqVEXFwYI1SCXr/0/hWhm3kz8ZVMPoPyPSehNFvD5/heLy4BCxaW60SjKfDMWiyliTQRDFsnFJZ+CguIE9tYjkwkdtv6yRQI70ltEWhYEHsX0+uZdixmo3wMPT7xjT6wL7891UFDJIFy8WtwTj5VzdN5nSgwlh+yGF9Djn9ihLSN5EebavuLDiJYNlvVOA2mMKSdeB8jvFcwyH5Q8opwQZUWrdrahdkTRK98S3HoGlyMx2u5x+YUgNxrKUJZxfbI/53aDuS2BV2LY2jtVnXQohEll0afDuVvmWNfJ8SQ2tHwX/YWuYYFKUg05ZF8yfxBdn9oezJMLorAa4wyomHtoowUL2j1ITOYZaG46V+sC6Uwf1T9VCDA3Dyugwz34e+NErKouptm99HeY22BzpTvUutUGo4/0m5Wt5CvbX1fEBeTWMb6BZ4sdP/PxJpR+vxBIFStciwLqBYIlVF/TKzKK0OR4gZp/QF4Z2GZPQUSQ4ZMQST3zhcMIsxNnzThwhDQifjvlTBhfM5bNtV6mNtPzQ9UbY5Qk6/88YFt5jJPaVhnfnaZtC9D7WlO3aNSIJ8QmNhg3J3dp6BiCjKMzBjCkXmlOcWGjTO5oQ1p2HKUubHNxQDpmmLthX8n15qLusnaQUeKSf+vFxcneT4DicqBNpECnPSfbwcIZqbDpwGLjNsRNebJwEI2xdbX+MBOPVQ303ptQHEMychPD+tbi7SCTIgJcHfAfRYAW5/AxbzelIwrk/6PC+a60CSW3OOLuOAoP5CLpeg+zRWW6CL5k9DdFDf4ve2vGu+k9V+2JagU56Ea8YCHOQ5VIzqkF3jIh6LkhCYmCyjFBGQLz4Cvu5OGI7TLC9v5/LQhshoqrEcc/JexcJzbx1i/l7In6HW5Zp1BpJvtruexwzKsbZKclmaG4HzPEGUKHgzwDDkMTFYSU2qPpncqPw6NtBp8og4n1KjyAXpfYecFU5tQVDyeUc7tMUgV0BE/WsXoheOKx7Cvo3bRuySPhSih+PBGp6FzP/S/rLxPOmZ/Lcf2F0IXXtR0Cj4gHXhigNou+PrhTgmeW1ayRnYYJ8Ps5JCP5nW5i2EAlH5SvcyAaoXIb2T3l1z7TmEEVLMRC3k5d+fqxB1AEIYZLvLMoCO2tFBh6L7u3Vyh/k0SchaqKKI9U/JVG/l4QwFq
pZ8E+C/p15UVgwMwHaAFBKULWncbwNiSk0R2H46n5Ol7+2kv2yfkFvdYrf7VsKD76/6JOCQydMM2BKmL1NL91N+Yd0hmaYBrrFIxVzxkjP8VULgCRwylKpsTBdYp0nvfVeWU+vq1CXy2hhOxzWMVRmMAE9FO6Fux0fprVdrkxDgLk50mhP7Eq8kfnzpXc3ItSgAddB1JCvUdYzhnsQh+F/viDl5iub0LIeF+Kp+HyemXDTkf9OVM1DGwp3CxgNIam2Z1/UxTVC76H8cKhjeo8yOhzoVF0p46N/o2eOmhB55ZcWKvFESKuRMbV+MjcSAhWE+76v8VgxrfwoIfhg2YlwLfMTiapbfMZ5tSh5rutxOuReIAbh8Mo/IYBesQQ2SybvA2GFg7Mcfe2rC+LEIhwXkm4GZkFahH9UWw4m1VUBmty2V9GcIUwp1/vUNfBCvDA8zyM7+r6P1SHjU4DkKVa0qIqF7AEwqASIbg2gjDMuxHyZ+c1izFQLu/8Nf3WFZUNcpMy92jd+wjICK0HzTKJYUVmraEPAQ96bvuibSo9COX9jAhC0xiG6AXurIm+bExk7Bq49uzkDe2AuK8xc3/ygHsr1pqCP/W99SKv2pds52hZb+ezghamFhznJ67EZIWawes9YJ1khIX6i2/N5qTvgFjv4C7d5IQVuMJgY9On9IbwuLJXnr8Shmy7vcc57b2irRiuKmDW4Vc4SBpRwW7wgvjpeuTwvsZyQgDrWFpKvY8PgrOK9MkXdnLPg3kkgFZF7CVHsogJZa3CVoA9uS4D7RT5hm9gsdVkxMkop+//w5bg1+fm/hrGD8wSmYNzLvld6IJOZxQWhE5JPe+WNzC5zEITxZGomzdKYDHRqp+0tQF8xVyHyZPuWPSgqAE/e5jyJ5m/sBa5Vl5oyKxajcv+gKZJhPiOfMLvgX7/+I8mFVccLz4kljK0KUhIScmYQBjWpAlN8JE2yzh2KmEhiTGqNsA9D9MbsRxZ3O3v9GauT2TYcH/EQCLvqftFn05a4Asz/car34eE7UcMcYvUvn0FYiIpHWmxHXAVCxZQ7+u4XQr/ulMxjKgOOeVFBfYcYl5uBc+U/UWM2nimDDF8q3Ugyybv6lTTke31qSGAqYvZLfHCV2CGK/Z2a83Fq6QOROsSdL1pntMU2jNLt6hC3XXzzeATmGTWPxuJXikRvueMc097kOn6G0NyU0qK4HDvymMcPhlibsSiBIPnoUzv6Had7ED6A7ccKy8hzk9ZZx0BGMoZjnAlpJJGK7HC57yTzsg05tX7NRcP5r9MNN/uBF9nJzY5ggZaQIETXUhfoxCfwY/Ce6nP0iHFHdPlsCbydHefp1dgyjPzQMvI6l9OG9n3OSLh9+rKmYQMyz1pi4aHcvt8CzqYhRKlPQEP1xNchQ0IXBhrm2Mi7SER0nimnz07nF1Ki9mPGk757hCsQz+xGwOj7oz1YeCtFT7vISs/kX9zeOtcpnfUlS0roQkwz1tQU2aTsZ5A42vyFRKRE0rv1KASXsiDNZd0/jkhmcneYQxD3L0ttYjsUg2BP/clXNyVWEoTsPs17xtZb+zZ0bAo29G0CEmFlx9n7PewUJOEqzv0s/W9jP0iIBNEsQ9mWQr6Brar3wQRrfjLk6ip8HUNh+YhhSjW0eSA9NsgQE6GaPKaGe03dNQOk8Yu5O1WrNOP+/Wjn2vWTb8TMbusjEgGG7BjGM5YlchUSurpXob/EPZAaR9gbMPt4CtHKUhB87t256CPGqoYxAVNcEhglUOM/p9hEjwkKZ3dB0AOqKswNtb+Nja9vgMFFCte6dOTXDRuHlyKL6IenAIo+5JBYX15WlGhCHiiWXQpbJoFbjeie3fxjGDjRzr8us5tvKUHXQJQCVW6SlKk1uFImLIdngwkXUpv2hypJX8KRtf4uLPu3+x50HIS5g38o9wdVgPjcPxAIEB3fcyEl0IWAx1eUm1LU8h11yx+gzQ/snBaV2vt1VEvLtNtPFZVYvIDuSp
sWY8bv8owdZd4wHB1lJZgAp9bBiSTGGEJMlCOuu4lQDOL/Aj3XMW8SSg5zTZblxdxayss3hIkrtoct1YVxe0itQSpG/OR+m3ZNOLr43J2gFN3MagHZwPuGBZC0kW+7nyZM7Sp7FZA/1+A08ddSL3luh/dCaPTVtk6tY1q1t9JH6dcsl77+Kh4nslE0YRA0qQQQIsqz75n7Bu05aFw+g6oYBgqAs4p0uVoWSKtTtfucPHy8gwCn8lh8jeIpk0mWS64OXXPWqyPptuCOZvJPemmP5uYB9MWLrf1QZmZMWgVZHuMmQXXobMTjGz+Dsw/eEVP+nVL8ftDDxwEDT0XpUckl0v3Qt3Np44jFKNLIcm6CIobyN0QQuouOZEmAVVXcJP6NYclNMd3zdKoVVGzFZS0GqX1Qmw+U4rlS0Knl9p2vDtP/HMWcCtnTNP9KZjRF6sJr2Vu+/4oi4f0JwvbUrHdkcED64VFA53ZxvqAKIPE1ebZjFq6SH6BXXl+CkWGqBUAe4HGh+u1QEKNPGA4ETZV4GNTOKbCP98CEmzf7Vo2nxTZ+0F34OUgMtQgrLTYcy0yZLB/Dk7nCgFO3zRLsNZUpX+KQRkSZ/aqiXJpwDRDh4aL2e40ENPHVI5nbWvuQaT44TG8WMIL60jr5WKgj921RMDAeCWipSP6LLtCHwZrTc2UiJugF/AC2WgY4L3/T0MTIK2"}
return data
def data_json_str():
data = {"sodar_query_id": "YcqaXvPrIMSW2QTPjZeQAQ", "injector_basename": "sodar2",
"bg_hash_basename": "r_kJ4x66L0q9ptqPN1EZdQZJVGt7LCWecB4z-4tOz0Y",
"bg_binary": "ALzbBj814lyYEaftZLAVu8KNpcS+Et40flMgUba+katdDRF9kHyC5ekeOn+SnF/oOv/75lAHEFYOblxjV5F4SQhJh/HX5oNaB6yQEscwY+2xY7zf1AOQAdXlwstcQsfcf91ydo9bJs3/nAnh41iqmA3KkV9TfstrgriG5sc8NSoUWQywuHf7ZDZeun3Y92u01kXYPGO8rYRMrwOmuOuo1G4VKz01yCxYiTBspPgxnf7FUa45yXGKR151XIRz4IxwZBgy/9IfJW7j0hUjlY/0miYrdQDTKGXvXdhU+YZvQF9FqLDIrYhg5FTB7SlWwIxZrImc8w8pALEU2idJLMue130yPHz7GfnNs6cIoIb8v+Y5v78QUCPflrJP6GxBEej+a3Fmb2hm7pk2iK4hbMb3guNpMSIou8PIP4nd5KQrpDzuG/WOiaSZIuMfkYYifAhSdi6nam3SMto07vPYW4L1XOy4QCvmkbrMwE8A8FLNrC6IzhIPi3cURKXSE6sI/UFoo8jBYaD/961bsfjDRip/stsq5XCf+P2EhgLW9Yl95ddjtReaObOpV5Di5pMhexp0DaCjfmXZyOrZ+LA3UYcOarlSsAIEJZ85HTn7EiJl+DVPSXPmQSy8LAywMyAVuPtKwanswYNiqlYtayDAlPJI26Om2TOeZzO0lRASIyxK6zkms+YajVYJ1z2wNvnv81D1PzH5N9YbWjImivcqNOHZxF/88olXY6oHG+zBqOVTOLyFahFjD7ftMXKFncA9mnEKC/UNXEkdClNu8B63x/aUHyb4u398Eru3PAupW6gnasf404viputMyvkrGgr7AhTRVJNK4Zt5GoQ8znxJCJZ0TRrGH4XgKFIkcgYopx4fmYGc5hP4q4mqFDouvH/Q0NGjx2YpICYE5CSfG1iIV76XO6nTrZ7Fn4zfE+mkgmm7LU/yAGXu2mjeTL0K2nEyOtgcuxq5POsRRtyN3BpNFRZDG06NxTEVZPbbRnm6aEaL4dntcmYsrLu2bFw2nMywczkpyV3ld+jeItdjeLaeRMjEqxhfR21xsMg3AenilDzpPaYlBCosMK3h/MA1nCwLxGENmjHp4lFYPHJohRnMj2Bbs4ROeG7uZoVg/NTmNiagecZC3+xy7+e+hNSS1Dmdq/lSpYLwJPsgrRRutCBRY/Ie2rfToKEt5juHeg9ExyWA8QJpHOPmIwgvoTXlTjWnQoObJuvlwVlJiT3fFDhmox/tAtiy4HzzQeIXekN8mZu1Lee6qlJ0HFE5jP6FVfDZsdn1VPKe8l01YpktU107evEA8rzrdoTnpPAj+d0IRwTh0HylyKHuulw6RD1MOJxPHTY06aGf5IRjpsz+YOKLR/+UPGiTZq4fc12OXYI/rHZTEfcSQu+lkh2zi2q8NAcRBrexYG6WN9UQ7+q5bPxAOEKxtB265eA1JQVd13LIPlBEJEbNCcvBiQiAzA2wDEqR793VpC0EuCDXuCuHwYGuF23YaKqhOaapZS9xVT8aDwKpdo005BdGvyu5Bux2q23npsv3xDE++5F/ny3z57M1cbpfLJQ4YzMVFyNisvqR5rdY71Ms2mTXy/DyoS022LI21D1RMsc16qKD7oCm00M/ggQVC1X7tJDwl0oe/3iisPHUJRiI79FkGbazm9AbQQKUH2LnMPjZ6GEMLkVpQGhglE/yYwVVpsP/PRdK1Cdftg7OADzPty8G1Q5uFyvdmWmIuR5nbW9bebKvhYFCJZHm2DcWgu8tN5NG5/5lrGpqxoNqxaxPwzAocDdU0xwMajHidsg0nkMruMNd997EUOEIdHPvZZFbBG+4ZDZgaYLGRuxGF2lOYNNxMG7qZfoXV5Vw4h/G0Iy7hy6DXRnZCQWOXuGM6wGqwdG3yy085+gqnOyEclnbgsaVo7Ohz4P1u34rFRoSd+yoHs5Cy4iqCBZtu1o71jKxP+/yVbb+UGMNOOnSnrTO1Qs6MHYnQ+7yrN1AVKKwaN
FFtsVKp4dW5vv0+6CJ1TmiEuVekSTR6pQ7FYjjvdAXwob0OZDFoxXY7kAFxrIuHXqgzJ0cG1DjxFJtV1JGCAU2vPtS6iYoNbpQX2GRMQx31yWVG4CO0IYJWjraUwvswrtIFbxkJMP2H8GF1AaV4gLV10ZbNsX8V1m0SwPsburH/3ECRLu3IpU6VLdP53WrtBxF4cidDtgaBin9NuQp0bP9wC3TIR0nZ2OD5yDRPw//pGAzZqIMLhvB2AbrLt4qCFvOWKDxJ39Thy9HOyqJh2DEZ/oWUr496RdSvmYqH5yn/pmYFN+gAqgB33wIsbYJxQtGfT2NsIS8yVka1031cP0azO43smM9dXbkU6HVaxOS5Y1U5PR9pjxAilePqS+PUVOIegsGpLfR4rfjXFQt72kpCTNKG+y8/XWH6Brb2THTzGEF1UrNUZfc6+jJ5fflgGAOuECRgzJwr9x0bToMdomF5vrbaLcGbX+Rqw7+ob7GQ5/E9UmFaAOOeDIGd0eX0hwLP1ZEKnkW+4LHFY5h1L51tUIZVPFnsJ1dxEeGXU7zp2SIJ8nbdcXO3WP6o9Q38Hrrw6udiFNZT9lhKujoBYgUZ/d0EDZCS0JuB/vR4u9uHKic0PBVeZpiUtjlaPJbrdHJK5J+JycwsifHqXKeMUDPOkNdPptuif8vsrXnpTgIqVEXFwYI1SCXr/0/hWhm3kz8ZVMPoPyPSehNFvD5/heLy4BCxaW60SjKfDMWiyliTQRDFsnFJZ+CguIE9tYjkwkdtv6yRQI70ltEWhYEHsX0+uZdixmo3wMPT7xjT6wL7891UFDJIFy8WtwTj5VzdN5nSgwlh+yGF9Djn9ihLSN5EebavuLDiJYNlvVOA2mMKSdeB8jvFcwyH5Q8opwQZUWrdrahdkTRK98S3HoGlyMx2u5x+YUgNxrKUJZxfbI/53aDuS2BV2LY2jtVnXQohEll0afDuVvmWNfJ8SQ2tHwX/YWuYYFKUg05ZF8yfxBdn9oezJMLorAa4wyomHtoowUL2j1ITOYZaG46V+sC6Uwf1T9VCDA3Dyugwz34e+NErKouptm99HeY22BzpTvUutUGo4/0m5Wt5CvbX1fEBeTWMb6BZ4sdP/PxJpR+vxBIFStciwLqBYIlVF/TKzKK0OR4gZp/QF4Z2GZPQUSQ4ZMQST3zhcMIsxNnzThwhDQifjvlTBhfM5bNtV6mNtPzQ9UbY5Qk6/88YFt5jJPaVhnfnaZtC9D7WlO3aNSIJ8QmNhg3J3dp6BiCjKMzBjCkXmlOcWGjTO5oQ1p2HKUubHNxQDpmmLthX8n15qLusnaQUeKSf+vFxcneT4DicqBNpECnPSfbwcIZqbDpwGLjNsRNebJwEI2xdbX+MBOPVQ303ptQHEMychPD+tbi7SCTIgJcHfAfRYAW5/AxbzelIwrk/6PC+a60CSW3OOLuOAoP5CLpeg+zRWW6CL5k9DdFDf4ve2vGu+k9V+2JagU56Ea8YCHOQ5VIzqkF3jIh6LkhCYmCyjFBGQLz4Cvu5OGI7TLC9v5/LQhshoqrEcc/JexcJzbx1i/l7In6HW5Zp1BpJvtruexwzKsbZKclmaG4HzPEGUKHgzwDDkMTFYSU2qPpncqPw6NtBp8og4n1KjyAXpfYecFU5tQVDyeUc7tMUgV0BE/WsXoheOKx7Cvo3bRuySPhSih+PBGp6FzP/S/rLxPOmZ/Lcf2F0IXXtR0Cj4gHXhigNou+PrhTgmeW1ayRnYYJ8Ps5JCP5nW5i2EAlH5SvcyAaoXIb2T3l1z7TmEEVLMRC3k5d+fqxB1AEIYZLvLMoCO2tFBh6L7u3Vyh/k0SchaqKKI9U/JVG/l4QwFqpZ8E+C/p15UVgwMwHaAFBKULWncbwNiSk0R2H46n5Ol7+2kv2yfkFvdYrf7VsKD76/6JOCQydMM2BKmL1NL91N+Yd0hmaYBrrFIxVzxkjP8VULgCRwylKpsTBdYp0nvfVeWU+vq1CXy2hhO
xzWMVRmMAE9FO6Fux0fprVdrkxDgLk50mhP7Eq8kfnzpXc3ItSgAddB1JCvUdYzhnsQh+F/viDl5iub0LIeF+Kp+HyemXDTkf9OVM1DGwp3CxgNIam2Z1/UxTVC76H8cKhjeo8yOhzoVF0p46N/o2eOmhB55ZcWKvFESKuRMbV+MjcSAhWE+76v8VgxrfwoIfhg2YlwLfMTiapbfMZ5tSh5rutxOuReIAbh8Mo/IYBesQQ2SybvA2GFg7Mcfe2rC+LEIhwXkm4GZkFahH9UWw4m1VUBmty2V9GcIUwp1/vUNfBCvDA8zyM7+r6P1SHjU4DkKVa0qIqF7AEwqASIbg2gjDMuxHyZ+c1izFQLu/8Nf3WFZUNcpMy92jd+wjICK0HzTKJYUVmraEPAQ96bvuibSo9COX9jAhC0xiG6AXurIm+bExk7Bq49uzkDe2AuK8xc3/ygHsr1pqCP/W99SKv2pds52hZb+ezghamFhznJ67EZIWawes9YJ1khIX6i2/N5qTvgFjv4C7d5IQVuMJgY9On9IbwuLJXnr8Shmy7vcc57b2irRiuKmDW4Vc4SBpRwW7wgvjpeuTwvsZyQgDrWFpKvY8PgrOK9MkXdnLPg3kkgFZF7CVHsogJZa3CVoA9uS4D7RT5hm9gsdVkxMkop+//w5bg1+fm/hrGD8wSmYNzLvld6IJOZxQWhE5JPe+WNzC5zEITxZGomzdKYDHRqp+0tQF8xVyHyZPuWPSgqAE/e5jyJ5m/sBa5Vl5oyKxajcv+gKZJhPiOfMLvgX7/+I8mFVccLz4kljK0KUhIScmYQBjWpAlN8JE2yzh2KmEhiTGqNsA9D9MbsRxZ3O3v9GauT2TYcH/EQCLvqftFn05a4Asz/car34eE7UcMcYvUvn0FYiIpHWmxHXAVCxZQ7+u4XQr/ulMxjKgOOeVFBfYcYl5uBc+U/UWM2nimDDF8q3Ugyybv6lTTke31qSGAqYvZLfHCV2CGK/Z2a83Fq6QOROsSdL1pntMU2jNLt6hC3XXzzeATmGTWPxuJXikRvueMc097kOn6G0NyU0qK4HDvymMcPhlibsSiBIPnoUzv6Had7ED6A7ccKy8hzk9ZZx0BGMoZjnAlpJJGK7HC57yTzsg05tX7NRcP5r9MNN/uBF9nJzY5ggZaQIETXUhfoxCfwY/Ce6nP0iHFHdPlsCbydHefp1dgyjPzQMvI6l9OG9n3OSLh9+rKmYQMyz1pi4aHcvt8CzqYhRKlPQEP1xNchQ0IXBhrm2Mi7SER0nimnz07nF1Ki9mPGk757hCsQz+xGwOj7oz1YeCtFT7vISs/kX9zeOtcpnfUlS0roQkwz1tQU2aTsZ5A42vyFRKRE0rv1KASXsiDNZd0/jkhmcneYQxD3L0ttYjsUg2BP/clXNyVWEoTsPs17xtZb+zZ0bAo29G0CEmFlx9n7PewUJOEqzv0s/W9jP0iIBNEsQ9mWQr6Brar3wQRrfjLk6ip8HUNh+YhhSjW0eSA9NsgQE6GaPKaGe03dNQOk8Yu5O1WrNOP+/Wjn2vWTb8TMbusjEgGG7BjGM5YlchUSurpXob/EPZAaR9gbMPt4CtHKUhB87t256CPGqoYxAVNcEhglUOM/p9hEjwkKZ3dB0AOqKswNtb+Nja9vgMFFCte6dOTXDRuHlyKL6IenAIo+5JBYX15WlGhCHiiWXQpbJoFbjeie3fxjGDjRzr8us5tvKUHXQJQCVW6SlKk1uFImLIdngwkXUpv2hypJX8KRtf4uLPu3+x50HIS5g38o9wdVgPjcPxAIEB3fcyEl0IWAx1eUm1LU8h11yx+gzQ/snBaV2vt1VEvLtNtPFZVYvIDuSpsWY8bv8owdZd4wHB1lJZgAp9bBiSTGGEJMlCOuu4lQDOL/Aj3XMW8SSg5zTZblxdxayss3hIkrtoct1YVxe0itQSpG/OR+m3ZNOLr43J2gFN3MagHZwPuGBZC0kW+7nyZM7Sp7FZA/1+A08
ddSL3luh/dCaPTVtk6tY1q1t9JH6dcsl77+Kh4nslE0YRA0qQQQIsqz75n7Bu05aFw+g6oYBgqAs4p0uVoWSKtTtfucPHy8gwCn8lh8jeIpk0mWS64OXXPWqyPptuCOZvJPemmP5uYB9MWLrf1QZmZMWgVZHuMmQXXobMTjGz+Dsw/eEVP+nVL8ftDDxwEDT0XpUckl0v3Qt3Np44jFKNLIcm6CIobyN0QQuouOZEmAVVXcJP6NYclNMd3zdKoVVGzFZS0GqX1Qmw+U4rlS0Knl9p2vDtP/HMWcCtnTNP9KZjRF6sJr2Vu+/4oi4f0JwvbUrHdkcED64VFA53ZxvqAKIPE1ebZjFq6SH6BXXl+CkWGqBUAe4HGh+u1QEKNPGA4ETZV4GNTOKbCP98CEmzf7Vo2nxTZ+0F34OUgMtQgrLTYcy0yZLB/Dk7nCgFO3zRLsNZUpX+KQRkSZ/aqiXJpwDRDh4aL2e40ENPHVI5nbWvuQaT44TG8WMIL60jr5WKgj921RMDAeCWipSP6LLtCHwZrTc2UiJugF/AC2WgY4L3/T0MTIK2"}
data = json.dumps(data)
return data
# Read the JSON payload back from the local file
def load_location():
    """Smoke-test helper: read ../files/json.txt, echo the raw text and its
    type, then parse it and print the 'sodar_query_id' field.

    Relies on the module-level `json` import; the path is relative to the
    working directory at run time.
    """
    with open('../files/json.txt', 'rt') as f:
        text = f.read()
    print(text)
    print(type(text))
    js = json.loads(text)
    print(js['sodar_query_id'])
    pass
# Write the JSON payload to a local file
def write_location():
    """Copy ../files/json.txt verbatim to ../files/json1.txt."""
    with open('../files/json.txt', 'rt') as src:
        contents = src.read()
    with open('../files/json1.txt', 'w') as dst:
        dst.write(contents)
# Script entry point: only the file-copy helper is run; the other helpers
# are kept commented out for manual experimentation.
if __name__ == '__main__':
    # print_url(url=url, header=headers)
    # load_location()
    write_location()
    pass
import onegov.core
import onegov.org
from tests.shared import utils
def test_view_permissions():
    # Every view registered on OrgApp must declare an explicit permission.
    utils.assert_explicit_permissions(onegov.org, onegov.org.OrgApp)
def test_notfound(client):
    """Requesting an unknown path yields the German 404 page."""
    response = client.get('/foobar', expect_errors=True)
    assert response.status_code == 404
    assert "Seite nicht gefunden" in response
def test_links(client):
    """Admins can create an external link page; visitors are redirected by it."""
    first_nav_url = client.get('/').pyquery('.side-navigation a').attr('href')

    client.login_admin()
    topic_page = client.get(first_nav_url)

    link_form = topic_page.click("Verknüpfung")
    assert "Neue Verknüpfung" in link_form

    link_form.form['title'] = 'Google'
    link_form.form['url'] = 'https://www.google.ch'
    created = link_form.form.submit().follow()

    # Admins see an interstitial page instead of being redirected directly.
    assert "Sie wurden nicht automatisch weitergeleitet" in created
    assert 'https://www.google.ch' in created

    client.get('/auth/logout')

    topic_page = client.get(first_nav_url)
    assert "Google" in topic_page

    # Anonymous visitors get a plain redirect to the external target.
    redirect = topic_page.click("Google", index=0)
    assert redirect.status_code == 302
    assert redirect.location == 'https://www.google.ch'
def test_clipboard(client):
    # Copying a topic exposes the paste action, which clones the page in place.
    client.login_admin()
    page = client.get('/topics/organisation')
    assert 'paste-link' not in page
    # The copy action reads the referer to know which page is being copied.
    page = page.click(
        'Kopieren',
        extra_environ={'HTTP_REFERER': page.request.url}
    ).follow()
    assert 'paste-link' in page
    # Pasting ("Einfügen") creates the copy nested under the original page.
    page = page.click('Einf').form.submit().follow()
    assert '/organisation/organisation' in page.request.url
def test_clipboard_separation(client):
    # The clipboard is stored per browser session, not shared globally.
    client.login_admin()
    page = client.get('/topics/organisation')
    page = page.click('Kopieren')
    assert 'paste-link' in client.get('/topics/organisation')
    # new client (browser) -> new clipboard
    client = client.spawn()
    client.login_admin()
    assert 'paste-link' not in client.get('/topics/organisation')
def test_gobal_tools(client):  # NOTE(review): "gobal" is a typo for "global"; kept to preserve the test id
    # The global tools menu is only rendered for logged-in users.
    links = client.get('/').pyquery('.globals a')
    assert links == []
    client.login_admin()
    links = client.get('/').pyquery('.globals a')
    assert links != []
def test_top_navigation(client):
    # The default organisation ships with these four top-level nav entries.
    links = client.get('/').pyquery('.side-navigation a span')
    assert links.text() == 'Organisation Themen Kontakt Aktuelles'
def test_announcement(client):
    # An announcement configured in the header settings is rendered with the
    # chosen font and background colours on top of every page.
    client.login_admin()
    color = '#006fbb'
    bg_color = '#008263'
    text = 'This is an announcement which appears on top of the page'
    settings = client.get('/header-settings')
    # test default not giving the color
    assert settings.form['left_header_announcement_bg_color'].value == (
        '#FBBC05'
    )
    assert settings.form['left_header_announcement_font_color'].value == (
        '#000000'
    )
    settings.form['left_header_announcement'] = text
    settings.form['left_header_announcement_bg_color'] = bg_color
    settings.form['left_header_announcement_font_color'] = color
    page = settings.form.submit().follow()
    assert text in page
    # The colours end up as inline styles on the announcement container.
    assert (
        f'<div id="announcement" style="color: {color}; '
        f'background-color: {bg_color};">'
    ) in page
| 3,075 | 1,009 |
import tkinter as tk
from tkinter import filedialog
from Solve_stages import *
from Text_stages import *
from Analysis_stages import *
from Output import *
# Main application window.
root = tk.Tk()
root.title("Cipher program")
root.geometry("1500x500")
root.state("zoomed") #apparently windows only
def getOutputText():
    """Run the text through every enabled stage and return the final result.

    Each enabled stage's encode() or decode() is applied in pipeline order,
    depending on which radio button (Encode/Decode) is selected.
    """
    text = ""
    for stage in stages:
        if not stage.check_var.get():
            continue  # stage disabled via its checkbox
        text = stage.encode(text) if decode_var.get() == 1 else stage.decode(text)
    return text
def updateOutputText():
    """Recompute the pipeline output, display it, and let enabled stages
    decorate the output widget (e.g. highlighting)."""
    new_text = getOutputText()
    right_text.delete(1.0, tk.END)
    right_text.insert(tk.END, new_text)
    for stage in stages:
        if stage.check_var.get():
            stage.updateOutputWidget(new_text, right_text)
def updateStageEditor():
    """Clear the editor pane and draw the currently selected stage's controls."""
    for widget in stage_editor.winfo_children():
        widget.grid_forget()
    stages[selected_stage.get()].display()
    root.focus_set()
# Left-hand pane where the selected stage draws its controls.
stage_editor = tk.Frame(root, width=10, height=10)#Size is the same as right_text, they will expand equally to fill the space
stage_editor.grid(row=0, column=0, rowspan=4, sticky="NESW")
stage_editor.grid_propagate(0) #stops the contents of the window affecting the size
# Ordered pipeline of stage objects; stages[0] is always the Input stage.
stages = []
def addStage(stage):
    """Append *stage* to the pipeline, select it, and refresh the whole UI."""
    stages.append(stage)
    updateStagesFrame()
    stages[-1].button.select()  # select the newly added stage
    updateStageEditor()
    updateOutputText()
# Index of the currently selected stage within `stages`.
selected_stage = tk.IntVar()
# Column of per-stage radio buttons and enable checkboxes.
stages_frame = tk.Frame(root)
stages_frame.grid(row=0, column=1, sticky="NS", columnspan=3)
#Radiobuttons to select between encode and decode
decode_var = tk.IntVar()  # -1 = decode, 1 = encode
decodeBox = tk.Radiobutton(root, text="Decode", variable=decode_var,value=-1,command=updateOutputText)
encodeBox = tk.Radiobutton(root, text="Encode", variable=decode_var,value=1,command=updateOutputText)
decode_var.set(-1) #set to decode as default
decodeBox.grid(row=1,column=1,columnspan=3)
encodeBox.grid(row=2,column=1,columnspan=3)
#Up, Delete, and Down buttons
def stageUp():
    """Move the selected stage one position earlier (stage 0, Input, is fixed)."""
    idx = selected_stage.get()
    if len(stages) > 1 and idx > 1:
        stages.insert(idx - 1, stages.pop(idx))
        selected_stage.set(idx - 1)
        updateStagesFrame()
        updateOutputText()
def stageDown():
    """Move the selected stage one position later (Input and the last stage stay put)."""
    idx = selected_stage.get()
    if len(stages) > 1 and idx != 0 and idx < len(stages) - 1:
        stages.insert(idx + 1, stages.pop(idx))
        selected_stage.set(idx + 1)
        updateStagesFrame()
        updateOutputText()
def deleteStage():
    """Remove the selected stage and select its predecessor (Input cannot be removed)."""
    idx = selected_stage.get()
    if len(stages) > 1 and idx != 0:
        stages.pop(idx)
        selected_stage.set(idx - 1)
        updateStagesFrame()
        updateStageEditor()
        updateOutputText()
# Reorder/remove controls beneath the stage list.
stage_up_button = tk.Button(root, text = "↑",command=stageUp,takefocus=0)
stage_delete_button = tk.Button(root, text = "×",command=deleteStage,takefocus=0)
stage_down_button = tk.Button(root, text = "↓",command=stageDown,takefocus=0)
stage_up_button.grid(row=3, column=1, sticky="ESW")
stage_delete_button.grid(row=3,column=2, sticky="ESW")
stage_down_button.grid(row=3, column=3, sticky="ESW")
#Shortcuts for selecting the next and previous stage
def stageSelectUp(event):
    """Keyboard shortcut handler: select the previous stage in the list."""
    current = selected_stage.get()
    if current > 0:
        selected_stage.set(current - 1)
        updateStagesFrame()
        updateStageEditor()
def stageSelectDown(event):
    """Keyboard shortcut handler: select the next stage in the list."""
    current = selected_stage.get()
    if current < len(stages) - 1:
        selected_stage.set(current + 1)
        updateStagesFrame()
        updateStageEditor()
# Keyboard shortcuts for cycling through the stage list.
root.bind("<Control-Tab>", stageSelectUp)
root.bind("<Control-Shift-Tab>", stageSelectDown)
root.bind("<Control-Prior>", stageSelectUp) #Control + page up
root.bind("<Control-Next>", stageSelectDown) #Control + page down
def updateStagesFrame():
    """Rebuild the stage list UI: one select radiobutton plus one enable
    checkbox per stage, in pipeline order.

    NOTE(review): rebuilding recreates each stage's check_var with True, so
    any unchecked (disabled) stages become re-enabled after every rebuild —
    confirm this is intended.
    """
    for button in stages_frame.winfo_children():
        button.destroy()
    for stage_index in range(len(stages)):
        stage = stages[stage_index]
        stage.button = tk.Radiobutton(stages_frame, text=stage.name, variable = selected_stage, value = stage_index, command=updateStageEditor,
                          indicatoron = 0, width = 20, takefocus=0)
        stage.check_var = tk.BooleanVar()
        stage.check_var.set(True)
        stage.checkbox = tk.Checkbutton(stages_frame, variable = stage.check_var, command=updateOutputText, takefocus=0)
        if stage_index == 0: #Input cannot be disabled, so don't show the checkbox
            stage.checkbox.config(state="disabled")
        stage.button.grid(column=1, row=stage_index)
        stage.checkbox.grid(column=0, row=stage_index)
updateStagesFrame()
# Read-only output pane on the right.
right_text = tk.Text(root, takefocus=0, width=10, height=10, font=("Courier", 10))
right_text.grid(row=0, column=4, rowspan=4, sticky="NESW")
right_text.grid_propagate(0)
# Only the editor (column 0) and the output (column 4) absorb extra space;
# the stage-list columns keep their natural width.
tk.Grid.columnconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 1, weight=0)
tk.Grid.columnconfigure(root, 2, weight=0)
tk.Grid.columnconfigure(root, 3, weight=0)
tk.Grid.columnconfigure(root, 4, weight=1)
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.rowconfigure(root, 1, weight=0)
tk.Grid.columnconfigure(stage_editor, 0, weight=1)
tk.Grid.rowconfigure(stage_editor, 0, weight=1)
#==========
def add(menu, StageClass): #Helper function to make adding stages neater
    """Register a menu entry that instantiates StageClass and appends it to the pipeline."""
    menu.add_command(label= StageClass.name,#Takes the name from the class
             command=lambda:addStage(StageClass(stage_editor, #passes the stage editor frame to draw to
                                                updateOutputText))) #and a callback for when things change and the output text needs updating
#Functions for file menu operations:
def openCom():
    """File->Open: load the chosen file's contents into the Input stage.

    Fix: the original detected a cancelled dialog by catching AttributeError
    around the whole body (``with None`` raises it), which also silently
    swallowed any real AttributeError raised by the textbox calls. Check the
    dialog's return value explicitly instead.
    """
    file = filedialog.askopenfile()
    if file is None:  # user cancelled the dialog
        return
    with file:
        text = file.read()
    stages[0].textbox.delete(1.0, tk.END)
    stages[0].textbox.insert(tk.END, text)
def clearCom():
    """File->Clear: empty the Input textbox and drop every stage except Input."""
    global stages
    stages[0].textbox.delete(1.0, tk.END)
    stages = [stages[0]]
    selected_stage.set(0)
    updateStageEditor()
    updateStagesFrame()
def saveCom():
    """File->Save: write the current pipeline output to a user-chosen file.

    Fix: like openCom, the original abused ``except AttributeError`` to detect
    a cancelled dialog, which also hid genuine AttributeErrors from
    getOutputText(). Check the dialog's None return explicitly.
    """
    text = getOutputText()
    file = filedialog.asksaveasfile()
    if file is None:  # user cancelled the dialog
        return
    with file:
        file.write(text)
def copyCom():
    """File->Copy output: place the pipeline output on the system clipboard.

    Fix: the original ran every stage's ``process()`` unconditionally, which
    ignored both the per-stage enable checkboxes and the Encode/Decode
    selection — so the copied text could differ from what is displayed.
    Use getOutputText() (same source as the output pane and saveCom).
    """
    text = getOutputText()
    root.clipboard_clear()
    root.clipboard_append(text)
    root.update()  # flush so the clipboard survives if the app closes
# Menu bar: File operations plus one cascade per stage category.
menu = tk.Menu(root)
file_menu = tk.Menu(menu, tearoff=0)
file_menu.add_command(label="Open", command=openCom)
file_menu.add_command(label="Clear", command = clearCom)
file_menu.add_command(label="Save", command=saveCom)
file_menu.add_command(label="Copy output", command=copyCom)
menu.add_cascade(label="File", menu = file_menu)
# Analysis stages (statistics/detection; defined in Analysis_stages).
ana_menu = tk.Menu(menu, tearoff=0)
add(ana_menu, Length)
add(ana_menu, PlayfairDetect)
add(ana_menu, FrequencyAnalyse)
add(ana_menu, Doubles)
add(ana_menu, Triples)
add(ana_menu, IoC)
add(ana_menu, WordFinder)
add(ana_menu, VigenereKeyword)
add(ana_menu, ColumnarKeyword)
menu.add_cascade(label="Analyse", menu=ana_menu)
# Plain-text transformations (defined in Text_stages).
text_menu = tk.Menu(menu, tearoff=0)
add(text_menu, Capitalise)
add(text_menu, Lowercase)
add(text_menu, Swapcase)
add(text_menu, Strip)
add(text_menu, RemoveSpaces)
add(text_menu, Reverse)
add(text_menu, Block)
menu.add_cascade(label="Text stage", menu=text_menu)
# Cipher encode/decode stages (defined in Solve_stages).
solve_menu = tk.Menu(menu, tearoff=0)
add(solve_menu, CaesarShift)
add(solve_menu, Substitution)
add(solve_menu, Affine)
add(solve_menu, Vigenere)
#add(solve_menu, Transposition) #this one doesn't work
add(solve_menu, RailFence)
add(solve_menu, Scytale)
add(solve_menu, Morse)
menu.add_cascade(label="Solve stage", menu=solve_menu)
#Functions for the output menu operations
def changeFontSize(change):
    """Grow or shrink the Courier font of the output pane and Input textbox by *change* points."""
    size_now = int(right_text.cget("font").split(" ")[1])
    new_font = ("Courier", size_now + change)
    right_text.config(font=new_font)
    stages[0].textbox.config(font=new_font)
# Output menu: display-only stages and font size controls.
output_menu = tk.Menu(menu, tearoff=0)
add(output_menu, OutputHighlight)
add(output_menu, Blank)
output_menu.add_command(label="Increase font size", command=lambda:changeFontSize(1))
output_menu.add_command(label="Decrease font size", command=lambda:changeFontSize(-1))
# Tag used by stages to colour parts of the output red.
right_text.tag_configure("highlight", foreground = "red")
menu.add_cascade(label="Output", menu=output_menu)
root.config(menu=menu)
# The pipeline always starts with a single Input stage, then hand over to Tk.
addStage(Input(stage_editor, updateOutputText))
root.mainloop()
| 8,530 | 2,844 |
from mp.data.pytorch.pytorch_dataset import PytorchDataset
from mp.data.datasets.dataset import Instance
import copy
import torch
class DomainPredictionDatasetWrapper(PytorchDataset):
    r"""Wraps a PytorchDataset to reuse its instances.x and replacing the labels.

    The wrapped dataset's inputs are kept, but every label is replaced with a
    single domain index so the data can train a domain classifier.
    """
    def __init__(self, pytorch_ds, target_idx):
        """
        Args:
            pytorch_ds (PytorchSegmentationDataset): the Dataset that need to be wrapped
            target_idx (int): the target idx for domain prediction, corresponding to this dataset
        """
        # Minimal stand-in object so PytorchDataset.__init__ can read
        # .instances and .hold_out_ixs without re-loading anything.
        class Dummy:
            def __init__(self):
                self.instances = pytorch_ds.instances
                self.hold_out_ixs = []
        self.original_ds = pytorch_ds
        # Ugly
        # noinspection PyTypeChecker
        super().__init__(dataset=Dummy(), size=pytorch_ds.size)
        # Copy the predictor, but prevent it from reshaping the prediction
        self.predictor = copy.copy(pytorch_ds.predictor)
        self.predictor.reshape_pred = False
        # Create new target as one hot encoded
        # self.target = torch.zeros((1, target_cnt), dtype=self.instances[0].y.tensor.dtype)
        # self.target[:, target_idx] = 1
        # Scalar domain label; dtype matches the original labels so downstream
        # loss functions behave the same.
        self.target = torch.tensor([target_idx], dtype=self.instances[0].y.tensor.dtype)
        # Modify instances
        self.instances = [Instance(inst.x, self.target, inst.name, inst.class_ix, inst.group_id)
                          for inst in self.instances]

    def get_subject_dataloader(self, subject_ix):
        r"""Get a list of input/target pairs equivalent to those if the dataset
        was only of subject with index subject_ix. For evaluation purposes.
        """
        # Generate the original subject dataloader and replace the target
        subject_dataloader = self.original_ds.get_subject_dataloader(subject_ix)
        return [(x, self.target) for x, _ in subject_dataloader]
| 1,912 | 569 |
class Dog:
    """A dog that barks when it hears its own name."""

    def __init__(self, name):
        self.name = name

    def speak(self):
        print("Woof!")

    def hear(self, words):
        # React only when the dog's name occurs somewhere in the utterance.
        if self.name in words:
            self.speak()


class Husky(Dog):
    """Sled dog with its own howl."""

    origin = "Siberia"

    def speak(self):
        print("Awoo!")


class Chihuahua(Dog):
    """Tiny dog, tiny bark."""

    origin = "Mexico"

    def speak(self):
        print("Yip!")


class Labrador(Dog):
    """Keeps the default Dog bark."""

    origin = "Canada"
| 420 | 160 |
import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.express as px
from pprint import pprint as pprint
# Silence matplotlib's verbose DEBUG/INFO logging.
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
# Maps display name -> the CLI-flag column marking that dataset in the results df.
dataset = {
    "Cifar10": "--cifar10",
    "Cifar100": "--cifar100",
    "SVHN": "--svhn",
    "MNIST": "--mnist"
}
# Maps dataset display name -> the CLI-flag columns of its base models.
basemodels = {
    "Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
    "Cifar10": ["--cifar10-vgg19"],
    "SVHN": ["--svhn-vgg19"],
    "MNIST": ["--mnist-lenet"]
}
def show_for_tucker():
    """For each (dataset, base model, run) in the global `df`, smooth the
    lr/loss curves from the run's CSV log, plot delta-loss vs learning rate,
    and collect a rounded "good" learning rate per configuration.

    Reads the module-level globals `df`, `dataset` and `basemodels`; prints
    the resulting {trace name: lr} dict and the traces with too few points.
    """
    # compression_method = ["tucker", "tensortrain"]
    # df = df.apply(pd.to_numeric, errors='coerce')
    dct_config_lr = dict()
    lst_name_trace_low = list()
    for dataname in dataset:
        df_data = df[df[dataset[dataname]] == 1]
        for base_model_name in basemodels[dataname]:
            df_model = df_data[df_data[base_model_name] == 1]
            for index, row in df_model.iterrows():
                fig = go.Figure()
                csv_file = pathlib.Path(row["results_dir"]) / row["output_file_csvcbprinter"]
                df_csv = pd.read_csv(csv_file)
                win_size = 5
                # Smooth log10(lr) and the loss with a rolling mean of win_size.
                lr_values = df_csv["lr"].values
                lr_values_log = np.log10(lr_values)
                lr_rolling_mean = pd.Series(lr_values_log).rolling(window=win_size).mean().iloc[win_size - 1:].values
                loss_rolling_mean = df_csv["loss"].rolling(window=win_size).mean().iloc[win_size - 1:].values
                if all(np.isnan(loss_rolling_mean)):
                    continue
                # First difference of the smoothed loss (how fast it decreases).
                delta_loss = (np.hstack([loss_rolling_mean, [0]]) - np.hstack([[0], loss_rolling_mean]))[1:-1]
                delta_loss_rolling_mean = pd.Series(delta_loss).rolling(window=win_size).mean().iloc[win_size - 1:].values
                lr_rolling_mean_2x = pd.Series(lr_rolling_mean).rolling(window=win_size).mean().iloc[win_size - 1:].values
                lr_rolling_mean_2x_exp = 10 ** lr_rolling_mean_2x
                # fig.add_trace(go.Scatter(x=lr_rolling_mean_exp, y=loss_rolling_mean, name="sp_fac {} - hiearchical {}".format(row["--sparsity-factor"], row["--hierarchical"])))
                fig.add_trace(go.Scatter(x=lr_rolling_mean_2x_exp[:-1], y=delta_loss_rolling_mean, name=""))
                # Pick the lr with the steepest loss decrease, rounded to a power of 10.
                argmin_loss = np.argmin(delta_loss_rolling_mean)
                val = lr_rolling_mean_2x_exp[:-1][argmin_loss]
                log_val = np.log10(val)
                approx = 10 ** np.around(log_val, decimals=0)
                sparsity = int(row["--sparsity-factor"])
                hierarchical = bool(row["--hierarchical"])
                str_hierarchical = " H" if hierarchical else ""
                try:
                    nb_fac = int(row["--nb-factor"])
                except ValueError:
                    nb_fac = None
                name_trace = f"tucker_sparse_facto-{dataset[dataname]}-{base_model_name}-Q={nb_fac}-K={sparsity}{str_hierarchical}"
                print(len(delta_loss_rolling_mean), name_trace)
                # Too few smoothed points: record separately, don't trust the estimate.
                if len(delta_loss_rolling_mean) < 10:
                    lst_name_trace_low.append(name_trace)
                    continue
                dct_config_lr[name_trace] = approx
                # title_str = "{}:{} - {} - keep first :{}".format(dataname, base_model_name, "tucker", keep_first)
                fig.update_layout(barmode='group',
                                  title=name_trace,
                                  xaxis_title="lr",
                                  yaxis_title="loss",
                                  xaxis_type="log",
                                  xaxis={'type': 'category'},
                                  )
                # fig.show()
    pprint(dct_config_lr)
    pprint(lst_name_trace_low)
if __name__ == "__main__":
    # Load the main and "errors" result runs, keep only successful rows,
    # deduplicate, then run the lr-selection analysis.
    root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/")
    expe_path = "2020/04/0_0_compression_tucker_sparse_facto_select_lr"
    expe_path_errors = "2020/04/0_0_compression_tucker_sparse_facto_select_lr_errors"
    src_results_dir = root_source_dir / expe_path
    src_results_dir_errors = root_source_dir / expe_path_errors
    # Attach each run's directory so show_for_tucker can locate its CSV log.
    get_df_and_assign = lambda x: get_df(x).assign(results_dir=str(x))
    df = get_df_and_assign(src_results_dir)
    df_errors = get_df_and_assign(src_results_dir_errors)
    df = pd.concat([df, df_errors])
    df = df.dropna(subset=["failure"])
    df = df[df["failure"] == 0]
    df = df.drop(columns="oar_id").drop_duplicates()
    root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
    output_dir = root_output_dir / expe_path / "line_plots"
    output_dir.mkdir(parents=True, exist_ok=True)
    show_for_tucker()
# -*- coding: utf-8 -*-
import unittest
import cmath
import numpy as np
from scipy import integrate
from .. import polarization
from ...utils import instance
from ...patch import jsonpickle
class test_polarization(unittest.TestCase):
    """Round-trip and physics consistency tests for the Jones/Stokes
    polarization representations."""

    def _equal_params(self, params1, params2):
        # Compare two parameter dicts; strings exactly, numerics approximately.
        for k, v in params1.items():
            if instance.isstring(v):
                self.assertEqual(v, params2[k])
            else:
                np.testing.assert_allclose(v, params2[k])

    def _gen_jones(self, n=20):
        """Yield n random Jones vectors (complex x/y components)."""
        x = np.random.uniform(low=-10, high=10, size=4 * n).reshape((n, 4))
        for xi in x:
            yield polarization.Jones(xi[0] + xi[1] * 1j, xi[2] + xi[3] * 1j)

    def _gen_stokes(self, n=20):
        """Yield n random, partially polarized Stokes vectors (S0 >= |S|)."""
        x = np.random.uniform(low=-10, high=10, size=3 * n).reshape((n, 3))
        for xi in x:
            S0 = np.sqrt(sum(xi[1:] ** 2)) * np.random.uniform(low=1, high=1.5)
            yield polarization.Stokes(S0, *xi)

    def test_convert_representation(self):
        # Jones <-> Stokes round trips must preserve all derived quantities.
        def f1(x, attr):
            return getattr(x, attr)

        def f2(x, attr):
            # Angles are compared modulo 360 degrees.
            return getattr(x, attr) % 360

        attrs = {
            "coherency_matrix": f1,
            "dop": f1,
            "dolp": f1,
            "docp": f1,
            "hdolp": f1,
            "polangle": f2,
        }
        for J1 in self._gen_jones():
            S1 = J1.to_stokes()
            J2 = S1.to_jones()
            S2 = J2.to_stokes()
            J3 = S2.to_jones()
            self._equal_params(J2.to_params(), J3.to_params())
            self._equal_params(S1.to_params(), S2.to_params())
            # A pure Jones state is fully polarized by construction.
            self.assertEqual(J1.dop, 1)
            for attr, f in attrs.items():
                a = f(J1, attr)
                np.testing.assert_allclose(a, f(S1, attr))
                np.testing.assert_allclose(a, f(J2, attr))
                np.testing.assert_allclose(a, f(S2, attr))
                np.testing.assert_allclose(a, f(J3, attr))
            np.testing.assert_allclose(J1.norm, J2.norm)
            np.testing.assert_allclose(
                J1.phase_difference % 360, J2.phase_difference % 360
            )
            np.testing.assert_allclose(J2.to_numpy(), J3.to_numpy())
            np.testing.assert_allclose(S1.to_numpy(), S2.to_numpy())
            np.testing.assert_allclose(S1.to_numpy(), S2.to_numpy())

    def test_stokes(self):
        # Decomposition into polarized + unpolarized parts must conserve intensity.
        for S in self._gen_stokes():
            tmp = S.decompose()
            Spol, Sunpol = tmp["pol"], tmp["unpol"]
            np.testing.assert_allclose(
                S.intensity, S.intensity_polarized + S.intensity_unpolarized
            )
            np.testing.assert_allclose(S.intensity_polarized, Spol.intensity)
            np.testing.assert_allclose(S.intensity_unpolarized, Sunpol.intensity)
            np.testing.assert_allclose(S.dop, S.intensity_polarized / S.intensity)
            np.testing.assert_allclose(
                S.coherency_matrix, Spol.coherency_matrix + Sunpol.coherency_matrix
            )
            # Converting to Jones keeps only the polarized part.
            J = S.to_jones(allowloss=True)
            np.testing.assert_allclose(J.intensity, Spol.intensity)
            S2 = polarization.Stokes.from_params(**S.to_params())
            np.testing.assert_allclose(S.to_numpy(), S2.to_numpy())

    def test_jones(self):
        for J in self._gen_jones():
            # Round trip through Stokes with the original global phase restored.
            np.testing.assert_allclose(
                J.to_numpy(), J.to_stokes().to_jones(phase0=J.phase0).to_numpy()
            )
            # Trace of the coherency matrix equals the squared amplitude.
            np.testing.assert_allclose(J.coherency_matrix.trace(), J.norm ** 2)
            J2 = polarization.Jones.from_params(**J.to_params())
            np.testing.assert_allclose(J.to_numpy(), J2.to_numpy())
            # Smoke test the plotting helper.
            J.plot_efield(animate=True)

    def test_intensity(self):
        # Setting .intensity rescales the state; all other parameters stay fixed.
        for J in self._gen_jones():
            S = J.to_stokes()
            Jparams = J.to_params()
            Sparams = S.to_params()
            IJ, IS = np.random.uniform(low=1, high=10, size=2)
            J.intensity = IJ
            S.intensity = IS
            Jparams["intensity"] = IJ
            Sparams["intensity"] = IS
            self._equal_params(J.to_params(), Jparams)
            self._equal_params(S.to_params(), Sparams)
        for S in self._gen_stokes():
            Sparams = S.to_params()
            IS = np.random.uniform(low=1, high=10)
            S.intensity = IS
            Sparams["intensity"] = IS
            self._equal_params(S.to_params(), Sparams)

    def test_rotate(self):
        # Frame rotation must match conjugation of the coherency matrix.
        for J1 in self._gen_jones():
            S1 = J1.to_stokes()
            azimuth = np.random.uniform(low=0, high=2 * np.pi)  # change-of-frame
            J2 = J1.rotate(azimuth)
            S2 = S1.rotate(azimuth)
            self._equal_params(S2.to_params(), J2.to_stokes().to_params())
            R = polarization.JonesMatrixRotation(-azimuth)
            Ri = polarization.JonesMatrixRotation(azimuth)
            np.testing.assert_allclose(
                R.dot(J1.coherency_matrix).dot(Ri), J2.coherency_matrix
            )
            np.testing.assert_allclose(
                R.dot(S1.coherency_matrix).dot(Ri), S2.coherency_matrix
            )

    def test_thomson(self):
        # Thomson scattering via the state API must match explicit Jones matrices,
        # and integrating over all angles must give the 8*pi/3 Thomson cross-section.
        for J1 in self._gen_jones():
            S1 = J1.to_stokes()
            azimuth = np.random.uniform(low=0, high=2 * np.pi)
            polar = np.random.uniform(low=0, high=np.pi)
            J2 = J1.thomson_scattering(azimuth, polar)
            S2 = S1.thomson_scattering(azimuth, polar)
            self._equal_params(S2.to_params(), J2.to_stokes().to_params())
            angle = polarization.ThomsonRotationAngle(azimuth)  # change-of-frame
            R = polarization.JonesMatrixRotation(-angle)
            Ri = polarization.JonesMatrixRotation(angle)
            Mth = polarization.JonesMatrixThomson(polar)
            Mthi = Mth
            np.testing.assert_allclose(
                Mth.dot(R).dot(J1.coherency_matrix).dot(Ri).dot(Mthi),
                J2.coherency_matrix,
            )
            np.testing.assert_allclose(
                Mth.dot(R).dot(S1.coherency_matrix).dot(Ri).dot(Mthi),
                S2.coherency_matrix,
            )
            np.testing.assert_allclose(
                S2.intensity, S1.thomson_intensity(azimuth, polar)
            )

            def integrand(azimuth, polar):
                return S1.thomson_intensity(
                    np.degrees(azimuth), np.degrees(polar)
                ) * np.sin(polar)

            thomsonsc = (
                integrate.dblquad(
                    integrand, 0, np.pi, lambda x: 0, lambda x: 2 * np.pi
                )[0]
                / S1.intensity
            )
            np.testing.assert_allclose(thomsonsc, 8 * np.pi / 3)

    def test_compton(self):
        # Compton-scattered intensity must agree between the two API entry points.
        for S1 in self._gen_stokes():
            azimuth = np.random.uniform(low=0, high=2 * np.pi)
            polar = np.random.uniform(low=0, high=np.pi)
            energy = np.random.uniform(low=5.0, high=20.0)
            S2 = S1.compton_scattering(azimuth, polar, energy)
            np.testing.assert_allclose(
                S2.intensity, S1.compton_intensity(azimuth, polar, energy)
            )

    def test_serialize(self):
        # jsonpickle round trip preserves equality for both representations.
        g1 = next(iter(self._gen_jones()))
        g2 = jsonpickle.loads(jsonpickle.dumps(g1))
        self.assertEqual(g1, g2)
        g1 = next(iter(self._gen_stokes()))
        g2 = jsonpickle.loads(jsonpickle.dumps(g1))
        self.assertEqual(g1, g2)
def test_suite():
    """Test suite including all test suites."""
    suite = unittest.TestSuite()
    for test_name in (
        "test_jones",
        "test_stokes",
        "test_convert_representation",
        "test_intensity",
        "test_rotate",
        "test_thomson",
        "test_compton",
        "test_serialize",
    ):
        suite.addTest(test_polarization(test_name))
    return suite
# Run the full suite directly and signal failure through the exit code (CI-friendly).
if __name__ == "__main__":
    import sys

    mysuite = test_suite()
    runner = unittest.TextTestRunner()
    if not runner.run(mysuite).wasSuccessful():
        sys.exit(1)
| 8,209 | 2,877 |
#---------------------------------------
#Since : 2019/04/24
#Update: 2019/07/25
# -*- coding: utf-8 -*-
#---------------------------------------
import numpy as np
class RingBuffer:
    """Fixed-capacity circular buffer backed by a plain list.

    Empty slots hold the sentinel value ``[]``. ``end`` is the next write
    index; ``start`` is the index of the oldest stored element. When a write
    catches up with ``start``, the oldest element is dropped.
    """

    def __init__(self, buf_size):
        self.size = buf_size
        self.buf = [[] for _ in range(buf_size)]
        self.start = 0
        self.end = 0

    def add(self, el):
        """Store *el* at the write position, evicting the oldest entry on wrap."""
        self.buf[self.end] = el
        self.end = (self.end + 1) % self.size
        if self.end == self.start:
            # Write pointer caught the read pointer: advance past the oldest.
            self.start = (self.start + 1) % self.size

    def Get_buffer(self):
        """Return every slot, walking backwards from the write position."""
        return [self.buf[(self.end - offset) % self.size]
                for offset in range(self.size)]

    def Get_buffer_start_end(self):
        """Return elements from ``start`` onward, stopping at the first empty slot."""
        collected = []
        for offset in range(self.size):
            slot = (self.start + offset) % self.size
            if self.buf[slot] == []:
                return collected
            collected.append(self.buf[slot])
        return collected

    def get(self):
        """Return the element at ``start`` and advance the read position."""
        oldest = self.buf[self.start]
        self.start = (self.start + 1) % self.size
        return oldest
| 1,166 | 385 |
"""
______ _ _ _____ _ _ _
| ____| | | (_) | __ \ | | /\ | | (_)
| |__ __ _ ___| |_ __ _ _ __ _ | |__) |___ ___| |_ / \ __| |_ __ ___ _ _ __
| __/ _` / __| __/ _` | '_ \| | | _ // _ \/ __| __| / /\ \ / _` | '_ ` _ \| | '_ \
| | | (_| \__ \ || (_| | |_) | | | | \ \ __/ (__| |_ / ____ \ (_| | | | | | | | | | |
|_| \__,_|___/\__\__,_| .__/|_| |_| \_\___|\___|\__| /_/ \_\__,_|_| |_| |_|_|_| |_|
| |
|_|
"""
from .config import router
from .auth import admin_login_view
from .core import ReactAppAdmin, ReactTortoiseModelAdmin
from .commands import create_super_user, compile_app_admin, compile_model_admin
__version__ = "0.0.1"
| 811 | 328 |
import itertools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.stats import f
from scipy.stats import norm
class ANOVA:
    """Analyse designed (full-factorial) experiments with ANOVA.

    The last column of ``data`` is the response; every other column is a
    factor.  The null model is y = y_average (no factor has an effect), so
    each sum of squares measures how much a factor — or an interaction of
    factors — moves the response away from the grand mean.

    Requirements: a balanced full-factorial layout with the same number of
    replicates per cell, and n > 1 replicates (otherwise the error sum of
    squares is zero and the F statistics are undefined).

    The results end up in ``self.table`` (index: each main factor, each
    interaction such as 'A:B', 'Error', 'Total'; columns: 'Sum of Squares',
    'Degrees of Freedom', 'Mean Square', 'F0', 'P-Value').
    """
    def __init__(self, data):
        # Initialise variables and simple summary statistics.
        self.data = data
        self.num_factors = len(self.data.columns) - 1
        self.factors = list(self.data.columns[:-1])
        self.sum_y = data.iloc[:,-1].sum()
        self.unique_dict = self.unique_values_dict(self.data)
        self.levels = {}
        # Enumerate every source of variation: all k-way factor combinations.
        sources_of_variation = []
        for interaction_level in range(self.num_factors):
            combos = itertools.combinations(self.factors, interaction_level + 1)
            for combo in combos:
                sources_of_variation.append(self.make_interaction_name(combo))
        sources_of_variation.append('Error')
        sources_of_variation.append('Total')
        # Create the (initially empty) ANOVA table.
        self.table = pd.DataFrame(columns =['Sum of Squares', 'Degrees of Freedom', 'Mean Square', 'F0', 'P-Value'], index=sources_of_variation)
        # One dict of sums of squares per interaction level.  Built with a
        # comprehension (the original used [{}] * n, which creates n
        # references to the SAME dict; harmless here only because each slot
        # is re-assigned before use, but a latent aliasing bug).
        self.sum_of_squares = [{} for _ in range(self.num_factors)]
        # Determine the number of replicates n; must be equal for all cells.
        total = 1
        for factor in self.factors:
            level = len(self.unique_dict[factor])
            self.levels[factor] = level
            total = total*level
        self.n = len(self.data)/total
        self.total = len(self.data)
        # Fill in SS and DoF for every main effect and interaction.
        for interaction_level in range(self.num_factors):
            self.calculate_interactions(interaction_level + 1)
        # --- Assemble the table from its component parts ---
        # Sum of squares
        self.table['Sum of Squares'] = pd.DataFrame(self.sum_of_squares).max()
        self.table.loc['Total', 'Sum of Squares'] = (data.iloc[:,-1]**2).sum() - (self.sum_y**2)/(self.total)
        prefactor = self.make_prefactor(self.factors)
        final_subtotal = (1/(prefactor*self.n)) * (self.data.groupby(self.factors).sum().iloc[:,-1]**2).sum() - (self.sum_y**2)/self.total
        self.table.loc['Error', 'Sum of Squares'] = self.table.loc['Total', 'Sum of Squares'] - final_subtotal
        # Degrees of freedom
        self.table.loc['Total', 'Degrees of Freedom'] = self.total - 1
        self.table.loc['Error', 'Degrees of Freedom'] = (self.total/self.n) * (self.n - 1)
        # Mean square
        self.table['Mean Square'] = self.table['Sum of Squares']/self.table['Degrees of Freedom']
        # F0 statistic (each mean square against the error mean square)
        self.table['F0'] = self.table['Mean Square']/self.table.loc['Error', 'Mean Square']
        # P-value from the F distribution's survival function
        self.f_function = f(self.n, self.total/self.n)
        self.table['P-Value'] = self.f_function.sf(list(self.table['F0']))
        # Blank out cells that have no meaning for the Error/Total rows.
        # (np.nan: the np.NaN alias was removed in NumPy 2.0.)
        self.table.iloc[-2:, -2:] = np.nan
        self.table.iloc[-1, -3] = np.nan
        self.table.iloc[:, :-1] = self.table.iloc[:, :-1].astype(float)
        # F0 threshold for statistical significance P < 0.05
        self.calculate_F0_significance_level()
        # Residuals for the null model y = average_y
        self.calculate_residuals()
    def calculate_interactions(self, interaction_level):
        """Compute SS and DoF for every interaction at one level and store them.

        interaction_level = 1 ---> main factors
        interaction_level = 2 ---> 2-factor interactions
        interaction_level = 3 ---> 3-factor interactions
        ...
        Lower-level results must already exist: each interaction's SS is its
        subtotal minus the SS of every lower-order effect it contains.
        """
        combinations = itertools.combinations(self.factors, interaction_level)
        subtotals = {}
        effects = {}
        for combo in combinations:
            interaction_factors = list(combo)
            interaction = self.make_interaction_name(interaction_factors)
            prefactor = self.make_prefactor(interaction_factors)
            self.table.loc[interaction, 'Degrees of Freedom'] = self.calculate_degrees_of_freedom(interaction_factors)
            subtotals[interaction] = (1/(prefactor*self.n)) * (self.data.groupby(interaction_factors).sum().iloc[:,-1]**2).sum() - (self.sum_y**2)/self.total
            effects[interaction] = subtotals[interaction]
            # Subtract every contained lower-order effect.
            for level in range(interaction_level - 1):
                factor_combos = itertools.combinations(combo, level + 1)
                for factor_combo in factor_combos:
                    name = self.make_interaction_name(factor_combo)
                    effects[interaction] += -self.sum_of_squares[level][name]
        self.sum_of_squares[interaction_level - 1] = effects
    def calculate_degrees_of_freedom(self, interaction_factors):
        """Product of (levels - 1) over the interaction's factors."""
        dof = 1
        for factor in interaction_factors:
            dof = (self.levels[factor] - 1) * dof
        return dof
    def unique_values_dict(self, df):
        """Map each column name to its array of unique values."""
        unique_dict = {}
        for column in df.columns:
            unique_dict[column] = df[column].unique()
        return unique_dict
    def make_prefactor(self, interaction_factors):
        """Product of the levels of every factor NOT in the interaction."""
        prefactor = 1
        for factor in self.factors:
            if factor not in interaction_factors:
                prefactor = prefactor * self.levels[factor]
        return prefactor
    def make_interaction_name(self, interaction_factors):
        """Join factor names with ':' — e.g. ('A', 'B') -> 'A:B'."""
        interaction = ''
        for factor in interaction_factors:
            interaction = interaction + ':' + factor
        interaction = interaction[1:]
        return interaction
    def calculate_F0_significance_level(self, sig=0.05):
        """Store the critical F0 value for significance level ``sig``."""
        self.significance = self.f_function.isf(sig)
    def calculate_residuals(self):
        """Standardised residuals of the cell-mean model (scaled by error sigma)."""
        self.sigma = np.sqrt(self.table.loc['Error', 'Mean Square'])
        tmp_data = self.data.set_index(self.factors)
        self.residuals = (tmp_data - tmp_data.groupby(self.factors).mean()).iloc[:, -1].values/self.sigma
    def plot_residuals(self):
        """Make a normal probability plot of the residuals."""
        residuals = sorted(self.residuals)
        df = pd.DataFrame(columns=['Residuals'], data=residuals)
        df['Position'] = df.index + 1
        # Blom-style plotting positions.
        df['f'] = (df.Position - 0.375)/(len(df) + 0.25)
        df['z'] = norm.ppf(df.f)
        plt.figure()
        sns.regplot(x='Residuals', y='z', data=df)
        plt.show()
    def plot_normal(self):
        """Make a normal probability plot of the raw response."""
        tmp_data = self.data.iloc[:, -1].values
        tmp_data.sort()
        df = pd.DataFrame(columns=['Response'], data=tmp_data)
        df['Position'] = df.index + 1
        df['f'] = (df.Position - 0.375)/(len(df) + 0.25)
        df['z'] = norm.ppf(df.f)
        plt.figure()
        sns.regplot(x='Response', y='z', data=df)
        plt.show()
    def plot_pareto_chart(self):
        """Horizontal bar chart of F0 per term, with the significance cut-off."""
        ANOVA_table = self.table.sort_values(by='F0')
        plt.figure()
        plt.barh(ANOVA_table.index, ANOVA_table['F0'])
        plt.xlabel('F0')
        plt.ylabel('Term')
        plt.axvline(x = self.significance, linestyle='--')
if __name__ == "__main__":
    # Demo / self-test: only runs when executed as a script, so importing this
    # module no longer fails when the example CSV files are absent.
    three_data = pd.read_csv('test_data.csv')
    three = ANOVA(three_data)
    # ANOVA needs n > 1 replicates per cell; this dataset provides them once
    # the run-order column is dropped.
    five_data = pd.read_csv('example_data.csv')
    five_data.drop(columns=['order'], inplace=True)
    five = ANOVA(five_data)
| 8,202 | 2,555 |
import os
from datetime import datetime
from datmo.core.util.json_store import JSONStore
from datmo.core.util.misc_functions import prettify_datetime, printable_object, format_table
class Snapshot():
    """Entity representing one saved version of a model.

    A snapshot bundles the five components needed to share or reproduce a
    model: source code, dependency environment, large files, configuration,
    and performance metrics.  All attributes must be serializable by the DB.

    Parameters
    ----------
    dictionary : dict
        Required keys: ``model_id``, ``message``, ``code_id``,
        ``environment_id``, ``file_collection_id``, ``config``, ``stats``.
        Optional keys (with defaults): ``id`` (None), ``task_id`` (None),
        ``label`` (None), ``visible`` (True), ``created_at``
        (``datetime.utcnow()`` at instantiation), ``updated_at``
        (same as ``created_at``).
    """

    def __init__(self, dictionary):
        self.id = dictionary.get('id', None)
        # The five components plus identifying metadata are mandatory.
        for key in ('model_id', 'message', 'code_id', 'environment_id',
                    'file_collection_id', 'config', 'stats'):
            setattr(self, key, dictionary[key])
        self.task_id = dictionary.get('task_id', None)
        self.label = dictionary.get('label', None)
        self.visible = dictionary.get('visible', True)
        self.created_at = dictionary.get('created_at', datetime.utcnow())
        self.updated_at = dictionary.get('updated_at', self.created_at)

    def __eq__(self, other):
        # Two snapshots are the same entity exactly when their ids match.
        return bool(other) and self.id == other.id

    def __str__(self):
        BLUE, YELLOW, BOLD, END = '\033[94m', '\033[93m', '\033[1m', '\033[0m'
        header = BLUE + "snapshot " + self.id + END
        if self.label:
            header = (header
                      + BLUE + " (" + END
                      + YELLOW + BOLD + "label: " + self.label + END
                      + BLUE + ")" + END)
        pieces = [header + os.linesep]
        pieces.append("Date: " + prettify_datetime(self.created_at) + os.linesep)
        rows = []
        if self.task_id:
            rows.append(["Task", "-> " + self.task_id])
        rows.append(["Visible", "-> " + str(self.visible)])
        # The snapshot components
        rows.append(["Code", "-> " + self.code_id])
        rows.append(["Environment", "-> " + self.environment_id])
        rows.append(["Files", "-> " + self.file_collection_id])
        rows.append(["Config", "-> " + str(self.config)])
        rows.append(["Stats", "-> " + str(self.stats)])
        pieces.append(format_table(rows))
        pieces.append(os.linesep + " " + self.message + os.linesep + os.linesep)
        return "".join(pieces)

    def __repr__(self):
        return str(self)

    def save_config(self, filepath):
        """Write the config dict to ``<filepath>/config.json``."""
        JSONStore(os.path.join(filepath, 'config.json'), self.config)

    def save_stats(self, filepath):
        """Write the stats dict to ``<filepath>/stats.json``."""
        JSONStore(os.path.join(filepath, 'stats.json'), self.stats)

    def to_dictionary(self, stringify=False):
        """Return the instance attributes as a dict.

        With ``stringify=True`` the config/stats/message/label values are
        made printable and the timestamps prettified.
        """
        pruned = {
            name: value
            for name, value in self.__dict__.items()
            if not name.startswith("__") and not callable(getattr(self, name))
        }
        if stringify:
            for name in ("config", "stats", "message", "label"):
                pruned[name] = printable_object(pruned[name])
            for name in ("created_at", "updated_at"):
                pruned[name] = prettify_datetime(pruned[name])
        return pruned
| 6,010 | 1,677 |
'''
@Author: dengzaiyong
@Date: 2021-08-21 15:16:08
@LastEditTime: 2021-08-27 19:37:08
@LastEditors: dengzaiyong
@Desciption: 训练tfidf, word2vec, fasttext语言模型
@FilePath: /JDQA/ranking/train_LM.py
'''
import os
from collections import defaultdict
from gensim import models, corpora
import config
import pandas as pd
import jieba
from utils.tools import create_logger
logger = create_logger(config.root_path + '/logs/train_LM.log')
class Trainer(object):
    """Train and persist tfidf, word2vec and fasttext models for ranking.

    Reads the ranking train/test/dev question-pair files, tokenises every
    question with jieba, builds a gensim dictionary/corpus, trains the three
    models and saves all artefacts under ``config.temp_path``.
    """
    def __init__(self):
        # Merge the questions of all three splits into one training corpus.
        self.data = self.data_reader(config.rank_train_file) + \
                    self.data_reader(config.rank_test_file) + \
                    self.data_reader(config.rank_dev_file)
        # Close the stopwords file deterministically (the original leaked the
        # handle).  NOTE(review): loaded but not used anywhere in this class.
        with open(config.stopwords_path) as fin:
            self.stopwords = fin.readlines()
        self.preprocessor()
        self.train()
        self.saver()
    def data_reader(self, path):
        """Read one question-pair TSV and return all question1/question2 sentences."""
        sentences = []
        df = pd.read_csv(path, sep='\t', encoding='utf-8')
        question1 = df['question1'].values
        question2 = df['question2'].values
        sentences.extend(list(question1))
        sentences.extend(list(question2))
        return sentences
    def preprocessor(self):
        """Tokenise the sentences and build the dictionary/corpus for tfidf."""
        logger.info('loading data...')
        # Tokenise every sentence.
        self.data = [[word for word in jieba.cut(sentence)] for sentence in self.data]
        # Count how often each word occurs.
        self.freq = defaultdict(int)
        for sentence in self.data:
            for word in sentence:
                self.freq[word] += 1
        # Drop words that occur only once.
        self.data = [[word for word in sentence if self.freq[word] > 1] \
                     for sentence in self.data]
        logger.info('building dictionary...')
        # Build and persist the token dictionary.
        self.dictionary = corpora.Dictionary(self.data)
        self.dictionary.save(config.temp_path + '/model/ranking/ranking.dict')
        # Bag-of-words corpus, serialised for later reuse.
        self.corpus = [self.dictionary.doc2bow(text) for text in self.data]
        corpora.MmCorpus.serialize(config.temp_path + '/model/ranking/ranking.mm', self.corpus)
    def train(self):
        """Train the tfidf, word2vec and fasttext models."""
        logger.info('train tfidf model...')
        self.tfidf = models.TfidfModel(self.corpus, normalize=True)
        logger.info('train word2vec model...')
        # Fix: the original passed the corpus to the constructor (which
        # already builds the vocab and trains for its own epochs) and then
        # rebuilt the vocab and trained AGAIN.  Build the model untrained
        # and run a single explicit 15-epoch training pass instead.
        self.w2v = models.Word2Vec(vector_size=config.embed_dim,
                                   window=2,
                                   min_count=2,
                                   sample=6e-5,
                                   min_alpha=0.0007,
                                   alpha=0.03,
                                   workers=4,
                                   negative=15)
        self.w2v.build_vocab(self.data)
        self.w2v.train(self.data,
                       total_examples=self.w2v.corpus_count,
                       epochs=15,
                       report_delay=1)
        logger.info('train fasttext model...')
        self.fast = models.FastText(self.data,
                                    vector_size=config.embed_dim,
                                    window=3,
                                    min_count=1,
                                    epochs=10,
                                    min_n=3,
                                    max_n=6,
                                    word_ngrams=1)
    def saver(self):
        """Persist the three trained models under config.temp_path."""
        logger.info(' save tfidf model ...')
        self.tfidf.save(os.path.join(config.temp_path, 'model/ranking/tfidf.model'))
        logger.info(' save word2vec model ...')
        self.w2v.save(os.path.join(config.temp_path, 'model/ranking/w2v.model'))
        logger.info(' save fasttext model ...')
        self.fast.save(os.path.join(config.temp_path, 'model/ranking/fast.model'))
if __name__ == "__main__":
    # Train and persist all three language models in one shot.
    Trainer()
import uuid
import datetime
import pymysql
from tool.Config import Config
from tool.Logger import Logger
class ImageDAO(object):
    """Async data-access object for the ``image`` table.

    Every method acquires a connection from the pool, executes a single
    statement and logs through ``Logger``.  Exceptions are logged and
    swallowed, so callers only observe a falsy result on failure.
    """

    def __init__(self, connect_pool):
        # Pool providing `async with pool.acquire()` connections.
        self.connect_pool = connect_pool

    async def userImageExist(self, user_id: str):
        """Return True when a row exists for ``user_id``."""
        row = None
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    await cursor.execute("SELECT user_id FROM image WHERE user_id = %s", [user_id, ])
                    row = await cursor.fetchone()
                    Logger.getInstance().info('execute sql to determine exist of image by user_id [%s]' % user_id)
                except Exception as e:
                    Logger.getInstance().exception(e)
        return row is not None

    async def getUserImage(self, user_id: str):
        """Fetch the user's image row as a dict, or None when absent."""
        row = None
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    await cursor.execute(
                        "SELECT id, file_name, user_id, url, upload_date FROM image WHERE user_id = %s",
                        [user_id, ])
                    Logger.getInstance().info('execute sql to get info of image by user_id[%s]' % user_id)
                    row = await cursor.fetchone()
                except Exception as e:
                    Logger.getInstance().exception(e)
        if row is None:
            return None
        info = dict(zip(('id', 'file_name', 'user_id', 'url'), row[:4]))
        info['upload_date'] = row[4].strftime("%Y-%m-%d")
        return info

    async def updateUserImage(self, file_name: str, url: str, user_id: str):
        """Overwrite file_name/url and stamp today's date; True when a row changed."""
        affected = 0
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    today = datetime.datetime.now().strftime("%Y-%m-%d")
                    affected = await cursor.execute(
                        "UPDATE image SET file_name = %s, url = %s, upload_date = %s where user_id = %s",
                        [file_name, url, today, user_id, ])
                    Logger.getInstance().info('execute sql for updating image info by user_id[%s]' % user_id)
                    await conn.commit()
                except Exception as e:
                    Logger.getInstance().exception(e)
        return affected > 0

    async def deleteUserImage(self, user_id: str):
        """Delete the user's image row; True when a row was removed."""
        affected = 0
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    affected = await cursor.execute(
                        "DELETE FROM image WHERE user_id = %s",
                        [user_id, ]
                    )
                    Logger.getInstance().info('execute sql for deleting image info by user_id[%s]' % user_id)
                    await conn.commit()
                except Exception as e:
                    Logger.getInstance().exception(e)
        return affected > 0

    async def createUserImage(self, file_name: str, url: str, user_id: str):
        """Insert a new image row (uuid1 id, dated today); return (ok, row_dict)."""
        record = {
            'id': str(uuid.uuid1()),
            'file_name': file_name,
            'url': url,
            'user_id': user_id,
            'upload_date': datetime.datetime.now().strftime("%Y-%m-%d"),
        }
        columns = ', '.join(record.keys())
        placeholders = ', '.join(['%s'] * len(record))
        insert_sql = "INSERT INTO {table} ({keys}) VALUES ({values})".format(
            table='image', keys=columns, values=placeholders)
        affected = 0
        async with self.connect_pool.acquire() as conn:
            async with conn.cursor() as cursor:
                try:
                    affected = await cursor.execute(insert_sql, tuple(record.values()))
                    await conn.commit()
                    Logger.getInstance().info(
                        'execute sql for inserting a image, affectRowNum[{}], insert sql[{}], values[{}]'.format(
                            affected, insert_sql, tuple(record.values())))
                except Exception as e:
                    Logger.getInstance().exception(e)
        return affected > 0, record
| 4,680 | 1,209 |
import pytest
from sqlalchemy.exc import IntegrityError
from app.dao.inbound_shortnumbers_dao import (
dao_get_inbound_shortnumbers,
dao_get_inbound_shortnumber_for_service,
dao_get_available_inbound_shortnumbers,
dao_set_inbound_shortnumber_to_service,
dao_set_inbound_shortnumber_active_flag,
dao_allocate_shortnumber_for_service,
dao_add_inbound_shortnumber)
from app.models import InboundShortNumber
from tests.app.db import create_service, create_inbound_shortnumber
def test_get_inbound_shortnumbers(notify_db, notify_db_session, sample_inbound_shortnumbers):
    """All seeded inbound short numbers are returned, in the seeded order."""
    res = dao_get_inbound_shortnumbers()
    assert len(res) == len(sample_inbound_shortnumbers)
    assert res == sample_inbound_shortnumbers
def test_get_available_inbound_shortnumbers(notify_db, notify_db_session):
    """An unassigned short number counts as available."""
    inbound_shortnumber = create_inbound_shortnumber(shortnumber='1')
    res = dao_get_available_inbound_shortnumbers()
    assert len(res) == 1
    assert res[0] == inbound_shortnumber
def test_set_service_id_on_inbound_shortnumber(notify_db, notify_db_session, sample_inbound_shortnumbers):
    """Assigning a short number to a service persists the service_id."""
    service = create_service(service_name='test service')
    numbers = dao_get_available_inbound_shortnumbers()
    dao_set_inbound_shortnumber_to_service(service.id, numbers[0])
    res = InboundShortNumber.query.filter(InboundShortNumber.service_id == service.id).all()
    assert len(res) == 1
    assert res[0].service_id == service.id
def test_after_setting_service_id_that_inbound_shortnumber_is_unavailable(
        notify_db, notify_db_session, sample_inbound_shortnumbers):
    """Once assigned, a short number no longer shows up as available."""
    service = create_service(service_name='test service')
    shortnumbers = dao_get_available_inbound_shortnumbers()
    assert len(shortnumbers) == 1
    dao_set_inbound_shortnumber_to_service(service.id, shortnumbers[0])
    res = dao_get_available_inbound_shortnumbers()
    assert len(res) == 0
def test_setting_a_service_twice_will_raise_an_error(notify_db, notify_db_session):
    """A service may hold only one inbound short number (DB unique constraint)."""
    create_inbound_shortnumber(shortnumber='1')
    create_inbound_shortnumber(shortnumber='2')
    service = create_service(service_name='test service')
    shortnumbers = dao_get_available_inbound_shortnumbers()
    dao_set_inbound_shortnumber_to_service(service.id, shortnumbers[0])
    with pytest.raises(IntegrityError) as e:
        dao_set_inbound_shortnumber_to_service(service.id, shortnumbers[1])
    assert 'duplicate key value violates unique constraint' in str(e.value)
@pytest.mark.parametrize("active", [True, False])
def test_set_inbound_shortnumber_active_flag(notify_db, notify_db_session, sample_service, active):
    """The active flag on a service's short number can be toggled both ways."""
    inbound_shortnumber = create_inbound_shortnumber(shortnumber='1')
    dao_set_inbound_shortnumber_to_service(sample_service.id, inbound_shortnumber)
    dao_set_inbound_shortnumber_active_flag(sample_service.id, active=active)
    inbound_shortnumber = dao_get_inbound_shortnumber_for_service(sample_service.id)
    assert inbound_shortnumber.active is active
def test_dao_allocate_shortnumber_for_service(notify_db_session):
    """Allocating an unassigned short number links it to the service."""
    shortnumber = '078945612'
    inbound_shortnumber = create_inbound_shortnumber(shortnumber=shortnumber)
    service = create_service()
    updated_inbound_shortnumber = dao_allocate_shortnumber_for_service(service_id=service.id,
                                                                      inbound_shortnumber_id=inbound_shortnumber.id)
    assert service.get_inbound_shortnumber() == shortnumber
    assert updated_inbound_shortnumber.service_id == service.id
def test_dao_allocate_shortnumber_for_service_raises_if_inbound_shortnumber_already_taken(notify_db_session, sample_service):
    """Allocating a short number already owned by another service fails."""
    shortnumber = '078945612'
    inbound_shortnumber = create_inbound_shortnumber(shortnumber=shortnumber, service_id=sample_service.id)
    service = create_service(service_name="Service needs an inbound shortnumber")
    with pytest.raises(Exception) as exc:
        dao_allocate_shortnumber_for_service(service_id=service.id, inbound_shortnumber_id=inbound_shortnumber.id)
    assert 'is not available' in str(exc.value)
def test_dao_allocate_shortnumber_for_service_raises_if_invalid_inbound_shortnumber(notify_db_session, fake_uuid):
    """Allocating a nonexistent short number id fails with the same error."""
    service = create_service(service_name="Service needs an inbound shortnumber")
    with pytest.raises(Exception) as exc:
        dao_allocate_shortnumber_for_service(service_id=service.id, inbound_shortnumber_id=fake_uuid)
    assert 'is not available' in str(exc.value)
def test_dao_add_inbound_shortnumber(notify_db_session):
    """A newly added short number is immediately available."""
    inbound_shortnumber = '12345678901'
    dao_add_inbound_shortnumber(inbound_shortnumber)
    res = dao_get_available_inbound_shortnumbers()
    assert len(res) == 1
    assert res[0].short_number == inbound_shortnumber
| 4,773 | 1,577 |
#!/bin/python3
# -*- coding: utf-8 -*-
# file name: profiletool.py
# standart libraries
from time import sleep
from time import process_time_ns as timer_ns
# to call the respective routines
import subprocess as ps
# local imports
import pyfactorial as pyf
import mathfactorial as mtf
def _vector():
    """Even factorial arguments to profile: 2, 4, ..., 500."""
    return range(2, 501, 2)
def _mod_asm(num):
    """Patch the VM's asm file (via external script) to compute factorial(num)."""
    ps.run(["./asmmodifier.sh", num])
    sleep(0.01)  # give the script a moment to finish writing before the emulator reads
def user_defined_fac(n):
    """Factorial via the local pure-python implementation."""
    return pyf.iterative_factorial(n)
def mathlib_defined_fac(n):
    """Factorial via the local math-library-based implementation."""
    return mtf.factorial(n)
def vm_defined_fac(n):
    """Run the Hack CPU emulator on the factorial test script.

    NOTE(review): ``n`` is unused — the value is baked into the asm by
    _mod_asm.  The third argument "2&>1 >/dev/null" is passed literally to
    the script (no shell is involved) and capture_output already swallows
    stdout/stderr; confirm it can be dropped.
    """
    ps.run(["./vm_code/hack_machine/CPUEmulator.sh",
            "./vm_code/test/Factorial.tst",
            "2&>1 >/dev/null"],
           capture_output=True,
           text=True)
def _profile_factorial(out_path, fac_fn, prepare=None):
    """Time ``fac_fn(num)`` for every num in _vector() and log the results.

    Writes one "num nanoseconds" line per input to ``out_path``, echoes each
    timing to stdout, and prints the accumulated total at the end.  Replaces
    the three previous copy-pasted loops; the output file is opened with
    ``with`` (the original leaked the handle) and the pointless
    ``seek(0, 2)`` after a truncating "w" open was dropped.

    Parameters:
        out_path: result file path (truncated on open).
        fac_fn:   callable computing the factorial of an int.
        prepare:  optional hook called with str(num) before the timer starts
                  (used to patch the VM's asm file per input).
    """
    total_time = 0
    with open(out_path, "w") as results:
        for num in _vector():
            if prepare is not None:
                prepare(str(num))
            start = timer_ns()
            fac_fn(int(num))
            end = timer_ns()
            dt = end - start
            total_time += dt
            results.write(f"{num} {dt}\n")
            print(f"factorial of {num} took {dt} nanoseconds")
            sleep(0.02)  # settle between samples
    print(f"Total time elapsed: {total_time} nanoseconds")
def test_user_factorial():
    """Profile the hand-written python factorial."""
    _profile_factorial("./results/vector_nxt_user.txt", user_defined_fac)
def test_math_factorial():
    """Profile the math-library factorial."""
    _profile_factorial("./results/vector_nxt_mathlib.txt", mathlib_defined_fac)
def test_vm_factorial():
    """Profile the Hack VM factorial (asm patched per input)."""
    _profile_factorial("./results/vector_nxt_vm.txt", vm_defined_fac, prepare=_mod_asm)
if __name__ == "__main__":
    test_user_factorial()
    test_math_factorial()
    test_vm_factorial()
| 2,363 | 860 |
# Author:Sunny Liu
from django.shortcuts import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect
from urmovie import models
from django.views.decorators.csrf import csrf_exempt
import hashlib,os
"""
Overview:
1. Adds movie cover images fetched by the crawler.
"""
@csrf_exempt
def uploadImg(request):
    """Accept an image upload via POST, then render the upload form.

    NOTE(review): CSRF protection is disabled and the stored image_name is
    the hard-coded placeholder "hahaha.jpg" — confirm both are intentional.
    """
    if request.method == 'POST':
        print(type(request.FILES.get('img')))  # debug output; consider removing
        new_img = models.Image(
            image_file=request.FILES.get('img'),
            image_name = "hahaha.jpg",
        )
        new_img.save()
    return render(request, 'uploadimg.html')
@csrf_exempt
def showImg(request):
    """Render every stored image via the showimg template."""
    imgs = models.Image.objects.all()
    content = {
        'imgs':imgs,
    }
    return render(request, 'showimg.html', content)
import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd
class BatchReNorm2D(nn.Module):
    """Batch Re-Normalization (Ioffe, 2017).

    Parameters
        num_features - C from an expected input of size (N, C, H, W)
        eps - value added to the denominator for numerical stability. Default: 1e-3
        momentum - value used for the running_mean and running_var computation. Default: 0.01
        affine - when True, the module has learnable per-channel weight/bias. Default: True
        r_max - correction clamp for the std ratio.  The paper used rmax = 1 for the
            first 5000 training steps, relaxing to rmax = 3 by 40k steps.
        d_max - correction clamp for the mean shift.  The paper used dmax = 0 for the
            first 5000 training steps, relaxing to dmax = 5 by 25k steps.

    Shape:
        Input: (N, C, H, W) -> Output: (N, C, H, W)

    Examples:
        >>> m = BatchReNorm2D(100)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
    """
    def __init__(self, num_features, r_max=1, d_max=0, eps=1e-3, momentum=0.01, affine=True):
        super(BatchReNorm2D, self).__init__()
        self.affine = affine
        if self.affine:
            # Broadcastable (1, C, 1, 1) scale and shift.
            self.weight = nn.Parameter(torch.ones((1, num_features, 1, 1)))
            self.bias = nn.Parameter(torch.zeros((1, num_features, 1, 1)))
        self.register_buffer('running_var', torch.ones(1, num_features, 1, 1))
        self.register_buffer('running_mean', torch.zeros(1, num_features, 1, 1))
        self.r_max, self.d_max = r_max, d_max
        self.eps, self.momentum = eps, momentum

    def update_stats(self, input):
        """Update running stats and return (batch_mean, batch_std, r, d).

        r and d are the renorm corrections, clamped to [1/r_max, r_max] and
        [-d_max, d_max] and detached so no gradient flows through them.
        NOTE(review): torch.var defaults to the unbiased estimator here.
        """
        batch_mean = input.mean((0, 2, 3), keepdim=True)
        batch_var = input.var((0, 2, 3), keepdim=True)
        batch_std = (batch_var + self.eps).sqrt()
        running_std = (self.running_var + self.eps).sqrt()
        r = torch.clamp(batch_std / running_std, min=1 / self.r_max, max=self.r_max).detach()
        d = torch.clamp((batch_mean - self.running_mean) / running_std, min=-self.d_max, max=self.d_max).detach()
        # Exponential moving averages: buf <- buf + momentum * (batch - buf).
        self.running_mean.lerp_(batch_mean, self.momentum)
        self.running_var.lerp_(batch_var, self.momentum)
        return batch_mean, batch_std, r, d

    @custom_fwd(cast_inputs=torch.float32)
    def forward(self, input):
        if self.training:
            # Stats update must not be traced by autograd; the normalization
            # below still is (r and d are already detached).
            with torch.no_grad():
                mean, std, r, d = self.update_stats(input)
            input = (input - mean) / std * r + d
        else:
            # Inference: normalize with the running statistics.  (The original
            # bound a variable named `std` to running_var — a variance — and
            # then ignored it; the math below is what it actually computed.)
            std = (self.running_var + self.eps).sqrt()
            input = (input - self.running_mean) / std
        if self.affine:
            return self.weight * input + self.bias
        return input
if __name__ == '__main__':
    # Smoke test: push a random batch through the layer (training mode by default).
    m = BatchReNorm2D(100)
    input = torch.randn(20, 100, 35, 45)
    output = m(input)
| 2,983 | 1,051 |
import enum
__all__ = ["TokenType", "Token", "lookup_ident"]
class TokenType(enum.Enum):
    """Every token kind the lexer can emit.

    Operator and delimiter members use the literal lexeme as their value;
    literal-class and keyword members use a symbolic name.
    """
    ILLEGAL = "ILLEGAL"  # unrecognised input character
    EOF = "EOF"          # end of input
    # Identifiers and literals
    IDENT = "IDENT"
    INT = "INT"
    STRING = "STRING"
    # Operators
    ASSIGN = "="
    PLUS = "+"
    MINUS = "-"
    BANG = "!"
    ASTERISK = "*"
    SLASH = "/"
    MODULO = "%" # Additional
    LT = "<"
    GT = ">"
    EQ = "=="
    NOT_EQ = "!="
    # Delimiters
    COMMA = ","
    SEMICOLON = ";"
    COLON = ":"
    DOT = "." # Additional
    LPAREN = "("
    RPAREN = ")"
    LBRACE = "{"
    RBRACE = "}"
    LBRACKET = "["
    RBRACKET = "]"
    # Keywords (mapped from source text via lookup_ident / KEYWORDS)
    FUNCTION = "FUNCTION"
    LET = "LET"
    TRUE = "TRUE"
    FALSE = "FALSE"
    IF = "IF"
    ELSE = "ELSE"
    RETURN = "RETURN"
    CONST = "CONST"
    WHILE = "WHILE"
class Token:
    """A lexical token: a type tag plus the literal text it was scanned from."""
    def __init__(self, tp: TokenType, literal: str) -> None:
        self.tp = tp            # token category
        self.literal = literal  # exact source text
    def __repr__(self) -> str:
        return f"<Token type: {self.tp} literal: {self.literal}>"
    # str() output is identical to repr() by design.
    __str__ = __repr__
# Reserved words and the token types they map to.
KEYWORDS = {
    "fn": TokenType.FUNCTION,
    "let": TokenType.LET,
    "true": TokenType.TRUE,
    "false": TokenType.FALSE,
    "if": TokenType.IF,
    "else": TokenType.ELSE,
    "return": TokenType.RETURN,
    "const": TokenType.CONST,
    "while": TokenType.WHILE,
}
def lookup_ident(ident: str) -> TokenType:
    """Return the keyword token type for *ident*, or IDENT when not reserved."""
    try:
        return KEYWORDS[ident]
    except KeyError:
        return TokenType.IDENT
| 1,657 | 652 |
from collections import deque as LL
class VM_Manager:
    """Segment/page-table virtual-memory simulator.

    Virtual addresses are s|p|w bit fields (9/9/9 bits).  PM models physical
    memory: PM[2s] holds segment s's size and PM[2s+1] its page-table frame;
    negative values denote disk blocks for demand paging.  D models the
    paging disk as 512 blocks of 1024 words.
    """
    def __init__(self):
        self.s_size = 9  # segment-number bits
        self.p_size = 9  # page-number bits
        self.w_size = 9  # word-offset bits
        self.PM = [None] * 2**19  # PM[524288]
        # Paging disk: one INDEPENDENT row per block.  The original used
        # [[None] * 2**10] * 2**9, which makes all 512 rows aliases of one
        # list, so a write to D[b][p] showed up in every block.
        self.D = [[None] * 2**10 for _ in range(2**9)]
        self.free_frames = LL([i for i in range(2**10)])
        self.occupied_frames = [0, 1]  # frames 0 and 1 hold the segment table
    def get_free_frame(self):
        """Pop free frames until one not marked occupied appears.

        NOTE(review): raises IndexError if the free list is exhausted.
        """
        while True:
            frame = self.free_frames.popleft()
            if frame not in self.occupied_frames:
                return frame
    def create_ST(self, s, z, f):
        """Record segment s: size z, page table in frame f (negative f = on disk)."""
        if f >= 0:
            self.occupied_frames.append(f)
        self.PM[2 * s] = z
        PT_idx = 2 * s + 1
        self.PM[PT_idx] = f
    def create_PT(self, s, p, f):
        """Set the frame for page p of segment s, in memory or on the disk."""
        PT = self.PM[2 * s + 1]
        if PT < 0:
            # Page table is paged out: write the entry into its disk block.
            self.D[-PT][p] = f
        else:
            self.occupied_frames.append(f)
            self.PM[PT * 512 + p] = f
    def translate_VA(self, VA):
        """Split a virtual address into (segment, page, offset, page|offset)."""
        s = VA >> (self.p_size + self.w_size)
        p = (VA >> self.w_size) & 2 ** self.p_size - 1
        w = VA & 2 ** self.w_size - 1
        pw = VA & 2 ** (self.p_size + self.w_size) - 1
        return s, p, w, pw
    def PA(self, s, p, w, pw):
        """Translate to a physical address, or -1 when pw exceeds the segment size."""
        if pw >= self.PM[2 * s]:
            return -1
        PT = self.PM[2 * s + 1]
        if PT < 0:
            # Demand-page the page table itself into a fresh frame.
            f1 = self.get_free_frame()
            self.PM[f1 * 512 + p] = self.D[-PT][p]
            PT = f1
        pg = self.PM[PT * 512 + p]
        if pg < 0:
            # Demand-page the page.  NOTE(review): the PT entry in PM is not
            # updated here — confirm that matches the assignment's spec.
            f2 = self.get_free_frame()
            pg = f2
        return pg * 512 + w
def line_input(string):
    """Parse a whitespace-separated string of ints into a list of triples.

    A trailing group of fewer than three values is discarded.
    """
    values = [int(tok) for tok in string.split()]
    usable = len(values) - len(values) % 3
    return [values[i:i + 3] for i in range(0, usable, 3)]
if __name__ == "__main__":
    # Two independent simulations: with demand paging (dp) and without.
    manager_no_dp = VM_Manager()
    manager_dp = VM_Manager()
    # NOTE(review): file handles are never closed; acceptable for a one-shot script.
    init_dp = open('init-dp.txt','r')
    input_dp = open('input-dp.txt', 'r')
    init_no_dp = open('init-no-dp.txt','r')
    input_no_dp = open('input-no-dp.txt', 'r')
    # Init line 1: segment-table triples (segment, size, frame).
    STs_dp = line_input(init_dp.readline())
    for ST in STs_dp:
        manager_dp.create_ST(*ST)
    STs_no_dp = line_input(init_no_dp.readline())
    for ST in STs_no_dp:
        manager_no_dp.create_ST(*ST)
    # Init line 2: page-table triples (segment, page, frame).
    PTs_dp = line_input(init_dp.readline())
    for PT in PTs_dp:
        manager_dp.create_PT(*PT)
    PTs_no_dp = line_input(init_no_dp.readline())
    for PT in PTs_no_dp:
        manager_no_dp.create_PT(*PT)
    # Virtual addresses to translate for each run.
    VAs_dp = list(map(int, input_dp.readline().split()))
    VAs_no_dp = list(map(int, input_no_dp.readline().split()))
    PAs_dp = []
    for idx, address in enumerate(VAs_dp, start=1):
        spw_pw = manager_dp.translate_VA(address)
        PA = manager_dp.PA(*spw_pw)
        PAs_dp.append(PA)
    PAs_no_dp = []
    for idx, address in enumerate(VAs_no_dp, start=1):
        spw_pw = manager_no_dp.translate_VA(address)
        PA = manager_no_dp.PA(*spw_pw)
        PAs_no_dp.append(PA)
    print(*PAs_no_dp)
    print(*PAs_dp)
    # Persist both result rows: no-dp first, dp second.
    with open('output.txt','w') as out:
        out.write(' '.join(map(str,PAs_no_dp)) + '\n')
        out.write(' '.join(map(str,PAs_dp)))
import os
from flask import session
from src.utils.common.common_helper import load_project_encdoing, load_project_model, load_project_pca, \
load_project_scaler, read_config
from loguru import logger
from from_root import from_root
from src.utils.databases.mysql_helper import MySqlHelper
from src.preprocessing.preprocessing_helper import Preprocessing
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
import pandas as pd
import numpy as np
# Load the app configuration and wire up the shared logger and MySQL connection.
config_args = read_config("./config.yaml")
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
mysql = MySqlHelper.get_connection_obj()
"""Replays a project's recorded preprocessing actions on a DataFrame and runs the saved model to produce predictions.
"""
def make_prediction(df):
    """Replay the project's recorded preprocessing/feature-engineering
    actions on *df* and append model predictions.

    Args:
        df (pandas.DataFrame): Raw input frame. The session's target column,
            if present, is dropped before the pipeline runs.

    Returns:
        pandas.DataFrame: The feature frame with the predicted target
        inserted as the first column when actions were recorded; otherwise
        the (untransformed) feature frame.

    Raises:
        Exception: If *df* is None or any pipeline step fails.
    """
    try:
        logger.info(f"Started Prediction!!1")
        if df is None:
            logger.info(f"DataFrame is null")
            raise Exception("Data Frame is None")
        else:
            # SECURITY(review): ProjectId is interpolated directly into the
            # SQL text. session['pid'] is server-set, but a parameterized
            # query would be safer — confirm fetch_all supports parameters.
            query_ = f"""Select Name, Input,Output,ActionDate from tblProject_Actions_Reports
                    Join tblProjectActions on tblProject_Actions_Reports.ProjectActionId=tblProjectActions.Id
                    where ProjectId={session['pid']}"""
            action_performed = mysql.fetch_all(query_)
            print(action_performed)
            # Keep only feature columns; the target is re-inserted at the end.
            feature_columns = [col for col in df.columns if col != session['target_column']]
            df = df.loc[:, feature_columns]
            # NOTE(review): df_org aliases df (no copy) — the insert below
            # mutates the pre-pipeline frame only because most steps rebind df.
            df_org = df
            if len(action_performed) > 0:
                # Replay each recorded action in stored order.
                # action = (Name, Input, Output, ActionDate)
                for action in action_performed:
                    if action[0] == 'Delete Column':
                        df = Preprocessing.delete_col(df, action[1].split(","))
                    elif action[0] == 'Change Data Type':
                        df = FeatureEngineering.change_data_type(df, action[1], action[2])
                    elif action[0] == 'Column Name Change':
                        df = FeatureEngineering.change_column_name(df, action[1], action[2])
                    elif action[0] == 'Encdoing':
                        # NOTE(review): 'Encdoing' matches the misspelled action
                        # name stored in the DB — do not "fix" without a data
                        # migration.
                        cat_data = Preprocessing.col_seperator(df, 'Categorical_columns')
                        num_data = Preprocessing.col_seperator(df, 'Numerical_columns')
                        encoder = load_project_encdoing()
                        # columns=action[1].split(",")
                        # df_=df.loc[:,columns]
                        df_ = encoder.transform(cat_data)
                        df = pd.concat([df_, num_data], axis=1)
                    elif action[0] == 'Scalling':
                        scalar = load_project_scaler()
                        columns = df.columns
                        df = scalar.transform(df)
                        # transform returns an ndarray; restore column labels.
                        df = pd.DataFrame(df, columns=columns)
                    elif action[0] == 'PCA':
                        pca = load_project_pca()
                        columns = df.columns
                        df_ = pca.transform(df)
                        # Keep only the component count recorded for the project.
                        df_ = df_[:, :int(action[1])]
                        df = pd.DataFrame(df_, columns=[f"Col_{col + 1}" for col in np.arange(0, df_.shape[1])])
                    elif action[0] == 'Custom Script':
                        # SECURITY(review): exec of DB-stored code — acceptable
                        # only for trusted-admin content; audit before widening
                        # write access to tblProject_Actions_Reports.
                        if action[1] is not None:
                            exec(action[1])
                model = load_project_model()
                result = model.predict(df)
                # Predictions become the first column of the pre-pipeline frame.
                df_org.insert(loc=0, column=session['target_column'], value=result)
                return df_org
            else:
                pass
            return df
    except Exception as e:
        logger.info('Error in Prediction ' + str(e))
        raise Exception(e)
| 3,750 | 1,026 |
"""Views for admin app."""
import random
import os
import requests
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponseRedirect, JsonResponse
from django.urls import reverse
from django.core.exceptions import ObjectDoesNotExist
from .models import Settings
@user_passes_test(lambda u: u.is_superuser)
def users(request):
    """User management page of administrative app."""
    # A freshly generated temp password is displayed exactly once:
    # pop() consumes it from the session (empty string when absent).
    temporary_password = request.session.pop('temp_password', '')
    context = {
        'users': User.objects.all(),
        'temporary_password': temporary_password,
    }
    return render(request, 'admin/users.html', context)
@user_passes_test(lambda u: u.is_superuser)
def delete_user(request):
    """View to handle the deletion of users.

    Deletes the user whose primary key is POSTed as 'user', then redirects
    back to the user-management page.
    """
    user = User.objects.get(pk=int(request.POST['user']))
    # By Python operator precedence this condition reads:
    #   (not user.is_superuser) or (request.user.profile.server_owner and user != request.user)
    # i.e. any superuser may delete non-superusers, and the server owner may
    # additionally delete superusers other than themselves.
    # NOTE(review): if the intent was "never delete yourself" in ALL cases,
    # the expression needs parentheses — confirm against the spec.
    if (not user.is_superuser or
            request.user.profile.server_owner and user != request.user):
        user.delete()
    return HttpResponseRedirect(reverse('admin:users'))
@user_passes_test(lambda u: u.is_superuser)
def create_user(request):
    """View to handle the creation of a user.

    Creates the user from POSTed fields with a random 8-hex-character
    temporary password, which is stashed in the session so the users page
    can display it exactly once.
    """
    # Local import: the rest of the module does not need it.
    import secrets
    # Passwords are security-sensitive: use the secrets module, not random
    # (random is predictable and unsuitable for credentials).
    password = ''.join(secrets.choice('0123456789ABCDEF') for _ in range(8))
    user = User.objects.create_user(
        username=request.POST['username'],
        first_name=request.POST['first_name'],
        last_name=request.POST['last_name'],
        email=request.POST['email'],
        password=password,
    )
    user.clean()
    user.save()
    request.session['temp_password'] = password
    return HttpResponseRedirect(reverse('admin:users'))
@user_passes_test(lambda u: u.is_superuser)
def reset_collabodev(_request):
    """View to facilitate the complete reset of CollaboDev."""
    settings = Settings.objects.get(pk=1)
    # NOTE(review): this assignment is never save()d, and the flush below
    # deletes every row anyway — reset_page detects the reset via the row's
    # absence (ObjectDoesNotExist). Confirm whether this line is dead code
    # or a missing settings.save().
    settings.settings_initialised = False
    # Danger: irreversibly removes ALL database rows.
    os.system('python manage.py flush --noinput')
    return HttpResponseRedirect(reverse('admin:reset_page'))
def reset_page(request):
    """Page displaying reset message post reset."""
    # After a flush the Settings row is gone; 'derail' flags the case where
    # it still exists (i.e. no reset actually happened).
    try:
        Settings.objects.get(pk=1)
    except ObjectDoesNotExist:
        context = {}
    else:
        context = {'derail': True}
    return render(request, 'admin/reset_page.html', context)
@user_passes_test(lambda u: u.is_superuser)
def github(request):
    """
    Github Integration settings page.

    Provides administrators with the ability to associate a GitHub
    Organisation with CollaboDev and import all of its repositories.
    """
    session_data = dict(request.session)
    # One-shot validation flags: consume them so they only render once.
    request.session.pop('invalid_org_name', None)
    request.session.pop('valid_org_name', None)
    settings = Settings.objects.get(pk=1)
    session_data['current_org'] = settings.github_org_name
    if request.method == 'POST':
        org_name = request.POST['org_name']
        org_api_url = 'https://api.github.com/orgs/' + org_name
        # timeout added: without it a hung GitHub API call blocks this
        # request indefinitely (requests has no default timeout).
        org_data = requests.get(org_api_url, timeout=10).json()
        try:
            if org_data['login'] == org_name:
                settings.github_org_name = org_name
                settings.save()
                request.session['valid_org_name'] = True
            else:
                # 'login' present but mismatched — treat as invalid org.
                raise KeyError
        except KeyError:
            request.session['invalid_org_name'] = True
        return HttpResponseRedirect(reverse('admin:github'))
    return render(request, 'admin/github.html', session_data)
@user_passes_test(lambda u: u.is_superuser)
def update(_request):
    """Facilitates the updating of CollaboDev to its latest version.

    Returns a JSON payload whose 'response' is 1 (updated), 2 (already up
    to date) or -1 (update unavailable / unrecognised git output).
    """
    update_response = ''
    # os.popen('git pull https://github.com/dob9601/CollaboDev.git').read()
    # Default to -1 so an unrecognised git message can never leave
    # 'response' unbound (the original if/elif chain had no else branch,
    # risking UnboundLocalError).
    response = -1
    if update_response.startswith('Updating'):
        response = 1
    elif update_response == 'Already up to date.\n':
        response = 2
    payload = {
        'success': True,
        'response': response
    }
    return JsonResponse(payload)
def first_time_setup(request):
    """First time setup for when CollaboDev is first started up.

    Stage 0: prompt for the setup code (generating and persisting one on
    the first visit). Stage 1: setup code accepted, admin form shown (or
    password mismatch). Stage 2: admin account created, setup complete.
    """
    # Local import: the setup code is a secret token, so use a CSPRNG.
    import secrets
    settings = Settings.objects.get(pk=1)
    context = {}
    if request.method == 'POST':
        if 'setup-key' in request.POST:
            # Stage 0 -> 1: validate the setup code from setup-key.txt.
            if request.POST['setup-key'] == settings.settings_setup_code:
                context['stage'] = 1
        else:
            # Stage 1 -> 2: create the server-owner admin account.
            context = {}
            admin_pwd = request.POST['admin-password']
            admin_pwd_conf = request.POST['admin-password-conf']
            if admin_pwd == admin_pwd_conf:
                admin_user = User.objects.create_user(
                    username=request.POST['admin-username'],
                    first_name=request.POST['admin-first-name'],
                    last_name=request.POST['admin-last-name'],
                    email=request.POST['admin-email'],
                    password=admin_pwd,
                    is_superuser=True,
                )
                admin_user.profile.server_owner = True
                admin_user.save()
            else:
                context['stage'] = 1
                # Raise password error
            # Empty context means the account was created successfully.
            if context == {}:
                context['stage'] = 2
                settings.settings_initialised = True
                settings.save()
    else:
        settings_model = Settings.objects.get(pk=1)
        print('COLLABODEV SETUP CODE: ' + settings_model.settings_setup_code)
        context['stage'] = 0
        # Generate and persist a fresh setup code when the key file is
        # missing or no code has been stored yet. (The original opened the
        # file purely to probe existence — and leaked the handle when it
        # existed; os.path.exists does the same check without opening.)
        if not os.path.exists("setup-key.txt") or settings.settings_setup_code == "":
            key = ''.join(secrets.choice('0123456789ABCDEF') for _ in range(16))
            key_string = "CollaboDev Setup Code: " + key
            with open("setup-key.txt", "w") as key_file:
                key_file.write(key_string)
            settings.settings_setup_code = key
            settings.save()
    return render(request, 'admin/first-time-setup.html', context)
| 6,343 | 1,933 |
from spaceone.api.repository.v1 import schema_pb2, schema_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class Schema(BaseAPI, schema_pb2_grpc.SchemaServicer):
    """gRPC servicer that forwards schema CRUD/list/stat calls to SchemaService."""

    pb2 = schema_pb2
    pb2_grpc = schema_pb2_grpc

    def create(self, request, context):
        """Create a schema and return its SchemaInfo."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            return self.locator.get_info('SchemaInfo', service.create(params))

    def update(self, request, context):
        """Update a schema and return its SchemaInfo."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            return self.locator.get_info('SchemaInfo', service.update(params))

    def delete(self, request, context):
        """Delete a schema; responds with EmptyInfo."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            service.delete(params)
            return self.locator.get_info('EmptyInfo')

    def get(self, request, context):
        """Fetch a single schema's SchemaInfo."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            return self.locator.get_info('SchemaInfo', service.get(params))

    def list(self, request, context):
        """List schemas with a total count, honouring the minimal view flag."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            items, total = service.list(params)
            return self.locator.get_info('SchemasInfo', items, total, minimal=self.get_minimal(params))

    def stat(self, request, context):
        """Run a stat query and return StatisticsInfo."""
        params, metadata = self.parse_request(request, context)
        with self.locator.get_service('SchemaService', metadata) as service:
            return self.locator.get_info('StatisticsInfo', service.stat(params))
from computer_communication_framework.base_connection import Connection
import subprocess
import re
import datetime
class BasePbs(Connection):
    """
    Template connection class for a standard PBS/TORQUE cluster.

    Inherits from base_connection.Connection but deliberately omits some of
    its abstract methods (e.g. checkDiskUsage), so it cannot be instantiated
    directly: subclass it and implement the missing methods. It provides the
    atomistic remote-control commands on which more specific programs build.
    """

    def __init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email,
                 base_output_path='/base/output/path', base_runfiles_path='/base/run/file/path',
                 master_dir='/master/dir',
                 info_about_cluster='Example Cluster Name (ECN): Advanced Computing Research Centre, somewhere.',
                 activate_virtual_environment_list=None):
        """Store cluster-specific paths/metadata and delegate credentials to Connection.

        Args mirror Connection.__init__ plus PBS-specific defaults. The
        virtual-environment activation commands default to an Anaconda module
        load + source activate pair.
        """
        Connection.__init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email)
        self.submit_command = 'qsub'
        self.information_about_cluster = info_about_cluster
        self.base_output_path = base_output_path
        self.base_runfiles_path = base_runfiles_path
        self.master_dir = master_dir
        # BUG FIX: the default was a mutable list shared by every instance;
        # build it per instance instead.
        if activate_virtual_environment_list is None:
            activate_virtual_environment_list = ['module add python-anaconda-4.2-3.5', 'source activate virtual_environment_name']
        self.activate_venv_list = activate_virtual_environment_list

    # INSTANCE METHODS
    def checkQueue(self, job_number):
        """
        Return the array indices of *job_number* still in the queue.

        Satisfies the corresponding Connection abstract method.

        Args:
            job_number (int): Unique id PBS assigned to the job (a job may be
                an array of jobs).

        Returns:
            output_dict (dict): Has keys 'return_code', 'stdout', and 'stderr'.
        """
        # -t lists every array sub-job of a job number; the awk/grep pipeline
        # extracts the [array-index] portion of each matching job id.
        grep_part_of_cmd = "qstat -tu " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"[][]\" \'{print $2}\'"
        # All commands are funnelled through the inherited checkSuccess.
        return self.checkSuccess(self.sendCommand([grep_part_of_cmd]))

    def createPbsSubmissionScriptTemplate(self, pbs_job_name, no_of_nodes, no_of_cores, walltime, queue_name, job_number, outfile_name_and_path, errorfile_name_and_path, initial_message_in_code = None, shebang = "#!/bin/bash"):
        """
        Build the generic (non-job-specific) part of a PBS submission script.

        Args:
            pbs_job_name (str): Name given to the queuing system.
            no_of_nodes (int): Number of nodes to request.
            no_of_cores (int): Number of cores to request.
            walltime (str): Maximum runtime, 'HH:MM:SS'.
            queue_name (str): Which PBS/Torque queue to submit to.
            job_number: Job-array spec used for '#PBS -t' (e.g. '1-100').
            outfile_name_and_path (str): Absolute path for stdout files.
            errorfile_name_and_path (str): Absolute path for stderr files.
            initial_message_in_code (str|None): Optional comment placed near
                the top of the script; omitted when None.
            shebang (str): Interpreter line, defaults to bash.

        Returns:
            list of str: One script line per element, newline-terminated.
        """
        lines = [shebang + "\n", "\n",
                 "# This script was created using Oliver Chalkley's computer_communication_framework library - https://github.com/Oliver-Chalkley/computer_communication_framework." + "\n"]
        # Only emit the user's opening message when one was supplied (the
        # original appended a dangling '# ' element even when it was None).
        if initial_message_in_code is not None:
            lines += ["# " + initial_message_in_code + "\n"]
        # BUG FIX: this section previously *reassigned* the list, silently
        # discarding the shebang/attribution lines built above.
        # NOTE(review): 'surename_of_user' matches the spelling used by the
        # parent class elsewhere in this project — confirm before renaming.
        lines += ["# Title: " + pbs_job_name + "\n",
                  "# User: " + self.forename_of_user + ", " + self.surename_of_user + ", " + self.user_email + "\n"]
        # BUG FIX: was `type(self.affiliation) is not None`, which is always
        # True (a type object is never None).
        if self.affiliation is not None:
            lines += ["# Affiliation: " + self.affiliation + "\n"]
        lines += ["# Last Updated: " + str(datetime.datetime.now()) + "\n", "\n",
                  "## Job name" + "\n",
                  "#PBS -N " + pbs_job_name + "\n", "\n",
                  "## Resource request" + "\n",
                  "#PBS -l nodes=" + str(no_of_nodes) + ":ppn=" + str(no_of_cores) + ",walltime=" + walltime + "\n",
                  "#PBS -q " + queue_name + "\n", "\n",
                  "## Job array request" + "\n",
                  # BUG FIX: previously referenced the undefined name
                  # `job_array_numbers`; the array spec is the job_number arg.
                  "#PBS -t " + str(job_number) + "\n", "\n",
                  "## designate output and error files" + "\n",
                  # BUG FIX: -e (stderr) and -o (stdout) were swapped.
                  "#PBS -e " + errorfile_name_and_path + "\n",
                  "#PBS -o " + outfile_name_and_path + "\n", "\n",
                  "# print some details about the job" + "\n",
                  'echo "The Array ID is: ${PBS_ARRAYID}"' + "\n",
                  'echo Running on host `hostname`' + "\n",
                  'echo Time is `date`' + "\n",
                  'echo Directory is `pwd`' + "\n",
                  'echo PBS job ID is ${PBS_JOBID}' + "\n",
                  'echo This job runs on the following nodes:' + "\n",
                  'echo `cat $PBS_NODEFILE | uniq`' + "\n", "\n"]
        return lines

    def createStandardSubmissionScript(self, file_name_and_path, list_of_job_specific_code, pbs_job_name, no_of_nodes, no_of_cores, queue_name, outfile_name_and_path, errorfile_name_and_path, walltime, initial_message_in_code = None, file_permissions = "700", shebang = "#!/bin/bash", job_number = 1):
        """
        Write a complete PBS submission script (template + job-specific code).

        Args:
            file_name_and_path (str): Where to save the script, e.g.
                /path/to/file/pbs_submission_script.sh.
            list_of_job_specific_code (list of str): Lines appended after the
                template.
            pbs_job_name (str): Name given to this job.
            no_of_nodes (int): Number of nodes to request.
            no_of_cores (int): Number of cores to request.
            queue_name (str): Which PBS/Torque queue to submit to.
            outfile_name_and_path (str): Absolute path for stdout files.
            errorfile_name_and_path (str): Absolute path for stderr files.
            walltime (str): Maximum runtime, 'HH:MM:SS'.
            initial_message_in_code (str|None): Optional top-of-script comment.
            file_permissions (str|None): chmod mode for the script; None skips
                the chmod. The script must be executable to submit.
            shebang (str): Interpreter line.
            job_number: Job-array spec forwarded to the template (new,
                defaulted parameter — backward compatible).
        """
        # BUG FIX: previously called a non-existent method name
        # (createPbsSubmissionScriptCommands) with a scrambled argument order
        # and an undefined `job_number` variable.
        pbs_script_list = self.createPbsSubmissionScriptTemplate(
            pbs_job_name, no_of_nodes, no_of_cores, walltime, queue_name, job_number,
            outfile_name_and_path, errorfile_name_and_path,
            initial_message_in_code=initial_message_in_code, shebang=shebang)
        # Append the code that is specific to this job.
        pbs_script_list += list_of_job_specific_code
        # Write the script locally. Permissions are applied below so we do not
        # depend on createLocalFile's keyword (the original passed the typo'd
        # `file_permisions` and a hard-coded "700").
        Connection.createLocalFile(file_name_and_path, pbs_script_list)
        if file_permissions is not None:
            # BUG FIX: chmod previously targeted an undefined
            # `output_filename` and ignored the file_permissions argument.
            subprocess.check_call(["chmod", str(file_permissions), str(file_name_and_path)])
        return

    def getJobIdFromSubStdOut(self, stdout):
        """
        Extract the integer job ID from the stdout of a queue submission.

        Args:
            stdout (str): The stdout after submitting a job to the queue.

        Returns:
            int: The job ID (first run of digits found in *stdout*).
        """
        return int(re.search(r'\d+', stdout).group())
import random
import re
import json
from combat import *
from travel import *
from pdb import set_trace
def load_words(path):
    """Yield the stripped, non-empty, non-comment (leading '#') lines of *path*."""
    with open(path, 'r') as word_file:
        for raw_line in word_file:
            stripped = raw_line.strip()
            if stripped and stripped[0] != "#":
                yield stripped
class MarkovGenerator:
    """Character-level Markov chain of order *length*, trained on *words*.

    States are tuples of the previous *length* characters (None-padded at the
    start); the sentinel transition to None marks end-of-word.
    """

    def __init__(self, words, length):
        self.length = length
        self.transitions = {}
        for word in words:
            state = (None,) * length
            for char in word:
                self.addTransition(state, char)
                state = state[1:] + (char,)
            # Record end-of-word so generate() knows where it may stop.
            self.addTransition(state, None)

    def addTransition(self, key, char):
        """Append *char* to the list of successors observed after *key*."""
        self.transitions.setdefault(key, []).append(char)

    def generate(self):
        """Random-walk the chain from the start state and return a new word."""
        produced = []
        state = (None,) * self.length
        while state in self.transitions:
            successor = random.choice(self.transitions[state])
            if successor is None:
                break
            produced.append(successor)
            state = state[1:] + (successor,)
        return ''.join(produced)
# Module-level corpora: Markov generators for proper nouns, plain word lists
# for everything else. Paths are relative to the working directory at import.
town_generator = MarkovGenerator(load_words('../data/towns.txt'), 2)
name_generator = MarkovGenerator(load_words('../data/names_male.txt'), 3)
occupation_list = list(load_words('../data/occupations.txt'))
color_list = list(load_words('../data/colors.txt'))
landform_list = list(load_words('../data/landforms.txt'))
weapon_list = list(load_words('../data/weapons.txt'))
# Monster definitions are structured records, hence JSON rather than a word list.
with open('../monsters.json', 'r') as monster_file:
    monsters_list = json.load(monster_file)
def expand(sentence, **kwargs):
    """Recursively expand every <tag> placeholder in *sentence*.

    A tag of the form <!name> is replaced with kwargs['name']; <fn> calls the
    module-level function fn(); an optional ':filter' suffix (<tag:filter>)
    passes the replacement through the global function filter(). Expansion
    repeats until no tags remain, so replacements may themselves contain tags.
    """
    placeholder = re.compile('<([!a-zA-Z0-9:_]*?)>')
    while True:
        found = list(placeholder.finditer(sentence))
        if not found:
            return sentence
        # Substitute right-to-left so earlier match offsets stay valid.
        for m in reversed(found):
            pieces = m.group(1).split(':')
            if pieces[0][0] == '!':
                value = kwargs[pieces[0][1:]]
            else:
                value = globals()[pieces[0]]()
            if len(pieces) >= 2:
                value = globals()[pieces[1]](value)
            sentence = sentence[:m.start(0)] + value + sentence[m.end(0):]
def title(words):
    """Capitalize the first letter of each space-separated word in *words*.

    Uses word[:1] so empty segments (double spaces, empty input) are passed
    through instead of raising IndexError as the original did.
    """
    return ' '.join((word[:1].upper() + word[1:]) for word in words.split(' '))
def sentence(words):
    """Capitalize only the first character of *words*.

    Uses words[:1] so an empty string returns '' instead of raising
    IndexError as the original did.
    """
    return words[:1].upper() + words[1:]
def book_title():
    """Markdown H1 template for the book title; placeholders expand later."""
    heading = '# <!pc_name>\'s Journey to Defeat the Evil Wizard <!wiz_name> '
    subtitle = '_(and his many battles along the way)_\n\n'
    return heading + subtitle
def chapter_title(title):
    """Markdown H2 with an HTML anchor so chapters can be linked to."""
    anchor = '## <a name="chapter<!chapter_number>"></a> '
    return '{}{}\n\n'.format(anchor, title)
def chapter_title_plain():
    """Unformatted chapter-heading template (no markdown/anchor markup)."""
    template = 'Chapter <!chapter_number>: <!town_name> and the <!monster_name:title>'
    return template
def town():
    """Generate a fresh Markov town name."""
    return town_generator.generate()
def name():
    """Generate a fresh Markov (male) character name."""
    return name_generator.generate()
def occupation():
    """Pick a random occupation from the word list."""
    return random.choice(occupation_list)
def color():
    """Pick a random color from the word list."""
    return random.choice(color_list)
def landform():
    """Pick a random landform from the word list."""
    return random.choice(landform_list)
def weapon():
    """Pick a random weapon from the word list."""
    return random.choice(weapon_list)
def positive_trait():
    """A random heroic adjective for describing the protagonist."""
    heroic_adjectives = (
        'bold',
        'courageous',
        'daring',
        'epic',
        'fearless',
        'gallant',
        'grand',
        'gutsy',
        'noble',
        'valiant',
        'classic',
        'elevated',
        'bigger than life',
        'dauntless',
        'doughty',
        'exaggerated',
        'fire-eating',
        'grandiose',
        'gritty',
        'gutty',
        'high-flown',
        'impavid',
        'inflated',
        'intrepid',
        'lion-hearted',
        'mythological',
        'tall standing',
        'stouthearted',
        'unafraid',
        'valorous',
        'undaunted',
    )
    return random.choice(heroic_adjectives)
def negative_trait():
    """A random villainous/unpleasant adjective (duplicates intentionally
    kept to preserve the original selection weighting)."""
    vile_adjectives = (
        'hideous',
        'smelly',
        'terrible',
        'menacing',
        'awful',
        'ruinous',
        'evil',
        'abhorrent',
        'abominable',
        'appalling',
        'awful',
        'cruel',
        'disgusting',
        'dreadful',
        'eerie',
        'frightful',
        'ghastly',
        'grim',
        'grisly',
        'gruesome',
        'heinous',
        'hideous',
        'horrendous',
        'horrid',
        'lousy',
        'nasty',
        'scandalous',
        'scary',
        'shameful',
        'shocking',
        'terrible',
        'terrifying',
        'beastly',
        'detestable',
        'disagreeable',
        'execrable',
        'fairy',
        'fearful',
        'loathsome',
        'lurid',
        'mean',
        'obnoxious',
        'offensive',
        'repellent',
        'repulsive',
        'revolting',
        'sickie',
        'ungodly',
        'unholy',
        'unkind',
    )
    return random.choice(vile_adjectives)
def pc_name():
    """A way of referring to the protagonist; heavily biased toward 'he'."""
    references = (
        '<!pc_name>',
        'the <positive_trait> <!pc_name>',
        '<!pc_name> the <positive_trait>',
        'our hero',
        'the adventurer',
        'he',
        'he',
        'he',
        'he',
    )
    return random.choice(references)
def activity():
    """Something a random townsperson is seen doing."""
    street_scenes = (
        'sat by the side of the road',
        'rushed by quickly, ignoring him',
        'gazed at him from an open window',
        'talked excitedly with what appeared to be a <occupation>',
        'slowly carried supplies',
        'slept in an alleyway',
        'eyed him suspiciously',
        'scuttled out of his way',
        'stood by a market stall, negotiating with the <occupation>',
        'hawked fine imported goods from <town>',
        'bit into an apple',
        'finished an apple and tossed the core aside',
        'ran from person to person, asking if they had seen <name>',
        'loaded a market stall with wares',
        'threw punches',
    )
    return random.choice(street_scenes)
def town_people_sentence():
    """A full sentence of town color, built from <occupation>/<activity> tags."""
    sentence_forms = (
        'A <occupation> <activity>.',
        'While the <occupation> <activity>, a <occupation> <activity>.',
        'Two <occupation>s <activity>.',
        'The <occupation> <activity> with a <occupation>.',
        'Nearby, a <occupation> <activity>.',
    )
    return random.choice(sentence_forms)
def character_attribute():
    """A notable physical trait or piece of gear on the protagonist."""
    traits = (
        'unusual weapons',
        'foreboding cloak',
        'impressive armor',
        'strong forearms',
        'well-made boots',
        'determined look',
        'dangerous demeanor',
    )
    return random.choice(traits)
def number():
    """A small count (2-10 inclusive) rendered as a string."""
    return str(random.randint(2, 10))
def building():
    """A random town building."""
    structures = (
        'tavern',
        'inn',
        'barn',
        'church',
        'monastery',
        'cattle barn',
        'stables',
        'warehouse',
    )
    return random.choice(structures)
def direction():
    """'left' or 'right', weighted 2:1 toward left (original bias kept)."""
    turns = (
        'left',
        'right',
        'left',  # Bias towards left (for some reason)
    )
    return random.choice(turns)
def in_town_directions_end():
    """A terminating clause for a set of in-town directions."""
    endings = (
        'It\'s just to the <direction>.',
        'There\'s a small door.',
        'Look for the large hanging sign that reads \"<!armor_name> Fine Supplies\".',
    )
    return random.choice(endings)
def in_town_directions():
    """A directions fragment; some options recurse via the <in_town_directions> tag."""
    routes = (
        'down the street to the <building> and <direction>. You\'ll see a <building>. It\'s <in_town_directions>',
        'past the <building>. <in_town_directions_end>',
        'into the market and towards the <building>. Eventually you need to walk <in_town_directions>',
        'just a bit further down the street. <in_town_directions_end>',
    )
    return random.choice(routes)
def town_intro():
    """Return the full town-arrival scene as one templated string.

    Fixes two text defects: 'reminised' -> 'reminisced', and the third
    "responded" variant now starts with '. ' so it no longer fuses with the
    preceding clause (previously produced e.g. "...about the pastHe finally
    responded.").
    """
    return (
        '<!pc_name> followed a dirt path into the village of <!town_name>. <town_people_sentence> <town_people_sentence> '
        '<!pc_name> continued down the path. <town_people_sentence>\n\n'
        'Eventually, <!pc_name> arrived at the town square, where he found a <occupation>. ' +
        random.choice([
            'The man, eying his <character_attribute>, beckoned him forward.\n\n'
            '"Not many people around here like you." he said gruffly. "What makes you think you can step foot in these parts?"\n\n',
            '<!pc_name> approached him, hoping for some advice.\n\n'
        ]) +
        random.choice([
            '"My name is <!pc_name>, and it is my quest to defeat the evil wizard <!wiz_name>." <!pc_name> announced.\n\n',
            '"The evil wizard <!wiz_name> has terrorized these lands for far too long. I <!pc_name> have come to destroy him!" <!pc_name> exclaimed.\n\n',
            '"Do you remember the glory days before the evil wizard <!wiz_name> took over?" <!pc_name> asked. '
            '"I seek to destroy him and restore this kingdom\'s rightful rule!"\n\n'
        ]) +
        '<town_people_sentence> ' +
        random.choice([
            'The man eyed him thoughtfully',
            'He still looked suspicious',
            'The man sat in silence for a while',
            'The man quietly reminisced about the past'
        ]) +
        random.choice([
            ', then finally responded.\n\n',
            ', but eventually responded.\n\n',
            '. He finally responded.\n\n'
        ]) +
        random.choice([
            '"We have waited for your arrival for many years, <!pc_name>. Is there any way I can be of help?"\n\n',
            '"Our village of <!town_name> will gladly help you on your quest. What do you need?"\n\n'
        ]) +
        '"My weapons were badly damaged on the way here. Could you point me to your armory to get some new supplies?"\n\n' +
        random.choice([
            '"<!armor_name> is the best in town. His shop is <in_town_directions> ',
            '"The armory is <in_town_directions> You\'ll find <!armor_name>, the best weapons expert we\'ve got. ',
            '"<!armor_name> is <in_town_directions> Tell him I sent you. '
        ]) +
        random.choice([
            'And here, take a few gold pieces to buy the best." He reached into his pocket and pulled out <number> small coins. '
            '"I want that <!wiz_name> gone as much as anybody."\n\n',
            'Be careful out there. You\'re not the first to try this adventure. Men stronger than you have vanished or worse."\n\n',
            'I\'d show you myself, but I have urgent matters to attend to here in the square."\n\n'
        ]) +
        '<!pc_name> hurried towards the armory. <town_people_sentence> <town_people_sentence> '
        'Turning the corner, he saw the armory in front of him. He pushed the door open and walked inside.\n\n'
    )
def monster_name():
    """Return a random (whitespace-stripped) monster name from the global monsters_list."""
    names = [entry['name'].strip() for entry in monsters_list]
    return random.choice(names)
def monster_description(name):
    """Return the stored description for *name*, or a one-item fallback list.

    The first entry in monsters_list whose stripped name equals *name* wins;
    a missing entry or an empty description yields the generic sentence.
    """
    match = next((entry for entry in monsters_list if entry['name'].strip() == name), None)
    if match is not None and match['description']:
        return match['description']
    return ['The monster ' + name + ' is terrifying for sure, but I honestly don\'t know much about that beast.']
def armory_intro():
    """Return the armory opening scene: a greeting, the hero's question, then a monster warning."""
    greeting = random.choice([
        '<!armor_name> looked up from his work behind a counter at <!pc_name>.\n\n',
        'There was no one there. <!pc_name> cleared his throat and a man ran out from a backroom.\n\n'
    ])
    question = '"I\'m <!pc_name>, a brave adventurer seeking to destroy <!wiz_name>. What dangers lurk nearby?" he asked.\n\n'
    warning = random.choice([
        '<!armor_name> grabbed a dusty book from the shelf and flipped through it. Pictures of <monster_name>s and <monster_name>s flew by. '
        'Eventually he settled on a page and started to explain.\n\n',
        '<!armor_name> lifted up his tunic and pointed to a scar. "You see this?" he asked. "Only one monster can do this kind of damage. The <!monster_name>."\n\n',
        '"Brave you say? You may have fought the <monster_name>, or perhaps even the <monster_name>, but that\'s nothing compared to the <!monster_name> we\'ve got."\n\n'
    ])
    return greeting + question + warning
_EXPLANATION_FRAMES = (
    '"<!description>" <!armor_name> explained.\n\n',
    'The armorer sighed and continued. "<!description>"\n\n',
    '<!armor_name> returned to the book of monsters on the desk and pointed at the terrifying illustration. "<!description>"\n\n'
)


def armory_explanation():
    """Return a sentence that frames the <!description> token in the armorer's voice."""
    return random.choice(_EXPLANATION_FRAMES)
_FOLLOW_UPS = (
    '<!pc_name> looked surprised. "Incredible! Is there anything else I should know?"\n\n',
    '"But my weapons may be too weak. Are there any other ways to defeat the <!monster_name>?" <!pc_name> asked.\n\n',
    '<!pc_name> slipped the man <number> coins. "I get the feeling you\'ve been here for a while. Surely you know more than that."\n\n',
    '"I could handle that. Tell me again, what makes the <!monster_name> so bad?" <!pc_name> responded.\n\n'
)


def armory_more():
    """Return a follow-up prompt the hero uses to ask the armorer for more detail."""
    return random.choice(_FOLLOW_UPS)
def armory_no_more():
    """Return a line where the armorer ends the conversation.

    Fixes two typos in the emitted text: 'found it the book' -> 'found in the
    book' and 'frusturation' -> 'frustration'.
    """
    return random.choice([
        '"That\'s all I can tell you."\n\n',
        '"Anything else you need to know can be found in the book. Take your time." He took the book of monsters and handed it to <!pc_name>.\n\n',
        '"Look I\'ve got other things to attend to. Do you need weapons or not?" His frustration was visible.\n\n'
    ])
def armory_new_weapon(old_weapon):
    """Return the scene where the armorer swaps the hero's *old_weapon* for a new one.

    Fixes typos in the emitted text: 'served a well over a dozen adventureres'
    -> 'served well over a dozen adventurers'.
    """
    return (
        'As <!pc_name> turned to leave the armory, <!armor_name> called out\n\n' +
        random.choice([
            '"Before you go, get rid of that useless ' + old_weapon + '. It won\'t make a dent against the carapace of the <!monster_name>." ',
            '"Wait, you\'ll need a weapon worthy of your great cause. That rusty ' + old_weapon + ' won\'t do." '
        ]) +
        '\n\n' +
        random.choice([
            '"Take this <!pc_weapon>. It has served well over a dozen adventurers before you and it should serve you well too."\n\n',
            '"Forged by the finest dwarven smiths in the mountains of <town>, this <!pc_weapon> is the finest display of craftsmanship for miles around."\n\n'
        ])
    )
| 13,795 | 4,244 |
import csv
import argparse
import os
class ReportSplitter:
    """Split a CSV file into per-value CSV files.

    Rows whose indexed column(s) match one of *values* are written to
    ``<output_folder>/<value>.csv`` (dots in the value replaced by
    underscores). Matching is exact by default, substring when
    *contains_value* is set, and case-insensitive unless disabled.
    """

    def __init__(self, values, columns, file, output_folder=None, verbose=False, case_insensitive=True,
                 contains_value=False):
        self.values = values
        self.columns = columns
        self.file = file
        self.output_folder = output_folder
        self._file_mapping = {}   # value -> csv.DictWriter for that value's file
        self._opened_files = []   # underlying file objects, kept for cleanup
        self.verbose = verbose
        self.case_insensitive = case_insensitive
        self.contains_value = contains_value
        if self.output_folder is None:
            self.output_folder = os.getcwd()

    def split(self):
        """Read the source CSV once and distribute matching rows to output files."""
        if self.verbose:
            print("Values used for indexing:")
            print(self.values)
            print("Columns that will be indexed:")
            print(self.columns)
            print("File that will be splitted: " + self.file)
            print("Output folder: " + self.output_folder)
            # str() is required: these flags are booleans, and concatenating
            # them directly to a str raised TypeError before.
            print("Case insensitivity enabled: " + str(self.case_insensitive))
            print("Value contained in indexed column: " + str(self.contains_value))
            print("Starting...")
        try:
            self._file_exists(self.file)
            self._folder_exists(self.output_folder)
            if self.case_insensitive:
                values = self._values_to_lowecase(self.values)
            else:
                values = self.values
            # newline='' is required by the csv module; without it, output on
            # platforms with \r\n line endings gains blank lines.
            with open(self.file, newline='') as csvfile:
                reader = csv.DictReader(csvfile)
                self._verify_column_names(reader.fieldnames)
                self._create_files(reader.fieldnames, values)
                # Reading row by row.
                for row in reader:
                    # For each row, check every indexed column for a match.
                    for column in self.columns:
                        if self.case_insensitive:
                            column_value = row[column].lower()
                        else:
                            column_value = row[column]
                        if self.contains_value:
                            # Substring match: a row may land in several files.
                            for v in values:
                                if v in column_value:
                                    self._write_line_to_file(v, row)
                        elif column_value in values:
                            self._write_line_to_file(column_value, row)
        except Exception as err:
            # Best-effort CLI behaviour: report the problem and stop.
            print(err)
            return
        finally:
            # Always release the output handles; previously they leaked when
            # an exception occurred mid-split.
            self._close_files()
        if self.verbose:
            print("Finished...")
            print("Following files were created:")
            for file in self._opened_files:
                print(file.name)

    def _write_line_to_file(self, value, row):
        """Append *row* to the output file registered for *value*."""
        self._file_mapping[value].writerow(row)

    def _folder_exists(self, folder):
        """Raise if *folder* is missing, not a directory, or not writable."""
        if not os.path.exists(folder):
            raise Exception("ERROR - folder " + folder + " doesn't exist!")
        if not os.path.isdir(folder):
            raise Exception("ERROR - " + folder + " is not a folder!")
        if not os.access(folder, os.W_OK):
            raise Exception("ERROR - folder " + folder + " is not writable!")

    def _file_exists(self, file):
        """Raise if *file* is missing, not a regular file, or unreadable."""
        if not os.path.exists(file):
            raise Exception("ERROR - file " + file + " doesn't exist!")
        if not os.path.isfile(file):
            raise Exception("ERROR - " + file + " is not a file!")
        if not os.access(file, os.R_OK):
            raise Exception("ERROR - file " + file + " is not readable!")

    def _verify_column_names(self, fieldnames):
        """Ensure every requested index column exists in the CSV header."""
        for column in self.columns:
            if column not in fieldnames:
                raise Exception(
                    "ERROR - Column " + column + " not found in the CSV file. Maybe case sensitivity issue?")

    def _create_files(self, fieldnames, values):
        """Open one output CSV per indexed value and write its header row."""
        for value in values:
            file_name = os.path.join(self.output_folder, value.replace(".", "_") + ".csv")
            csvfile = open(file_name, 'w', newline='')
            writer = csv.DictWriter(csvfile, fieldnames)
            writer.writeheader()
            self._file_mapping[value] = writer
            self._opened_files.append(csvfile)

    def _values_to_lowecase(self, values):
        """Return a lower-cased copy of *values* (method name kept for compatibility)."""
        return [value.lower() for value in values]

    def _close_files(self):
        """Close all output files; safe to call more than once."""
        for file in self._opened_files:
            file.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--value_list", help="List of values based on which should the report be splitted. " +
                                                   "Accepts list of comma separated values")
    parser.add_argument("-c", "--column_list", help="List of columns that will be searched for indexing." +
                                                    "Accepts list of comma separated values")
    parser.add_argument("file", help="File that should be splitted")
    parser.add_argument("-o", "--output_folder", help="Folder where the output should be placed")
    parser.add_argument("-p", "--verbose", help="Verbose mode", action='store_true')
    parser.add_argument("-i", "--case_insensitive", help="Allows to enable case insensitivity.", action='store_true')
    parser.add_argument("-x", "--contains_value",
                        help="If enabled, value needs to be only contained in the column. No need for the exact match.",
                        action='store_true')
    args = parser.parse_args()
    # Forward the -i and -x flags: they were parsed but silently ignored
    # before, so -i/-x had no effect on the split.
    report_splitter = ReportSplitter(args.value_list.split(","), args.column_list.split(","), args.file,
                                     args.output_folder, args.verbose,
                                     case_insensitive=args.case_insensitive,
                                     contains_value=args.contains_value)
    report_splitter.split()
| 5,937 | 1,555 |
from . import decorators
from . import exec
from . import log
import os.path as path
import sublime
import time
import json
@decorators.thread
@decorators.trace
def source(view):
    """Jump to the source of the symbol under the cursor by delegating to locate().

    NOTE(review): decorated with the project's `thread`/`trace` decorators —
    presumably off-main-thread execution plus call tracing; confirm in the
    decorators module.
    """
    locate(view)
def call(mode, filename, region):
    """
    Run guru(1) in the given mode against filename at the region's
    byte offsets and return the parsed JSON response, or None when
    the command exits non-zero.
    """
    position = "{}:#{},#{}".format(filename, region.begin(), region.end())
    result = exec.Command("guru", args=["--json", mode, position]).run()
    if result.code != 0:
        return None
    return json.loads(result.stdout)
def locate(view):
    """
    Locate and open the definition of the symbol at the cursor using
    `guru describe`.

    NOTE(review): the docstring originally claimed an empty string is
    returned when no symbol is found, but most paths return None; only the
    final unknown-response path returns "". Confirm which callers rely on
    the return value.
    """
    file = view.file_name()
    # First selection region marks the cursor position handed to guru.
    pos = view.sel()[0]
    resp = call("describe", file, pos)
    if resp == None:
        return
    # 'value' responses carry the object position directly.
    if resp["detail"] == "value":
        if 'objpos' in resp['value']:
            open_position(view, resp['value']['objpos'])
        return
    # 'type' responses carry the position of the type's name.
    if resp["detail"] == "type":
        if "namepos" in resp["type"]:
            open_position(view, resp['type']['namepos'])
        return
    # Built-in types have no position; fall back to searching the symbol
    # inside $GOROOT/src/builtin/builtin.go.
    if 'built-in type' in resp['desc']:
        symbol = resp['type']['type']
        cwd = path.dirname(file)
        goroot = exec.goenv(cwd)['GOROOT']
        src = path.join(goroot, 'src', 'builtin', 'builtin.go')
        # NOTE(review): `win` is assigned but never used.
        win = view.window()
        open_symbol(view, src, symbol)
        return
    log.error("guru(1) - unknown response {}", resp)
    return ""
def open_position(view, src):
    """Open *src* (a 'file:row:col' style location) in the view's window."""
    view.window().open_file(src, sublime.ENCODED_POSITION)
def open_symbol(view, src, symbol):
    """Open *src* and scroll to *symbol*, retrying once shortly after in case the file is still loading."""
    window = view.window()
    opened_view = window.open_file(src)
    show(opened_view, symbol)
    sublime.set_timeout(lambda: show(opened_view, symbol), 20)
def show(view, symbol):
    """Scroll *view* to a symbol whose name contains *symbol*, polling while the file loads."""
    if view.is_loading():
        # File not parsed yet; retry shortly.
        sublime.set_timeout(lambda: show(view, symbol), 30)
        return
    for sym in view.symbols():
        # Each sym is a (region, name) pair; substring match on the name.
        if symbol in sym[1]:
            sel = sublime.Selection(0)
            sel.add(sym[0])
            view.show(sel)
            # NOTE(review): no break here — every match triggers view.show,
            # so the last matching symbol wins. Confirm this is intended.
| 1,899 | 674 |
from unittest import TestCase
from random import Random
from cipher21.key import Cipher21Key
from cipher21.constants import KEY_LENGTH
class AssessKeyTest(TestCase):
    """Key-quality checks for Cipher21Key.from_bytes."""

    def test_positive_cases(self):
        # Deterministic PRNG for test repeatability only; production code
        # should use SystemRandom.
        rng = Random()
        rng.seed(0xBDC34FD75D0B49F5817B4038C45EC575, version=2)
        for trial in range(10 ** 4):
            with self.subTest(t=trial):
                key_bytes = bytes(rng.getrandbits(8) for _ in range(KEY_LENGTH))
                Cipher21Key.from_bytes(key_bytes)

    def test_negative_cases(self):
        # Structured / low-entropy keys that from_bytes must reject.
        weak_keys = [
            KEY_LENGTH * b'\x00',
            bytes(range(KEY_LENGTH)),
            bytes(range(0, 5 * KEY_LENGTH, 5)),
            bytes(range(KEY_LENGTH, 0, -1)),
            bytes(range(7 * KEY_LENGTH, 0, -7)),
            2 * bytes.fromhex('e521377823342e05bd6fe051a12a8820'),
        ]
        for weak in weak_keys:
            with self.assertRaises(ValueError):
                Cipher21Key.from_bytes(weak)
| 1,344 | 489 |
# https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string-ii/
class Solution:
    def removeDuplicates(self, s: str, k: int) -> str:
        """Repeatedly delete groups of k adjacent equal letters and return the result.

        Uses a stack of [char, run_length] pairs, popping a pair as soon as
        its run reaches k. This is O(n) instead of the original O(n*k)
        approach that sliced and compared the tail string for every char.
        """
        runs = []  # each entry: [char, count of that char in the current run]
        for ch in s:
            if runs and runs[-1][0] == ch:
                runs[-1][1] += 1
                if runs[-1][1] == k:
                    runs.pop()
            else:
                runs.append([ch, 1])
        return ''.join(ch * count for ch, count in runs)
# Quick manual check of the LeetCode example case.
solution = Solution()
print(solution.removeDuplicates('deeedbbcccbdaa', 3))
| 357 | 128 |
import os
import json
from maya import cmds
import re
def conform_path(path):
    """Normalise *path* to forward slashes with no empty segments."""
    normalized = path.replace('\\', '/')
    return join_path(*split_path(normalized))
def join_path(*args):
    """Join any number of path fragments with '/', dropping empty segments."""
    segments = []
    for arg in args:
        for part in split_path(arg):
            text = str(part)
            if text:
                segments.append(text)
    return '/'.join(segments)
def split_path(path):
    """Split *path* on either separator style and return its non-empty segments."""
    normalized = path.replace('\\', '/')
    return [segment for segment in normalized.split('/') if segment]
def decompose_file_path(path):
    """Return (directory, file_name) for *path*, both using '/' separators."""
    segments = split_path(path)
    file_name = segments.pop()
    return join_path(*segments), file_name
class JsonFile(object):
    """Thin wrapper around a JSON file path with read/write helpers.

    Instances wrap an existing .json file path; `create_file` builds a new
    file on disk and returns an instance. Subclasses can override
    `compress_data`/`uncompress_data` to transform data on write/read.
    """

    # Default directory for new files: the user's Maya preferences folder.
    default_location = cmds.internalVar(userPrefDir=True)
    extension = 'json'

    def __init__(self, name):
        """Wrap an existing json file path; errors out if the path is not one.

        NOTE(review): cmds.error normally aborts execution, so the
        assignment below is presumably not reached on failure — confirm
        against the Maya version in use.
        """
        if not self.is_one(name):
            cmds.error('\'{}\' is not a valid argument for \'{}\' class.'.format(name, self.__class__.__name__))
        self.name = str(name)

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Compare by path string, so comparison against plain strings works.
        return self.name == str(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __iter__(self):
        # Iterates the characters of the path (string-like behaviour).
        return iter(self.name)

    def endswith(self, item):
        """String-style endswith on the wrapped path."""
        return self.name.endswith(item)

    def startswith(self, item):
        """String-style startswith on the wrapped path."""
        return self.name.startswith(item)

    @classmethod
    def compress_data(cls, data):
        """Hook: transform data before writing. Identity by default."""
        return data

    @classmethod
    def uncompress_data(cls, data):
        """Hook: transform data after reading. Identity by default."""
        return data

    @classmethod
    def format_file_name(cls, file_name):
        """Return *file_name* with the class extension appended if missing."""
        file_name = str(file_name)
        if not file_name.lower().endswith('.{0}'.format(cls.extension)):
            return '{0}.{1}'.format(file_name, cls.extension)
        return file_name

    @classmethod
    def create(cls, *args, **kwargs):
        # Placeholder hook for subclasses; intentionally does nothing here.
        pass

    @classmethod
    def create_file(cls, data, location=None, file_name=None, force=False):
        """Create a json file holding *data* and return the wrapping instance.

        Raises (via cmds.error) when the location is invalid or the file
        already exists and *force* is False.
        """
        location = cls.default_location if location is None else str(location)
        file_name = cls.get_default_file_name() if file_name is None else str(file_name)
        force = bool(force)
        location = conform_path(location)
        file_name = cls.format_file_name(file_name)
        path = join_path(location, file_name)
        if not os.path.isdir(location):
            raise cmds.error('The given location is invalid -> \'{}\''.format(location))
        if not force and os.path.isfile(path):
            raise cmds.error('The given path already exists -> \'{}\''.format(path))
        # Seed the file with valid JSON (null) so __init__'s is_one check passes,
        # then write the real payload through the instance.
        with open(path, 'w') as f:
            json.dump(None, f)
        json_file = cls(path)
        json_file.write(data)
        print('The file \'{0}\' has been created.'.format(json_file.get_path()))
        return json_file

    @classmethod
    def get_default_file_name(cls):
        """Return a snake_case file name derived from the class name, e.g. json_file.json."""
        file_name = re.sub(r'(?<!^)(?=[A-Z])', '_', cls.__name__).lower()
        return '{0}.{1}'.format(file_name, cls.extension)

    @classmethod
    def get(cls, location=None, file_name=None):
        """Return an instance for an existing file, or None (with a message) if absent."""
        location = cls.default_location if location is None else str(location)
        file_name = cls.get_default_file_name() if file_name is None else cls.format_file_name(file_name)
        full_path = join_path(location, file_name)
        if cls.is_one(full_path):
            return cls(full_path)
        print('The file \'{0}\' does not exist.'.format(full_path))
        return None

    def load(self, *args, **kwargs):
        """Hook for subclasses; base implementation only reports the load."""
        print('The file \'{0}\' has been loaded.'.format(self.get_path()))

    @classmethod
    def is_one(cls, path):
        """Return True if *path* is an existing file with the class extension."""
        path = str(path)
        if os.path.isfile(path):
            if path.lower().endswith(cls.extension):
                return True
        return False

    def write(self, data):
        """Serialize *data* (after compress_data) to the wrapped file."""
        data = self.compress_data(data)
        with open(self.get_path(), 'w') as f:
            json.dump(data, f, indent=2, sort_keys=True)

    def get_path(self):
        """Return the wrapped file path."""
        return self.name

    def read(self):
        """Deserialize the file contents and pass them through uncompress_data."""
        with open(self.get_path(), 'r') as f:
            data = json.load(f)
        return self.uncompress_data(data)

    def get_file_name(self, extension=True):
        """Return the file name, optionally without its extension."""
        name = self.get_path().split('/')[-1]
        if extension:
            return name
        return name.split('.')[0]

    def delete(self):
        """Remove the file from disk and report it."""
        os.remove(self.get_path())
        print('The file \'{0}\' has been deleted.'.format(self.get_path()))
| 4,520 | 1,458 |
from .resnet_backbone import resnet18
from torch import nn
import torch
import torch.nn.functional as F
from detro.networks.components import BiFPN, Center_layer, Offset_layer, Reg_layer, Heatmap_layer
from detro.networks.losslib import center_loss, distance_loss
class FeatureFusionNetwork(nn.Module):
    """Fuse multi-scale feature maps into a single tensor.

    inputs[0] only provides the target spatial size; inputs[1:] are resized
    to that size and concatenated along the channel dimension (this matches
    the 896-channel 1x1 conv that consumes the result downstream).
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs):
        target_size = inputs[0].size()[-2:]
        # F.interpolate replaces the long-deprecated F.upsample; the default
        # mode ('nearest') matches the old call's behaviour.
        resized = [F.interpolate(x, size=target_size) for x in inputs[1:]]
        return torch.cat(resized, dim=1)
class CircleNet(nn.Module):
    """Single-stage circle detector: ResNet-18 backbone, feature fusion neck, heatmap head."""

    def __init__(self, num_classes=1):
        super().__init__()
        self.backbone = resnet18(pretrained=True)
        self.neck = FeatureFusionNetwork()
        self.conv1 = nn.Conv2d(896, 256, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(256)
        self.relu = nn.ReLU(inplace=True)
        # One shared head predicts num_classes center heatmaps plus a single
        # offset channel.
        self.hm_layer = Heatmap_layer(in_channels=256, out_channels=num_classes + 1)

    def forward(self, inputs):
        c1, c2, c3, c4, c5 = self.backbone(inputs)
        fused = self.neck([c2, c3, c4, c5])
        head_in = self.relu(self.bn1(self.conv1(fused)))
        head_out = self.hm_layer(head_in)
        # Last channel is the offset map; the remaining channels are the
        # per-class center heatmaps.
        return dict(
            center_heatmap=head_out[:, :-1], offsets=head_out[:, -1:]
        )
def CircleDetCriterion(preds, labels):
    """Return the center-heatmap loss, the masked offset loss, and their sum."""
    loss_center = center_loss(preds['center_heatmap'], labels['center_heatmap'])
    loss_offsets = distance_loss(preds['offsets'], labels['offsets'], labels['offsets_mask'])
    return {
        'loss': loss_center + loss_offsets,
        'loss_center': loss_center,
        'loss_offsets': loss_offsets,
    }
| 2,150 | 787 |
from django import forms
from .models import MakerProfile,BuyerProfile,MstLang,MstSkill,Contact,Order,OrderMessage
from register.models import User
class UserForm(forms.ModelForm):
    """Edit only the user's last and first name."""
    class Meta:
        model = User
        fields = ('last_name', 'first_name')
class MakerProfileForm(forms.ModelForm):
    """Maker profile editor; language and skill choices render as checkbox lists."""
    class Meta:
        model = MakerProfile
        fields = ('picture','lang','cost','skill')
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Render the multi-select fields as checkbox groups.
        self.fields['lang'].widget = forms.CheckboxSelectMultiple()
        # NOTE(review): assigns the manager itself rather than the usual
        # MstLang.objects.all() — confirm this works on the Django version in use.
        self.fields['lang'].queryset = MstLang.objects
        self.fields['skill'].widget = forms.CheckboxSelectMultiple()
        self.fields['skill'].queryset = MstSkill.objects
class BuyerProfileForm(forms.ModelForm):
    """Edit only the buyer's profile picture."""
    class Meta:
        model = BuyerProfile
        fields = ('picture',)
class ContactForm(forms.ModelForm):
    """Contact form whose widgets carry Bootstrap classes and popover hint attributes."""

    class Meta:
        model = Contact
        fields = ('user', 'email', 'message', 'file',)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Per-field widget attributes, merged into each widget's attrs.
        widget_attrs = {
            'user': {
                'class': 'form-control required',
                'placeholder': 'Your Name',
                'data-placement': 'top',
                'data-trigger': 'manual',
                'data-content': 'Must be at least 3 characters long, and must only contain letters.',
            },
            'email': {
                'class': 'form-control email',
                'placeholder': 'email@xxx.com',
                'data-placement': 'top',
                'data-trigger': 'manual',
                'data-content': 'Must be a valid e-mail address (user@gmail.com)',
            },
            'message': {
                'class': 'form-control',
                'placeholder': 'Your message here..',
                'data-placement': 'top',
                'data-trigger': 'manual',
            },
        }
        for field_name, attrs in widget_attrs.items():
            self.fields[field_name].widget.attrs.update(attrs)
class OrderForm(forms.ModelForm):
    """Order creation form; every visible field gets the Bootstrap 'form-control' class."""

    class Meta:
        model = Order
        fields = ('title', 'body', 'order_type', 'order_finish_time', 'cost',)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Title additionally carries placeholder and popover hint attributes.
        self.fields['title'].widget.attrs.update({
            'class': 'form-control',
            'placeholder': 'タイトルを入れてください',
            'data-placement': 'top',
            'data-trigger': 'manual',
            'data-content': '依頼の内容入力',
        })
        for plain_field in ('order_type', 'body', 'cost', 'order_finish_time'):
            self.fields[plain_field].widget.attrs.update({'class': 'form-control'})
class SearchForm(forms.Form):
    """Simple keyword search form with a single optional title field."""
    title = forms.CharField(
        initial='',
        label='タイトル',
        required = False, # not required
    )
| 2,938 | 865 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import jacket.cmd.compute.novnc
import jacket.cmd.compute.serialproxy
import jacket.cmd.compute.spicehtml5proxy
import jacket.compute.baserpc
import jacket.compute.cloudpipe.pipelib
import jacket.compute.conductor.rpcapi
import jacket.compute.conductor.tasks.live_migrate
import jacket.compute.conf
import jacket.compute.console.manager
import jacket.compute.console.rpcapi
import jacket.compute.console.serial
import jacket.compute.console.xvp
import jacket.compute.consoleauth
import jacket.compute.consoleauth.manager
import jacket.compute.consoleauth.rpcapi
import jacket.compute.crypto
import jacket.compute.exception
import jacket.compute.image.download.file
import jacket.compute.image.glance
import jacket.compute.ipv6.api
import jacket.compute.keymgr
import jacket.compute.keymgr.barbican
import jacket.compute.keymgr.conf_key_mgr
import jacket.compute.netconf
import jacket.compute.notifications
import jacket.compute.paths
import jacket.compute.quota
import jacket.compute.rdp
import jacket.compute.servicegroup.api
import jacket.compute.spice
import jacket.compute.utils
import jacket.compute.volume
import jacket.compute.volume.cinder
import jacket.db.base
import jacket.db.compute.api
import jacket.db.compute.sqlalchemy.api
import jacket.objects.compute.network
def list_opts():
    """Return (group, options) pairs for oslo-config option discovery.

    Each entry maps a configuration group name (or 'DEFAULT') to an iterable
    of the options registered under it, aggregated from the modules imported
    above. Consumed by the oslo.config sample-generator entry point.
    """
    return [
        ('DEFAULT',
         itertools.chain(
             # Single options are wrapped in one-element lists so they chain
             # with the multi-option lists below.
             [jacket.compute.conductor.tasks.live_migrate.migrate_opt],
             [jacket.compute.consoleauth.consoleauth_topic_opt],
             [jacket.db.base.db_driver_opt],
             [jacket.compute.ipv6.api.ipv6_backend_opt],
             [jacket.compute.servicegroup.api.servicegroup_driver_opt],
             jacket.compute.cloudpipe.pipelib.cloudpipe_opts,
             jacket.cmd.compute.novnc.opts,
             jacket.compute.console.manager.console_manager_opts,
             jacket.compute.console.rpcapi.rpcapi_opts,
             jacket.compute.console.xvp.xvp_opts,
             jacket.compute.consoleauth.manager.consoleauth_opts,
             jacket.compute.crypto.crypto_opts,
             jacket.db.compute.api.db_opts,
             jacket.db.compute.sqlalchemy.api.db_opts,
             jacket.compute.exception.exc_log_opts,
             jacket.compute.netconf.netconf_opts,
             jacket.compute.notifications.notify_opts,
             jacket.objects.compute.network.network_opts,
             jacket.compute.paths.path_opts,
             jacket.compute.quota.quota_opts,
             # jacket.compute.service.service_opts,
             jacket.compute.utils.monkey_patch_opts,
             jacket.compute.utils.utils_opts,
             jacket.compute.volume._volume_opts,
         )),
        ('barbican', jacket.compute.keymgr.barbican.barbican_opts),
        ('cinder', jacket.compute.volume.cinder.cinder_opts),
        ('api_database', jacket.db.compute.sqlalchemy.api.api_db_opts),
        ('database', jacket.db.compute.sqlalchemy.api.oslo_db_options.database_opts),
        ('glance', jacket.compute.image.glance.glance_opts),
        ('image_file_url', [jacket.compute.image.download.file.opt_group]),
        ('compute_keymgr',
         itertools.chain(
             jacket.compute.keymgr.conf_key_mgr.key_mgr_opts,
             jacket.compute.keymgr.keymgr_opts,
         )),
        ('rdp', jacket.compute.rdp.rdp_opts),
        ('spice',
         itertools.chain(
             jacket.cmd.compute.spicehtml5proxy.opts,
             jacket.compute.spice.spice_opts,
         )),
        ('upgrade_levels',
         itertools.chain(
             [jacket.compute.baserpc.rpcapi_cap_opt],
             [jacket.compute.conductor.rpcapi.rpcapi_cap_opt],
             [jacket.compute.console.rpcapi.rpcapi_cap_opt],
             [jacket.compute.consoleauth.rpcapi.rpcapi_cap_opt],
         )),
        ('workarounds', jacket.compute.utils.workarounds_opts),
    ]
| 4,418 | 1,406 |
"""CSC110 final project, main module
Descriptions
===============================
This module contains all the functions we used to implement the
simple linear regression model.
Copyright and Usage Information
===============================
All forms of distribution of this code, whether as given or with any changes, are
expressly prohibited. All rights reserved.
This file is Copyright (c) 2020 Runshi Yang, Chenxu Wang and Haojun Qiu
"""
from typing import List, Tuple
import plotly.graph_objects as go
def evaluate_line(a: float, b: float, x: float) -> float:
    """Evaluate the linear function y = a + bx at the given x.

    >>> evaluate_line(5.0, 1.0, 10.0)
    15.0
    """
    return b * x + a
def convert_points(points: List[tuple]) -> tuple:
    """Return ([x...], [y...]) lists for the given (x, y) points.

    >>> convert_points([(0.0, 1.1), (2.2, 3.3), (4.4, 5.5)])
    ([0.0, 2.2, 4.4], [1.1, 3.3, 5.5])
    """
    xs = []
    ys = []
    for point in points:
        xs.append(point[0])
        ys.append(point[1])
    return (xs, ys)
def simple_linear_regression(points: List[tuple]) -> tuple:
    """Perform a simple linear regression on the given points.

    Returns (a, b) such that y = a + bx is the least-squares fit.
    Computes the coordinate means directly instead of calling
    convert_points twice (the original rebuilt both coordinate lists
    for each mean).

    Further reading: https://en.wikipedia.org/wiki/Simple_linear_regression

    Preconditions:
        - len(points) > 0
        - not all x-coordinates are equal (otherwise ZeroDivisionError,
          matching the original behaviour)

    >>> simple_linear_regression([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)])
    (0.0, 1.0)
    """
    n = len(points)
    avg_x = sum(p[0] for p in points) / n
    avg_y = sum(p[1] for p in points) / n
    numerator = sum((p[0] - avg_x) * (p[1] - avg_y) for p in points)
    denominator = sum((p[0] - avg_x) ** 2 for p in points)
    b = numerator / denominator
    a = avg_y - b * avg_x
    return (a, b)
def calculate_r_squared(points: List[tuple], a: float, b: float) -> float:
    """Return the R squared value when the points are modelled as y = a + bx.

    points is a list of pairs of numbers: [(x_1, y_1), (x_2, y_2), ...].
    Computes the mean of y directly rather than rebuilding both coordinate
    lists via convert_points (the original discarded the x list it built).

    Preconditions:
        - len(points) > 0
        - not all y-coordinates are equal (otherwise ZeroDivisionError,
          matching the original behaviour)
    """
    avg_y = sum(p[1] for p in points) / len(points)
    ss_tot = sum((avg_y - p[1]) ** 2 for p in points)
    ss_res = sum((p[1] - (a + b * p[0])) ** 2 for p in points)
    return 1 - ss_res / ss_tot
def perform_regression(train_data: List[tuple], xlabel: str,
                       title: str) -> Tuple[float, float, float]:
    """Fit a line to train_data, plot the data and fitted line, and return (a, b, r_squared)."""
    x_coords, y_coords = convert_points(train_data)
    # Least-squares fit: y = a + b * x.
    a, b = simple_linear_regression(train_data)
    plot_points_and_regression(x_coords, y_coords, [a, b], xlabel, title)
    r_squared = calculate_r_squared(train_data, a, b)
    return (a, b, r_squared)
def plot_points_and_regression(x_coords: list, y_coords: list, coef: List[float],
                               xlabel: str, title: str) -> None:
    """Plot the data points together with the line y = coef[0] + coef[1] * x using plotly."""
    layout = go.Layout(title=title,
                       xaxis={'title': xlabel},
                       yaxis={'title': 'number of cases'})
    fig = go.Figure(layout=layout)
    fig.add_trace(go.Scatter(x=x_coords, y=y_coords, mode='markers', name='Data'))
    # Two endpoints suffice to draw the line; extend 10% past the data range.
    x_max = 1.1 * max(x_coords)
    line_y = [evaluate_line(coef[0], coef[1], x) for x in (0, x_max)]
    fig.add_trace(go.Scatter(x=[0, x_max], y=line_y,
                             mode='lines', name='Regression line'))
    # Opens the interactive figure in a web browser.
    fig.show()
def predict(test_data: List[Tuple], model: Tuple[float, float, float],
            xlabel: str, title: str) -> float:
    """Plot test_data against the fitted line from *model* and return the r_squared of the fit."""
    a, b = model[0], model[1]
    x_coords, y_values = convert_points(test_data)
    plot_points_and_regression(x_coords, y_values, [a, b], xlabel, title)
    return calculate_r_squared(test_data, a, b)
if __name__ == '__main__':
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod(verbose=True)
    # python_ta is the course's teaching linter; check_all enforces the
    # constraints used for grading.
    import python_ta
    python_ta.check_all(config={
        'extra-imports': ['plotly.graph_objects', 'python_ta'],
        'allowed-io': [],
        'max-line-length': 100,
        'disable': ['R1705', 'C0200']
    })
| 5,362 | 1,882 |
class AwsErrorCodes:
    """String error codes returned by AWS service APIs."""
    # Returned by SQS when the target queue has been deleted or never existed.
    SqsNonExistentQueue = 'AWS.SimpleQueueService.NonExistentQueue'
class NonExistantSqsQueueException(Exception):
    """Raised when an operation targets an SQS queue that no longer exists.

    The class name's spelling ("NonExistant") is kept for backward
    compatibility with existing except clauses.
    """

    def __init__(self, queue_name):
        # Keep the queue name around so handlers can report or retry it.
        self.queue_name = queue_name
        # super() replaces the legacy explicit Exception.__init__ call.
        super().__init__("SQS Queue '%s' no longer exists" % queue_name)
| 292 | 96 |
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
# URL namespace: reverse routes as 'recognition:<name>'.
app_name='recognition'
urlpatterns = [
    path('', views.Home.as_view(), name='home'),
    # NOTE(review): 'settings/' currently maps to the same Home view as '' —
    # confirm this is intentional rather than a placeholder.
    path('settings/', views.Home.as_view(), name='settings'),
    path('login/', views.UserLoginView.as_view(), name='login'),
    path('logout/', views.LogoutView.as_view(), name='logout'),
    path('register/', views.UserRegistrationView.as_view(), name='register'),
    path('settings/profile/', views.ProfileSettingsView.as_view(), name='edit-profile'),
    path('settings/reg-face/', views.UserFaceRegView.as_view(), name='reg-face'),
    # Face-login endpoint handled by UserFaceLogInView.
    path('apis/auth/', views.UserFaceLogInView.as_view(), name='api-auth')
]
| 718 | 240 |
"""Validate consistency of versions and dependencies.
Validates consistency of setup.json and
* environment.yml
* version in aiida_lammps/__init__.py
"""
import json
import os
import sys
import click
FILENAME_SETUP_JSON = "setup.json"
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
ROOT_DIR = os.path.join(SCRIPT_PATH, os.pardir)
FILEPATH_SETUP_JSON = os.path.join(ROOT_DIR, FILENAME_SETUP_JSON)
def get_setup_json():
    """Return the contents of `setup.json` as a python dictionary."""
    with open(FILEPATH_SETUP_JSON, "r") as handle:
        return json.load(handle)
@click.group()
def cli():
    """Command line interface for pre-commit checks."""
@cli.command("version")
def validate_version():
"""Check that version numbers match.
Check version number in setup.json and aiida_lammos/__init__.py and make sure
they match.
"""
# Get version from python package
sys.path.insert(0, ROOT_DIR)
import aiida_lammps # pylint: disable=wrong-import-position
version = aiida_lammps.__version__
setup_content = get_setup_json()
if version != setup_content["version"]:
click.echo("Version number mismatch detected:")
click.echo(
"Version number in '{}': {}".format(
FILENAME_SETUP_JSON, setup_content["version"]
)
)
click.echo(
"Version number in '{}/__init__.py': {}".format("aiida_lammps", version)
)
click.echo(
"Updating version in '{}' to: {}".format(FILENAME_SETUP_JSON, version)
)
setup_content["version"] = version
with open(FILEPATH_SETUP_JSON, "w") as fil:
# Write with indentation of two spaces and explicitly define separators to not have spaces at end of lines
json.dump(setup_content, fil, indent=2, separators=(",", ": "))
sys.exit(1)
if __name__ == "__main__":
cli() # pylint: disable=no-value-for-parameter
| 2,026 | 639 |
import json
import os
import time
from configparser import ConfigParser
import discord
from discord.ext import tasks, commands
from dotenv import load_dotenv
from datetime import datetime
# Load the Discord token from a .env file next to the script.
load_dotenv()
TOKEN = os.getenv('TOKEN')
CONFIG_FILE = 'config.ini'
# Config
config_parser = ConfigParser()
config_parser.read(CONFIG_FILE)
# In minutes
CHALLENGE_TIME = int(config_parser.get('CHALLENGE', 'frequency'))
BOUNTY_TIME = int(config_parser.get('BOUNTY', 'frequency'))
# Timestamps of the most recent announcement of each kind (set by the loops).
challenge_start = 0
bounty_start = 0
# True while the announcement loops are running (guards set_channel).
started = False
def read_file(file):
    """Load a JSON file and return its top-level entries as a list.

    Parameters:
        file: path to a JSON file whose top-level value is iterable
              (normally a list of bounty/challenge objects).

    Returns:
        list: the decoded entries.
    """
    with open(file) as f:
        # json.load already produces the parsed container; list() replaces
        # the previous element-by-element append loop.
        return list(json.load(f))
# Load the bounty and challenge definitions from the files named in config.ini.
bounties = read_file(config_parser.get('BOUNTY', 'file'))
challenges = read_file(config_parser.get('CHALLENGE', 'file'))
# Create bot
client = commands.Bot(command_prefix='!')
# Startup information
@client.event
async def on_ready():
    # Log basic identity info once the gateway connection is established.
    print('Connected to bot: {}'.format(client.user.name))
    print('Bot ID: {}'.format(client.user.id))
@client.event
async def on_command_error(ctx, error):
    """Swallow common user-triggered command errors; re-raise everything else."""
    ignored = (
        commands.CommandNotFound,
        commands.MissingPermissions,
        commands.MissingRequiredArgument,
        commands.CommandInvokeError,
        commands.ChannelNotFound,
    )
    # One tuple-isinstance check replaces the previous five-branch chain.
    if isinstance(error, ignored):
        return
    raise error
@commands.has_permissions(administrator=True)
@client.command(help='- Start the announcements')
async def start(ctx):
    """Start the enabled announcement loops, then the countdown updater."""
    import asyncio  # local import: only used here; file-level imports untouched

    global started
    if config_parser.get('CHALLENGE', 'enabled') == "True":
        challenge_loop.start()
    if config_parser.get('BOUNTY', 'enabled') == "True":
        bounty_loop.start()
    started = True
    await ctx.send('Announcements have been started')
    # BUG FIX: time.sleep(3) blocked the whole asyncio event loop (the bot
    # froze for 3 s). asyncio.sleep yields control while waiting for the
    # first announcement messages to be posted before the countdown starts.
    await asyncio.sleep(3)
    countdown.start()
@commands.has_permissions(administrator=True)
@client.command(help='- Stop the announcements')
async def stop(ctx):
    """Cancel every running announcement loop plus the countdown updater."""
    global started
    for task_loop in (challenge_loop, bounty_loop, countdown):
        task_loop.cancel()
    started = False
    await ctx.send('Announcements have been stopped')
@commands.has_permissions(administrator=True)
@client.command(help='- DO NOT USE THIS WHILE EVENT IS ONGOING!')
async def reset(ctx):
    """Reset both announcement indexes to 0 and persist the config.

    Previously the config file was rewritten once per section; both keys
    are now set first and the file is written a single time.
    """
    config_parser.set('BOUNTY', 'index', '0')
    config_parser.set('CHALLENGE', 'index', '0')
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)
    await ctx.send('Indexes have been reset to 0')
@commands.has_permissions(administrator=True)
@client.command(help='- Give a message id to set message as ended. Run this in the same channel as the ended message.')
async def end(ctx, arg):
    """Mark one of the bot's announcement embeds as finished (0h 0min left)."""
    target = await ctx.fetch_message(int(arg))
    # Only touch embeds that this bot itself posted.
    if target.author == client.user:
        embed = target.embeds[0]
        embed.set_footer(text='Time remaining: 0h 0min')
        await target.edit(embed=embed)
    # Remove the invoking command message either way.
    await ctx.message.delete()
@commands.has_permissions(administrator=True)
@client.command(help='- Set channels for bounties and challenges. Configure this before you start the event!')
async def set_channel(ctx, t, channel: discord.TextChannel):
    """Point the bounty or challenge announcements at a new channel."""
    # Refuse to re-point channels while the announcement loops are running.
    if started:
        await ctx.send("You can only configure this while the event is stopped.")
        return
    if t not in ("bounty", "challenge"):
        await ctx.send("Invalid type. Only valid types are 'bounty' and 'challenge'.")
        return
    config_parser.set(t.upper(), 'channel', str(channel.id))
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)
    await ctx.send(f'Successfully set the {t} channel to {channel.mention}')
# Announcements for the bounty channel
@tasks.loop(minutes=BOUNTY_TIME)
async def bounty_loop():
    """Post the next bounty embed and advance the persisted index.

    Runs every BOUNTY_TIME minutes; stops itself once all bounties are used.
    """
    global bounty_start
    # Remember when this bounty was posted so countdown() can compute time left.
    bounty_start = datetime.now()
    bounty_channel = client.get_channel(int(config_parser.get('BOUNTY', 'channel')))
    bounty_index = int(config_parser.get('BOUNTY', 'index'))
    # All bounties exhausted: stop this loop permanently.
    if bounty_index >= len(bounties):
        bounty_loop.stop()
        return
    embed_message = discord.Embed(title=f'{BOUNTY_TIME//60} Hour Bounty', color=discord.Color.green())
    embed_message.add_field(name="The current bounty is...", value=bounties[bounty_index]['bounty'], inline=False)
    embed_message.add_field(name="Keyword", value=bounties[bounty_index]['keyword'])
    embed_message.set_footer(text=f'Time remaining: {BOUNTY_TIME//60}h {BOUNTY_TIME%60}min')
    msg = await bounty_channel.send(embed=embed_message)
    # Persist progress so a restart resumes from the next bounty and the
    # countdown can find the message to edit.
    config_parser.set('BOUNTY', 'index', str(bounty_index + 1))
    config_parser.set('BOUNTY', 'message_id', str(msg.id))
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)
# Announcements for the challenges channel
@tasks.loop(minutes=CHALLENGE_TIME)
async def challenge_loop():
    """Post the next challenge embed and advance the persisted index.

    Runs every CHALLENGE_TIME minutes; stops itself when out of challenges.
    """
    global challenge_start
    # Remember the post time so countdown() can compute remaining minutes.
    challenge_start = datetime.now()
    challenge_channel = client.get_channel(int(config_parser.get('CHALLENGE', 'channel')))
    challenge_index = int(config_parser.get('CHALLENGE', 'index'))
    # All challenges exhausted: stop this loop permanently.
    if challenge_index >= len(challenges):
        challenge_loop.stop()
        return
    embed_message = discord.Embed(title="Daily Challenge", color=discord.Color.green())
    embed_message.add_field(name="The current challenge is...", value=challenges[challenge_index]['challenge'], inline=False)
    embed_message.add_field(name="Keyword", value=challenges[challenge_index]['keyword'])
    embed_message.set_footer(text=f'Time remaining: {CHALLENGE_TIME // 60}h {CHALLENGE_TIME % 60}min')
    msg = await challenge_channel.send(embed=embed_message)
    # Persist progress so a restart resumes correctly and countdown() can
    # locate the message to edit.
    config_parser.set('CHALLENGE', 'index', str(challenge_index + 1))
    config_parser.set('CHALLENGE', 'message_id', str(msg.id))
    with open(CONFIG_FILE, 'w') as config_file:
        config_parser.write(config_file)
def update_counter(message, t, start_time):
    """Return the message's first embed with its countdown footer refreshed.

    `t` is the total duration in minutes; `start_time` is when the embed
    was originally posted.
    """
    elapsed_min = (datetime.now() - start_time).seconds // 60
    remaining = t - elapsed_min
    embed = message.embeds[0]
    embed.set_footer(text=f'Time remaining: {remaining//60}h {remaining%60}min')
    return embed
@tasks.loop(minutes=1)
async def countdown():
    """Once a minute, refresh the "Time remaining" footer on the most
    recently posted bounty and challenge embeds (looked up via the
    message_id persisted in config.ini)."""
    if config_parser.get('BOUNTY', 'enabled') == "True":
        bounty_channel = await client.fetch_channel(config_parser.get('BOUNTY', 'channel'))
        bounty_message = await bounty_channel.fetch_message(config_parser.get('BOUNTY', 'message_id'))
        await bounty_message.edit(embed=update_counter(bounty_message, BOUNTY_TIME, bounty_start))
    if config_parser.get('CHALLENGE', 'enabled') == "True":
        challenge_channel = await client.fetch_channel(config_parser.get('CHALLENGE', 'channel'))
        challenge_message = await challenge_channel.fetch_message(config_parser.get('CHALLENGE', 'message_id'))
        await challenge_message.edit(embed=update_counter(challenge_message, CHALLENGE_TIME, challenge_start))
# Blocking call: connect to Discord and run the event loop until interrupted.
client.run(TOKEN)
| 7,062 | 2,344 |
import numpy as np
import itdbase
from itdbase import Cell
import itin
from copy import deepcopy as cp
import cPickle as pick
# from tsase.optimize import MDMin
from ase.optimize.fire import FIRE
# from ase.optimize import BFGS
from ase import *
from ase.io import read, write
import os
import sys
import numpy as np
from tsase.mushybox import mushybox
# from tsase.calculators.vasp_ext import Vasp
# from tsase.calculators.lammps_ext import LAMMPS
from ase.calculators.lammpsrun import LAMMPS
from tsase.dimer import ssdimer
from tsase.dimer import lanczos
from tsase.neb.util import vunit, vrand
def gopt(xcell, mode):
    """Dispatch a geometry optimization of `xcell` (displaced by `mode`)
    to the calculator backend selected in itin.interface.

    Exits the process if the configured interface is unknown.
    """
    if itin.interface == 'lammps':
        return gopt_lammps(xcell, mode)
    elif itin.interface == 'vasp':
        return gopt_vasp(xcell, mode)
    else:
        # Python 2 print statement; this module is Python 2 code throughout.
        print 'ERROR: WRONG INTERFACE'
        sys.exit(1)
def gopt_vasp(xcell, mode):
    """Relax `xcell` with an external VASP run after applying displacement `mode`.

    `mode` packs per-atom displacements in mode[:-3] and a lattice strain in
    its last three rows. Returns a new cell with the enthalpy stored via set_e.
    """
    lat = xcell.get_lattice()
    vol = xcell.get_volume()
    # Jacobian coupling cell and atomic degrees of freedom.
    jacob = (vol / itin.nat)**(1.0/3.0) * itin.nat**0.5
    latt = lat + np.dot(lat, mode[-3:]/jacob)
    xcell.set_lattice(latt)
    newpos = xcell.get_positions() + mode[:-3]
    xcell.set_positions(newpos)
    write_cell_to_vasp(xcell, "POSCAR")
    os.system("cp INCAR_OPT INCAR")
    os.system("sh runvasp.sh")
    # Last "free energy" entry in OUTCAR is the converged energy.
    e = float(os.popen("awk '/free energy/{print $5}' OUTCAR|tail -1").read())
    pcell = set_cell_from_vasp("CONTCAR")
    # Enthalpy = PV + E; 1602.2 presumably converts pressure*volume to eV
    # (kbar * A^3) — TODO confirm units.
    h = itin.press * pcell.get_volume() / 1602.2 + e
    pcell.set_e(h)
    # NOTE(review): `glob` and `sdata` are not imported/defined anywhere in
    # this module — confirm they are injected elsewhere, otherwise these
    # lines raise NameError at runtime.
    gdirs = glob.glob('Gdir*')
    gdir = 'Gdir' + str(len(gdirs))
    os.system('mkdir -p ' + gdir)
    os.system('cp POSCAR OUTCAR CONTCAR XDATCAR ' + gdir)
    sdata.gdir = gdir
    return pcell
def gopt_lammps(xcell, mode):
    """Relax `xcell` (displaced by `mode`) with LAMMPS via ASE's FIRE optimizer.

    `mode` packs per-atom displacements in mode[:-3] and a lattice strain in
    its last three rows. On success the relaxed cell (with its energy) is
    returned; on any failure a copy of the input cell carrying the sentinel
    energy 151206 is returned so callers can detect the failed relaxation.
    """
    write_cell_to_vasp(xcell, 'POSCAR')
    p1 = read('POSCAR', format='vasp')
    parameters = itin.parameters
    calc = LAMMPS(parameters=parameters)
    p1.set_calculator(calc)
    natom = len(p1)
    vol = p1.get_volume()
    # Jacobian coupling cell and atomic degrees of freedom.
    jacob = (vol/natom)**(1.0/3.0) * natom**0.5
    # Apply the lattice strain (last three rows of mode) and atomic shifts.
    cellt = p1.get_cell() + np.dot(p1.get_cell(), mode[-3:]/jacob)
    p1.set_cell(cellt, scale_atoms=True)
    p1.set_positions(p1.get_positions() + mode[:-3])
    pstress = p1.get_cell()*0.0
    p1box = mushybox(p1, pstress)
    try:
        dyn = FIRE(p1box, dt=0.1, maxmove=0.2, dtmax=0.2)
        dyn.run(fmax=0.01, steps=2000)
        # BUG FIX: this line previously called `io.write(...)`, but no `io`
        # module is imported here, so it raised NameError and every
        # successful relaxation fell into the failure branch below. Use the
        # `write` function imported from ase.io at the top of the file.
        write("CONTCAR", p1, format='vasp')
        pcell = set_cell_from_vasp("CONTCAR")
        e = p1box.get_potential_energy()
        pcell.set_e(e)
    except Exception:
        # Narrowed from a bare `except:`; sentinel energy marks failure.
        pcell = cp(xcell)
        pcell.set_e(151206)
    return pcell
def rundim(xcell, mode):
    """Dispatch a dimer saddle-point search to the configured backend.

    Exits the process if the configured interface is unknown.
    """
    if itin.interface == 'lammps':
        return rundim_lammps(xcell, mode)
    elif itin.interface == 'vasp':
        return rundim_vasp(xcell, mode)
    else:
        # Python 2 print statement; this module is Python 2 code throughout.
        print 'ERROR: WRONG INTERFACE'
        sys.exit(1)
def rundim_vasp(xcell, mode):
    """Run a VASP dimer saddle search from `xcell` along initial mode `mode`.

    Writes the per-atom part of `mode` to MODECAR for VASP's dimer method and
    returns the resulting cell with the enthalpy stored via set_e.
    """
    lat = xcell.get_lattice()
    vol = xcell.get_volume()
    # Jacobian coupling cell and atomic degrees of freedom.
    jacob = (vol/itin.nat)**(1.0/3.0) * itin.nat**0.5
    latt = lat + np.dot(lat, mode[-3:]/jacob)
    xcell.set_lattice(latt)
    f = open('MODECAR', 'w')
    for x in mode[:-3]:
        f.write("%15.9f %15.9f %15.9f\n" % tuple(x))
    f.close()
    write_cell_to_vasp(xcell, "POSCAR")
    os.system("cp INCAR_DIM INCAR")
    os.system("sh runvasp.sh")
    # Last "free energy" entry in OUTCAR is the converged energy.
    e = float(os.popen("awk '/free energy/{print $5}' OUTCAR|tail -1").read())
    pcell = set_cell_from_vasp("CONTCAR")
    # Enthalpy = PV + E; unit factor 1602.2 — TODO confirm (kbar * A^3 -> eV).
    h = itin.press * pcell.get_volume() / 1602.2 + e
    pcell.set_e(h)
    # NOTE(review): `glob` and `sdata` are not imported/defined in this
    # module — confirm they are provided elsewhere or these lines raise
    # NameError at runtime.
    ddirs = glob.glob('Ddir*')
    ddir = 'Ddir' + str(len(ddirs))
    os.system('mkdir -p ' + ddir)
    os.system('cp POSCAR MODECAR OUTCAR XDATCAR DIMCAR ' + ddir)
    sdata.ddir = ddir
    return pcell
def rundim_ts(xcell, mode):
    """Run a dimer saddle search via the external dvjob.py driver.

    Writes the starting cell to TSCELL and the initial mode to `tmode`
    (pickled), launches dvjob.py, then reads back the saddle energy and
    structure. Returns the resulting cell with the enthalpy set via set_e.
    """
    # BUG FIX: was write_cell_to_vasp('TSCELL', 'w') — the filename was
    # passed as the cell and 'w' as the filename. Write xcell to 'TSCELL'.
    write_cell_to_vasp(xcell, 'TSCELL')
    # BUG FIX: was pick.dump(mode, 'f') — dumping to the literal string
    # 'f' instead of the open file object.
    with open('tmode', 'w') as f:
        pick.dump(mode, f)
    os.system('python -u dvjob.py > zout')
    os.system('rm WAVECAR')
    e = float(os.popen("awk '/TTENERGY/{print $2}' zout").read())
    pcell = set_cell_from_vasp('dimer1.con')
    # Enthalpy = PV + E; unit factor 1602.2 — TODO confirm (kbar * A^3 -> eV).
    h = itin.press * pcell.get_volume() / 1602.2 + e
    pcell.set_e(h)
    return pcell
def rundim_lammps(xcell, mode):
    """Run a Lanczos/dimer saddle search with a LAMMPS calculator.

    `mode` packs per-atom displacements in mode[:-3] and a lattice strain in
    its last three rows. On failure a copy of the input cell with sentinel
    energy 151206 is returned.
    """
    write_cell_to_vasp(xcell, 'DCAR')
    p = read('DCAR', format='vasp')
    parameters = itin.parameters
    calc = LAMMPS(parameters=parameters)
    p.set_calculator(calc)
    # E0 = p.get_potential_energy()
    natom = len(p)
    vol = p.get_volume()
    # Jacobian coupling cell and atomic degrees of freedom.
    jacob = (vol/natom)**(1.0/3.0) * natom**0.5
    # Normalize the search direction; fall back to a fresh random mode if
    # normalization fails (e.g. zero vector).
    try:
        mode = vunit(mode)
    except:
        mode = z_rmode()
    cellt = p.get_cell() + np.dot(p.get_cell(), mode[-3:]/jacob)
    p.set_cell(cellt, scale_atoms=True)
    p.set_positions(p.get_positions() + mode[:-3])
    d = lanczos.lanczos_atoms(p, mode=mode, rotationMax=4,
                              ss=True, phi_tol=15)
    dyn = FIRE(d, dt=0.1, maxmove=0.2, dtmax=0.2)
    try:
        dyn.run(fmax=0.05, steps=2000)
        E1 = p.get_potential_energy()
        write("CDCAR", d.R0, format='vasp', direct=True)
        pcell = set_cell_from_vasp("CDCAR")
        pcell.set_e(E1)
    except:
        # Sentinel energy marks a failed saddle search for the caller.
        pcell = cp(xcell)
        pcell.set_e(151206)
    return pcell
def set_cell_from_vasp(pcar):
    """Parse a VASP POSCAR/CONTCAR-style file `pcar` into a populated Cell.

    Handles both VASP 4 (no element-symbol line) and VASP 5 layouts, and
    both Direct (fractional) and Cartesian coordinates. Species, name and
    fingerprint settings come from the `itin` configuration module.
    """
    xcell = Cell()
    buff = []
    with open(pcar) as f:
        for line in f:
            buff.append(line.split())
    lat = np.array(buff[2:5], float)
    # VASP 5 inserts a line of element symbols before the per-species atom
    # counts; if line 5 does not parse as integers, drop it and retry.
    try:
        typt = np.array(buff[5], int)
    except ValueError:  # narrowed from a bare `except:` — only the int
        # conversion of a symbol line is expected to fail here
        del(buff[5])
        typt = np.array(buff[5], int)
    nat = sum(typt)
    pos = np.array(buff[7:7 + nat], float)
    xcell.set_name(itin.sname)
    xcell.set_lattice(lat)
    # Leading 'D' on the coordinate-mode line => Direct (fractional);
    # anything else is treated as Cartesian.
    if buff[6][0].strip()[0] == 'D':
        xcell.set_positions(pos)
    else:
        xcell.set_cart_positions(pos)
    xcell.set_typt(typt)
    xcell.set_znucl(itin.znucl)
    xcell.set_types()
    xcell.cal_fp(itin.fpcut, itin.lmax)
    return xcell
def write_cell_to_vasp(xcell, pcar):
    """Write `xcell` to file `pcar` in VASP POSCAR format (Direct coords)."""
    out = open(pcar, 'w')
    # Header: structure name and universal scale factor.
    out.write(itin.sname + '\n')
    out.write('1.0\n')
    for row in xcell.get_lattice():
        out.write("%15.9f %15.9f %15.9f\n" % tuple(row))
    # Element symbols line (one trailing space per symbol, as before).
    for iz in itin.znucl:
        out.write(itdbase.atom_data[iz][1])
        out.write(' ')
    out.write('\n')
    # Per-species atom counts.
    for count in xcell.get_typt():
        out.write(str(count) + ' ')
    out.write('\n')
    out.write('Direct\n')
    for row in xcell.get_positions():
        out.write("%15.9f %15.9f %15.9f\n" % tuple(row))
    out.close()
def getx(cell1, cell2):
    """Return the normalized displacement mode taking cell2 to cell1.

    The result has shape (nat + 3, 3): per-atom Cartesian differences in the
    first nat rows and the (Jacobian-scaled) lattice difference in the last
    three. Falls back to a zero mode if normalization fails.
    """
    mode = np.zeros((itin.nat + 3, 3))
    mode[-3:] = cell1.get_lattice() - cell2.get_lattice()
    ilat = np.linalg.inv(cell1.get_lattice())
    vol = cell1.get_volume()
    # Jacobian coupling cell and atomic degrees of freedom.
    jacob = (vol / itin.nat)**(1.0 / 3.0) * itin.nat**0.5
    mode[-3:] = np.dot(ilat, mode[-3:]) * jacob
    pos1 = cell1.get_cart_positions()
    pos2 = cell2.get_cart_positions()
    for i in range(itin.nat):
        mode[i] = pos1[i] - pos2[i]
    # vunit raises when the mode cannot be normalized (e.g. zero vector).
    try:
        mode = vunit(mode)
    except:
        mode = np.zeros((itin.nat + 3, 3))
    return mode
def z_rmode():
    """Return a random, unit-normalized mode of shape (nat + 3, 3)."""
    raw = vrand(np.zeros((itin.nat + 3, 3)))
    return vunit(raw)
| 7,462 | 3,232 |
from ckan.lib.cli import CkanCommand
import logging
import sys
class Issues(CkanCommand):
    """
    Usage:
        paster issues init_db
            - Creates the database table issues needs to run
        paster issues upgrade_db
            - Does any database migrations required (idempotent)
    """
    # NOTE(review): the docstring starts with a newline, so this summary is
    # the empty string — confirm whether the first real line was intended.
    summary = __doc__.split('\n')[0]
    usage = __doc__
    def command(self):
        """
        Parse command line arguments and call appropriate method.
        """
        # No sub-command (or an explicit help flag): show usage and bail out.
        if not self.args or self.args[0] in ['--help', '-h', 'help']:
            print self.usage
            sys.exit(1)
        cmd = self.args[0]
        # Load the CKAN config before touching the model layer.
        self._load_config()
        self.log = logging.getLogger(__name__)
        # Imports are deferred until after _load_config so the model binds
        # to the configured database engine.
        if cmd == 'init_db':
            from ckanext.issues.model import setup
            setup()
            self.log.info('Issues tables are initialized')
        elif cmd == 'upgrade_db':
            from ckanext.issues.model import upgrade
            upgrade()
            self.log.info('Issues tables are up to date')
        else:
            self.log.error('Command %s not recognized' % (cmd,))
| 1,113 | 315 |
from django.http import HttpResponseNotFound, HttpResponseServerError
from django.shortcuts import render
from django.template import RequestContext
from django.template.loader import get_template
def login(request):
    """Render the login page; no extra context is required."""
    return render(request, 'login.html')
def error404(request):
    """Render the custom 404 template with a 404 status code.

    BUG FIX: this handler previously returned HttpResponseServerError,
    so "page not found" responses carried a 500 status. A 404 handler
    must return 404 (HttpResponseNotFound).
    """
    t = get_template('404.html')
    res = HttpResponseNotFound(t.render(RequestContext(request)))
    return res
def error500(request):
    """Render the custom 500 template with a 500 status code."""
    template = get_template('500.html')
    return HttpResponseServerError(template.render(RequestContext(request)))
| 510 | 155 |
#!/usr/bin/env python
# Copyright (c) 2017 Warren Kumari
"""
This small program uses a Raspberry Pi Zero W to drive the display portion
of a Symmetricom ND-4 display.
This replaces the processor board of the ND-4, and powers the Pi from the
internal ND-4 power supply. The original processor board simply drives a
MAX7219 which is conveniently on the power-supply board, to the processor
board just gets unplugged and the Pi connected instead.
The wiring is as follows:
ND-4 MAX7219 Function Pi Pin
--------------------------------
VCC VCC 2
GND GND 6
PA0 CLK SPI CLK(11) 23
PA1 LOAD/CS SPI CE0(8) 24
PA2 DIN MOSI(10) 19
All the hard work is done by Richard Hull's luma.led_matrix library from: https://github.com/rm-hull/luma.led_matrix
"""
from datetime import datetime
import time
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.virtual import sevensegment
from luma.led_matrix.device import max7219
# Setup the interface.
# SPI bus 0, chip-select 0; GPIO handling is not needed (noop) because the
# MAX7219 is driven purely over SPI.
serial = spi(port=0, device=0, gpio=noop())
device = max7219(serial, cascaded=1)
seg = sevensegment(device)
# For some reason the LED display ignores the first octet.
# The colons are addressed with a period at position 8 in the string,
# and the "point" is at 3.
# For added entertainment, the digits are all reversed as well, so
# 17:28:31 is sent as "0013827.1"
# Main loop: rewrite the display every second, toggling the colon each
# half-second so it appears to blink.
while True:
    timestr = datetime.now().strftime('%H%M%S')
    # Reverse the time string (the display expects digits reversed).
    revtimestr = timestr[::-1]
    # Pad past the ignored first octet (see note above).
    paddedstr = "00" + revtimestr
    # ... and display it.
    seg.text = paddedstr
    # and now sleep around 1/2 second and redisplay with the colon on
    # to make it "flash"
    time.sleep(0.5)
    # insert a period into the reversed string at index 5 to light the colon
    # (NOTE(review): comment previously said "before last character" — the
    # code inserts at position 5; verify against the hardware mapping).
    # Removed: add a period in spot 3 to get period to flash
    revtimestr = revtimestr[:5] + '.' + revtimestr[5:]
    paddedstr = "00" + revtimestr
    seg.text = paddedstr
    time.sleep(0.5)
| 1,984 | 683 |
#!/usr/bin/python
#
# James Sandford, copyright BBC 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from hypothesis import given, strategies as st # type: ignore
from rtpPayload_ttml import (RTPPayload_TTML, LengthError, SUPPORTED_ENCODINGS,
utfEncode)
class TestExtension(TestCase):
    """Unit tests for RTPPayload_TTML construction, validation and
    (de)serialization to/from the on-wire bytearray format."""

    def setUp(self):
        # A default payload instance shared by the non-property tests.
        self.thisP = RTPPayload_TTML()

    @given(st.tuples(
        st.text(),
        st.sampled_from(SUPPORTED_ENCODINGS),
        st.booleans()).filter(
        lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_init(self, data):
        """Constructor stores reserved bits, document, encoding and BOM flag."""
        doc, encoding, bom = data
        reservedBits = bytearray(b'\x00\x00')
        newP = RTPPayload_TTML(reservedBits, doc, encoding, bom)
        self.assertEqual(newP.reserved, reservedBits)
        self.assertEqual(newP.userDataWords, doc)
        self.assertEqual(newP._encoding, encoding)
        self.assertEqual(newP._bom, bom)

    @given(
        st.text(),
        st.text().filter(lambda x: x not in SUPPORTED_ENCODINGS),
        st.booleans())
    def test_init_invalidEnc(self, doc, enc, bom):
        """Unsupported encodings are rejected at construction time."""
        reservedBits = bytearray(b'\x00\x00')
        with self.assertRaises(AttributeError):
            RTPPayload_TTML(reservedBits, doc, enc, bom)

    def test_reserved_default(self):
        """Reserved bits default to two zero bytes."""
        self.assertEqual(self.thisP.reserved, bytearray(b'\x00\x00'))

    def test_reserved_notBytes(self):
        """Non-bytearray reserved values are rejected."""
        with self.assertRaises(AttributeError):
            self.thisP.reserved = ""

    @given(st.binary().filter(lambda x: x != bytearray(b'\x00\x00')))
    def test_reserved_invalid(self, value):
        """Any reserved value other than b'\\x00\\x00' is rejected."""
        with self.assertRaises(ValueError):
            self.thisP.reserved = bytearray(value)

    def test_userDataWords_default(self):
        """The document defaults to the empty string."""
        self.assertEqual(self.thisP.userDataWords, "")

    @given(st.text().filter(lambda x: len(utfEncode(x, "UTF-8")) < 2**16))
    def test_userDataWords(self, doc):
        """Documents that fit in 16-bit length round-trip via the property."""
        self.thisP.userDataWords = doc
        self.assertEqual(self.thisP.userDataWords, doc)

    def test_userDataWords_invalidType(self):
        """Non-string documents are rejected."""
        with self.assertRaises(AttributeError):
            self.thisP.userDataWords = 0

    def test_userDataWords_tooLong(self):
        """A document whose encoding is >= 2**16 bytes must be rejected."""
        # Build the oversized document directly instead of 65536 string
        # concatenations (the previous += loop was a quadratic antipattern).
        doc = "a" * (2**16)
        with self.assertRaises(LengthError):
            self.thisP.userDataWords = doc

    @given(st.tuples(
        st.text(),
        st.sampled_from(SUPPORTED_ENCODINGS),
        st.booleans()).filter(
        lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
    def test_userDataWords_encodings(self, data):
        """Every supported encoding stores the raw bytes and decodes back."""
        doc, encoding, bom = data
        payload = RTPPayload_TTML(
            userDataWords=doc, encoding=encoding, bom=bom)
        self.assertEqual(payload.userDataWords, doc)
        self.assertEqual(payload._userDataWords, utfEncode(doc, encoding, bom))

    def test_eq(self):
        """Payloads with identical contents compare equal."""
        reservedBits = bytearray(b'\x00\x00')
        newP = RTPPayload_TTML(reservedBits, "")
        self.assertEqual(newP, self.thisP)

    def test_bytearray_default(self):
        """A default payload serializes to four zero bytes and back."""
        expected = bytearray(4)
        self.assertEqual(bytes(self.thisP), expected)
        newP = RTPPayload_TTML().fromBytearray(expected)
        self.assertEqual(newP, self.thisP)

    @given(st.binary(min_size=2, max_size=2).filter(
        lambda x: x != b'\x00\x00'))
    def test_fromBytearray_invalidLen(self, length):
        """A length field that disagrees with the actual data is rejected."""
        bArray = bytearray(4)
        bArray[2:4] = length
        with self.assertRaises(LengthError):
            RTPPayload_TTML().fromBytearray(bArray)

    @given(st.text())
    def test_toBytearray(self, doc):
        """Serialization is reserved + 16-bit big-endian length + document."""
        self.thisP.userDataWords = doc
        bDoc = utfEncode(doc)
        expected = bytearray(2)
        expected += int(len(bDoc)).to_bytes(2, byteorder='big')
        expected += bDoc
        self.assertEqual(expected, self.thisP.toBytearray())

    @given(st.text())
    def test_fromBytearray(self, doc):
        """Deserialization of a well-formed bytearray reproduces the payload."""
        expected = RTPPayload_TTML(userDataWords=doc)
        bDoc = utfEncode(doc)
        bArray = bytearray(2)
        bArray += int(len(bDoc)).to_bytes(2, byteorder='big')
        bArray += bDoc
        self.thisP.fromBytearray(bArray)
        self.assertEqual(expected, self.thisP)
| 4,730 | 1,629 |
import importlib
client_hints_ua_list = importlib.import_module("client-hints.resources.clienthintslist").client_hints_ua_list
def main(request, response):
    """
    Simple handler that sets a response header based on which client hint
    request headers were received.
    """
    # Allow any origin to read the echoed hint headers.
    response.headers.append(b"Access-Control-Allow-Origin", b"*")
    response.headers.append(b"Access-Control-Allow-Headers", b"*")
    response.headers.append(b"Access-Control-Expose-Headers", b"*")
    # Echo each received client-hint header back under "<name>-received".
    for hint_name in client_hints_ua_list():
        hint_value = request.headers.get(hint_name)
        if hint_value is not None:
            response.headers.set(hint_name + b"-received", hint_value)
    return 200, [], u""
| 860 | 272 |
import os
import json
import numpy as np
class Vocab(object):
    """Word vocabulary with counts, word<->id maps and optional character
    information (per-word character id sequences plus a char vocabulary)."""

    def __init__(self):
        self.word_to_id = dict()   # word -> integer id
        self.count = list()        # count[i] = frequency of words[i]
        self.words = list()        # id -> word
        self.to_lower = False      # lowercase input text when counting/looking up
        # add character information
        self.chars = list()  # ['a', 'b', 'c', 'd', ...]
        self.char_to_id = dict()  # {'a': 0, 'b': 1, ...}
        self.word_to_chars = list()  # [ ['a', 'b', 'c'], ... ]
        self.word_max_len = 0      # longest char sequence (incl. beg/end markers)
        self.char_beg_id = 0       # id of the word-begin character marker
        self.char_end_id = 0       # id of the word-end character marker

    def load_data(self, file_list):
        """Count word occurrences over all files.

        Returns (word->count dict, total line count, total word count).
        """
        v_count = dict()
        total_line = 0
        total_word = 0
        for file in file_list:
            print('[%s.%s] generate_vocab: ' % (__name__, self.__class__.__name__), file)
            with open(file, 'rt') as f:
                for line in f:
                    # to lower
                    if self.to_lower:
                        line = line.lower()
                    for w in line.split():
                        v_count.setdefault(w, 0)
                        v_count[w] += 1
                        total_word += 1
                    total_line += 1
        return v_count, total_line, total_word

    def generate_vocab(self, file_list, cutoff=0, max_size=None,
                       add_beg_token='<s>', add_end_token='</s>', add_unk_token='<unk>',
                       to_lower=False):
        """Build the vocabulary from text files.

        Words with count <= cutoff are dropped; if max_size is given the list
        is truncated and the dropped mass folded into the <unk> count. The
        special tokens (if not None) are pinned to the front of the vocab.
        Returns self for chaining.
        """
        self.to_lower = to_lower
        v_count, total_line, total_word = self.load_data(file_list)
        # Special tokens get synthetic counts (one beg/end per line).
        if add_beg_token is not None:
            v_count[add_beg_token] = total_line
        if add_end_token is not None:
            v_count[add_end_token] = total_line
        if add_unk_token is not None:
            v_count[add_unk_token] = 1
        print('[%s.%s] vocab_size=' % (__name__, self.__class__.__name__), len(v_count))
        print('[%s.%s] total_line=' % (__name__, self.__class__.__name__), total_line)
        print('[%s.%s] total_word=' % (__name__, self.__class__.__name__), total_word)
        # cutoff
        v_list = []
        ignore_list = [add_beg_token, add_end_token, add_unk_token]
        for w, count in v_count.items():
            if w in ignore_list:
                continue
            if count > cutoff:
                v_list.append((w, count))
        # to handle the words with the same counts
        v_list = sorted(v_list, key=lambda x: x[0])  # sorted as the word
        v_list = sorted(v_list, key=lambda x: -x[1])  # sorted as the count
        # Pin the special tokens (deduplicated) to the front, in order.
        ignore_dict = dict()
        for ignore_token in reversed(ignore_list):
            if ignore_token is not None and ignore_token not in ignore_dict:
                v_list.insert(0, (ignore_token, v_count[ignore_token]))
                ignore_dict[ignore_token] = 0
        print('[%s.%s] vocab_size(after_cutoff)=' % (__name__, self.__class__.__name__), len(v_list))
        if max_size is not None:
            print('[%s.%s] vocab max_size=()' % (__name__, self.__class__.__name__), max_size)
            # Mass of the truncated tail is credited to <unk>.
            unk_count = sum(x[1] for x in v_list[max_size:])
            v_list = v_list[0: max_size]
            # revise the unkcount
            if add_unk_token is not None:
                for i in range(len(v_list)):
                    if v_list[i][0] == add_unk_token:
                        v_list[i] = (add_unk_token, v_list[i][1] + unk_count)
                        break
        # create vocab
        self.count = list()
        self.words = list()
        self.word_to_id = dict()
        for i, (w, count) in enumerate(v_list):
            self.words.append(w)
            self.count.append(count)
            self.word_to_id[w] = i
        return self

    def write(self, fname):
        """Write the vocabulary (and, if present, char data) to fname.

        Format: a to_lower header line, then "id<TAB>word<TAB>count" rows,
        optionally followed by space-separated char ids. Char vocabulary is
        written to a companion `<fname>.chr` file.
        """
        with open(fname, 'wt') as f:
            f.write('to_lower = %d\n' % int(self.to_lower))
            for i in range(len(self.words)):
                f.write('{}\t{}\t{}'.format(i, self.words[i], self.count[i]))
                if self.word_to_chars:
                    s = ' '.join('{}'.format(k) for k in self.word_to_chars[i])
                    f.write('\t{}'.format(s))
                f.write('\n')
        # write a extra char vocabulary
        if self.chars:
            with open(fname + '.chr', 'wt') as f:
                f.write('char_beg_id = %d\n' % self.char_beg_id)
                f.write('char_end_id = %d\n' % self.char_end_id)
                f.write('word_max_len = %d\n' % self.word_max_len)
                f.write('id \t char\n')
                for i in range(len(self.chars)):
                    f.write('{}\t{}\n'.format(i, self.chars[i]))

    def read(self, fname):
        """Load a vocabulary previously saved by write(); returns self."""
        self.words = list()
        self.count = list()
        self.word_to_id = dict()
        self.word_to_chars = list()
        with open(fname, 'rt') as f:
            # First line holds the to_lower flag ("to_lower = 0/1").
            self.to_lower = bool(int(f.readline().split()[-1]))
            for line in f:
                a = line.split()
                i = int(a[0])
                w = a[1]
                n = int(a[2])
                self.words.append(w)
                self.count.append(n)
                self.word_to_id[w] = i
                # read word_to_chars
                if len(a) > 3:
                    self.word_to_chars.append([int(k) for k in a[3:]])
        if self.word_to_chars:
            # read char vocab
            self.chars = list()
            self.char_to_id = dict()
            with open(fname + '.chr', 'rt') as f:
                self.char_beg_id = int(f.readline().split()[-1])
                self.char_end_id = int(f.readline().split()[-1])
                self.word_max_len = int(f.readline().split()[-1])
                f.readline()  # skip the "id \t char" header row
                for line in f:
                    a = line.split()
                    i = int(a[0])
                    c = a[1]
                    self.chars.append(c)
                    self.char_to_id[c] = i
        return self

    def create_chars(self, add_char_beg='<s>', add_char_end='</s>'):
        """Build the character vocabulary and per-word char id sequences.

        Each word's sequence is wrapped in begin/end marker ids. No-op if
        char data already exists. NOTE(review): passing None for the markers
        would raise KeyError below — confirm callers always supply both.
        """
        if self.chars:
            return
        # process the word and split to chars
        c_dict = dict()
        for w in self.words:
            for c in list(w):
                c_dict.setdefault(c, 0)
        if add_char_beg is not None:
            c_dict.setdefault(add_char_beg)
        if add_char_end is not None:
            c_dict.setdefault(add_char_end)
        self.chars = sorted(c_dict.keys())
        self.char_to_id = dict([(c, i) for i, c in enumerate(self.chars)])
        self.char_beg_id = self.char_to_id[add_char_beg]
        self.char_end_id = self.char_to_id[add_char_end]
        self.word_to_chars = []
        for w in self.words:
            chr_ids = [self.char_to_id[c] for c in w]
            chr_ids.insert(0, self.char_beg_id)
            chr_ids.append(self.char_end_id)
            self.word_to_chars.append(chr_ids)
        self.word_max_len = max([len(x) for x in self.word_to_chars])

    def words_to_ids(self, word_list, unk_token='<unk>'):
        """Map words to ids, substituting unk_token for unknown words.

        Raises KeyError if a word is unknown and no usable unk_token exists.
        """
        id_list = []
        for w in word_list:
            if self.to_lower:
                w = w.lower()
            if w in self.word_to_id:
                id_list.append(self.word_to_id[w])
            elif unk_token is not None and unk_token in self.word_to_id:
                id_list.append(self.word_to_id[unk_token])
            else:
                raise KeyError('[%s.%s] cannot find the word = %s' % (__name__, self.__class__.__name__, w))
        return id_list

    def ids_to_words(self, id_list):
        """Map a list of ids back to words."""
        return [self.words[i] for i in id_list]

    def get_size(self):
        """Return the number of words in the vocabulary."""
        return len(self.words)

    def get_char_size(self):
        """Return the number of characters; raises if no char data exists."""
        if not self.chars:
            raise TypeError('[Vocab] no char information!!')
        return len(self.chars)

    def __contains__(self, item):
        # Membership test checks words, not characters.
        return item in self.word_to_id
class VocabX(Vocab):
    """Vocab variant that counts words only on every `total_level`-th line
    (those whose index modulo total_level equals read_level), for files that
    interleave several parallel streams line by line."""

    def __init__(self, total_level=2, read_level=0):
        super().__init__()
        self.total_level = total_level
        self.read_level = read_level

    def load_data(self, file_list):
        """Count words on the selected lines; every line counts toward
        total_line. Returns (word->count dict, total lines, total words)."""
        v_count = dict()
        total_line = 0
        total_word = 0
        for file in file_list:
            print('[%s.%s] generate_vocab: ' % (__name__, self.__class__.__name__), file)
            with open(file, 'rt') as f:
                # Line numbering restarts for each file, as before.
                for line_no, line in enumerate(f):
                    if line_no % self.total_level == self.read_level:
                        for token in line.split():
                            v_count[token] = v_count.get(token, 0) + 1
                            total_word += 1
                    total_line += 1
        return v_count, total_line, total_word
| 8,892 | 3,072 |
# ==BEGIN LICENSE==
#
# MIT License
#
# Copyright (c) 2018 SRI Lab, ETH Zurich
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ==END LICENSE==
import ctypes
import os
from dpfinder.logging import logger
from dpfinder.utils.redirect import redirect_stdout
# Load the compiled ratio-distribution library shipped next to this module.
path = os.path.dirname(__file__)
lib = ctypes.cdll.LoadLibrary(path + '/libratio.so')
# C function: CDF of the ratio of two correlated Gaussian variables.
joint_gauss_fraction = getattr(lib, "ratio_cdf_extern", None)
joint_gauss_fraction.restype = ctypes.c_double
# C function: PDF of the ratio of two correlated Gaussian variables.
ratio_pdf_extern = getattr(lib, "ratio_pdf_extern", None)
ratio_pdf_extern.restype = ctypes.c_double
def cdf(lower, upper, mx, my, sx, sy, rho):
    """CDF over [lower, upper] of the ratio of two correlated Gaussians
    with means (mx, my), std devs (sx, sy) and correlation rho."""
    c_args = [ctypes.c_double(v) for v in (lower, upper, mx, my, sx, sy, rho)]
    return joint_gauss_fraction(*c_args)
def pdf(w, mx, my, sx, sy, rho):
    """PDF at w of the ratio of two correlated Gaussians with means
    (mx, my), std devs (sx, sy) and correlation rho."""
    c_args = [ctypes.c_double(v) for v in (w, mx, my, sx, sy, rho)]
    return ratio_pdf_extern(*c_args)
# C function: confidence interval for the ratio estimate.
ratio_confidence_interval_C = getattr(lib, "ratio_confidence_interval_extern", None)
ratio_confidence_interval_C.restype = ctypes.c_double
def ratio_confidence_interval(p1, p2, d1, d2, corr, center, confidence, err_goal):
    """Call the C routine computing a confidence interval for a ratio.

    All arguments are converted to C doubles and passed through; the C
    library's stdout chatter is redirected into the debug logger.
    """
    p1 = ctypes.c_double(p1)
    p2 = ctypes.c_double(p2)
    d1 = ctypes.c_double(d1)
    d2 = ctypes.c_double(d2)
    corr = ctypes.c_double(corr)
    center = ctypes.c_double(center)
    confidence = ctypes.c_double(confidence)
    err_goal = ctypes.c_double(err_goal)
    # The C code prints diagnostics; capture them as debug log lines.
    with redirect_stdout.redirect(output=logger.debug):
        ret = ratio_confidence_interval_C(p1, p2, d1, d2, corr, center, confidence, err_goal)
    return ret
| 2,749 | 1,087 |
####!/usr/bin/env python
#----------------------------
"""
:py:class:`DCConfigParameters` - class supporting configuration parameters for application
==========================================================================================
See:
* :py:class:`DCStore`
* :py:class:`DCType`
* :py:class:`DCRange`
* :py:class:`DCVersion`
* :py:class:`DCBase`
* :py:class:`DCInterface`
* :py:class:`DCUtils`
* :py:class:`DCDetectorId`
* :py:class:`DCConfigParameters`
* :py:class:`DCFileName`
* :py:class:`DCLogger`
* :py:class:`DCMethods`
* :py:class:`DCEmail`
This software was developed for the SIT project.
If you use all or part of it, please give an appropriate acknowledgment.
Created: 2016-05-17 by Mikhail Dubrovin
"""
#----------------------------
from PSCalib.DCLogger import log
from CalibManager.ConfigParameters import ConfigParameters
#----------------------------
class DCConfigParameters(ConfigParameters) :
    """A storage of configuration parameters for Detector Calibration Store (DCS) project.
    """
    def __init__(self, fname=None) :
        """Constructor.
        - fname the file name with configuration parameters, if not specified then default value.
        """
        ConfigParameters.__init__(self)
        self.name = self.__class__.__name__
        self.fname_cp = 'confpars-dcs.txt' # Re-define default config file name
        log.info('In %s c-tor', self.name)
        self.declareParameters()
        self.readParametersFromFile(fname)
#-----------------------------
    def declareParameters(self) :
        """Register the DCS parameters (log settings and repository path)."""
        # Possible typs for declaration : 'str', 'int', 'long', 'float', 'bool'
        # Logger.py
        self.log_level = self.declareParameter(name='LOG_LEVEL_OF_MSGS', val_def='info', type='str' )
        self.log_file = self.declareParameter(name='LOG_FILE_NAME', val_def='./log.txt', type='str' )
        self.dir_repo = self.declareParameter(name='CDS_DIR_REPO', val_def='/reg/d/psdm/detector/calib', type='str' )
        #self.dir_repo = self.declareParameter(name='CDS_DIR_REPO', val_def='/reg/g/psdm/detector/calib', type='str' )
#------------------------------
# Module-level singleton configuration object shared across the package.
cp = DCConfigParameters()
#------------------------------
def test_DCConfigParameters() :
    """Self-test: enable full logging, print all parameters, tweak one, save to file."""
    # FIX: was `0377` — Python-2-only octal literal, a SyntaxError in Python 3.
    # 0o377 (= 255) is valid in both Python 2.6+ and Python 3.
    log.setPrintBits(0o377)
    cp.readParametersFromFile()
    cp.printParameters()
    cp.log_level.setValue('debug')
    cp.saveParametersInFile()
#------------------------------
if __name__ == "__main__" :
    # Self-test entry point: run only when executed directly, not on import.
    import sys
    test_DCConfigParameters()
    sys.exit(0)
#------------------------------
| 2,611 | 793 |
import datetime
import json
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, Float, \
Enum, DateTime, Numeric, Text, Unicode, UnicodeText
from sqlalchemy import event
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy.sql import func
from sqlalchemy.orm import relationship, backref
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy_i18n import make_translatable, translation_base, Translatable
# Enable sqlalchemy_i18n model translation for Portuguese and English,
# falling back to English when a locale-specific row is missing.
make_translatable(options={'locales': ['pt', 'en'],
                           'auto_create_locales': True,
                           'fallback_locale': 'en'})

# Shared Flask-SQLAlchemy handle used by every model below.
db = SQLAlchemy()
# noinspection PyClassHasNoInit
class DataSourceFormat:
    """Names of the file/storage formats a data source may have."""
    CSV = 'CSV'
    CUSTOM = 'CUSTOM'
    GEO_JSON = 'GEO_JSON'
    JDBC = 'JDBC'
    IMAGE_FOLDER = 'IMAGE_FOLDER'
    DATA_FOLDER = 'DATA_FOLDER'
    HAR_IMAGE_FOLDER = 'HAR_IMAGE_FOLDER'
    HDF5 = 'HDF5'
    HIVE = 'HIVE'
    JSON = 'JSON'
    NPY = 'NPY'
    PICKLE = 'PICKLE'
    PARQUET = 'PARQUET'
    SAV = 'SAV'
    SHAPEFILE = 'SHAPEFILE'
    TAR_IMAGE_FOLDER = 'TAR_IMAGE_FOLDER'
    TEXT = 'TEXT'
    VIDEO_FOLDER = 'VIDEO_FOLDER'
    XML_FILE = 'XML_FILE'
    UNKNOWN = 'UNKNOWN'

    @staticmethod
    def values():
        """Return every declared format name (skips dunders and this helper)."""
        return [attr for attr in vars(DataSourceFormat)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class DataSourceInitialization:
    """Lifecycle states of a data source's initialization process."""
    NO_INITIALIZED = 'NO_INITIALIZED'
    INITIALIZING = 'INITIALIZING'
    INITIALIZED = 'INITIALIZED'

    @staticmethod
    def values():
        """Return the declared state names."""
        return [attr for attr in vars(DataSourceInitialization)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class ModelType:
    """Kinds of machine-learning models the platform can register."""
    KERAS = 'KERAS'
    MLEAP = 'MLEAP'
    PERFORMANCE_SPARK = 'PERFORMANCE_SPARK'
    PERFORMANCE_KERAS = 'PERFORMANCE_KERAS'
    SPARK_ML_CLASSIFICATION = 'SPARK_ML_CLASSIFICATION'
    SPARK_ML_REGRESSION = 'SPARK_ML_REGRESSION'
    SPARK_MLLIB_CLASSIFICATION = 'SPARK_MLLIB_CLASSIFICATION'
    UNSPECIFIED = 'UNSPECIFIED'

    @staticmethod
    def values():
        """Return the declared model-type names."""
        return [attr for attr in vars(ModelType)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class DeploymentStatus:
    """Possible deployment states of a model."""
    NOT_DEPLOYED = 'NOT_DEPLOYED'
    ERROR = 'ERROR'
    EDITING = 'EDITING'
    SAVED = 'SAVED'
    RUNNING = 'RUNNING'
    STOPPED = 'STOPPED'
    SUSPENDED = 'SUSPENDED'
    PENDING = 'PENDING'
    DEPLOYED = 'DEPLOYED'

    @staticmethod
    def values():
        """Return the declared status names."""
        return [attr for attr in vars(DeploymentStatus)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class StorageType:
    """Backends a Storage record may point at."""
    MONGODB = 'MONGODB'
    ELASTIC_SEARCH = 'ELASTIC_SEARCH'
    HDFS = 'HDFS'
    HIVE = 'HIVE'
    HIVE_WAREHOUSE = 'HIVE_WAREHOUSE'
    KAFKA = 'KAFKA'
    LOCAL = 'LOCAL'
    JDBC = 'JDBC'
    CASSANDRA = 'CASSANDRA'

    @staticmethod
    def values():
        """Return the declared backend names."""
        return [attr for attr in vars(StorageType)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class DataType:
    """Logical data types an Attribute column may carry."""
    BINARY = 'BINARY'
    CHARACTER = 'CHARACTER'
    DATE = 'DATE'
    DATETIME = 'DATETIME'
    DECIMAL = 'DECIMAL'
    DOUBLE = 'DOUBLE'
    ENUM = 'ENUM'
    FILE = 'FILE'
    FLOAT = 'FLOAT'
    INTEGER = 'INTEGER'
    LAT_LONG = 'LAT_LONG'
    LONG = 'LONG'
    TEXT = 'TEXT'
    TIME = 'TIME'
    TIMESTAMP = 'TIMESTAMP'
    VECTOR = 'VECTOR'

    @staticmethod
    def values():
        """Return the declared type names."""
        return [attr for attr in vars(DataType)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class AttributeForeignKeyDirection:
    """Which side of a foreign-key relation an attribute sits on."""
    FROM = 'FROM'
    TO = 'TO'

    @staticmethod
    def values():
        """Return the declared direction names."""
        return [attr for attr in vars(AttributeForeignKeyDirection)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class PrivacyRiskType:
    """Categories of privacy risk tracked per data source."""
    IDENTIFICATION = 'IDENTIFICATION'

    @staticmethod
    def values():
        """Return the declared risk-type names."""
        return [attr for attr in vars(PrivacyRiskType)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class PermissionType:
    """Access levels that can be granted to a user."""
    READ = 'READ'
    WRITE = 'WRITE'
    MANAGE = 'MANAGE'

    @staticmethod
    def values():
        """Return the declared permission names."""
        return [attr for attr in vars(PermissionType)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class AnonymizationTechnique:
    """Techniques available to anonymize an attribute's values."""
    ENCRYPTION = 'ENCRYPTION'
    GENERALIZATION = 'GENERALIZATION'
    SUPPRESSION = 'SUPPRESSION'
    MASK = 'MASK'
    NO_TECHNIQUE = 'NO_TECHNIQUE'

    @staticmethod
    def values():
        """Return the declared technique names."""
        return [attr for attr in vars(AnonymizationTechnique)
                if not attr.startswith('_') and attr != 'values']
# noinspection PyClassHasNoInit
class PrivacyType:
    """Privacy classification of an attribute (k-anonymity terminology)."""
    IDENTIFIER = 'IDENTIFIER'
    QUASI_IDENTIFIER = 'QUASI_IDENTIFIER'
    SENSITIVE = 'SENSITIVE'
    NON_SENSITIVE = 'NON_SENSITIVE'

    @staticmethod
    def values():
        """Return the declared classification names."""
        return [attr for attr in vars(PrivacyType)
                if not attr.startswith('_') and attr != 'values']
# Association tables definition
class Attribute(db.Model):
    """ Data source attribute (a column/field of a data source). """
    __tablename__ = 'attribute'

    # Fields
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    description = Column(String(500))
    type = Column(Enum(*list(DataType.values()),
                       name='DataTypeEnumType'), nullable=False)
    size = Column(Integer)
    precision = Column(Integer)
    scale = Column(Integer)
    nullable = Column(Boolean,
                      default=False, nullable=False)
    enumeration = Column(Boolean,
                         default=False, nullable=False)
    missing_representation = Column(String(200))
    # ML role flags: whether the attribute is used as a feature and/or label.
    feature = Column(Boolean,
                     default=True, nullable=False)
    label = Column(Boolean,
                   default=True, nullable=False)
    # Profiling statistics. NOTE(review): median/max/min/missing are stored
    # as strings — presumably so non-numeric attributes can be profiled too;
    # confirm against the profiling job.
    distinct_values = Column(Integer)
    mean_value = Column(Float)
    median_value = Column(String(200))
    max_value = Column(String(200))
    min_value = Column(String(200))
    std_deviation = Column(Float)
    missing_total = Column(String(200))
    deciles = Column(LONGTEXT)
    format = Column(String(100))
    key = Column(Boolean,
                 default=False, nullable=False)

    # Associations
    data_source_id = Column(Integer,
                            ForeignKey("data_source.id",
                                       name="fk_attribute_data_source_id"),
                            nullable=False,
                            index=True)
    data_source = relationship(
        "DataSource",
        overlaps='attributes',
        foreign_keys=[data_source_id],
        backref=backref("attributes",
                        cascade="all, delete-orphan"))
    # One-to-one privacy configuration, eagerly loaded with the attribute.
    attribute_privacy = relationship(
        "AttributePrivacy", uselist=False,
        back_populates="attribute", lazy='joined')

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class AttributeForeignKey(db.Model):
    """ Attribute participating in a (possibly composite) foreign key between data sources. """
    __tablename__ = 'attribute_foreign_key'

    # Fields
    id = Column(Integer, primary_key=True)
    # Position of this attribute within the composite key.
    order = Column(Integer, nullable=False)
    direction = Column(Enum(*list(AttributeForeignKeyDirection.values()),
                            name='AttributeForeignKeyDirectionEnumType'), nullable=False)

    # Associations
    foreign_key_id = Column(Integer,
                            ForeignKey("data_source_foreign_key.id",
                                       name="fk_attribute_foreign_key_foreign_key_id"),
                            nullable=False,
                            index=True)
    foreign_key = relationship(
        "DataSourceForeignKey",
        overlaps='attributes',
        foreign_keys=[foreign_key_id],
        backref=backref("attributes",
                        cascade="all, delete-orphan"))
    from_attribute_id = Column(Integer,
                               ForeignKey("attribute.id",
                                          name="fk_attribute_foreign_key_from_attribute_id"),
                               nullable=False,
                               index=True)
    from_attribute = relationship(
        "Attribute",
        overlaps='foreign_keys',
        foreign_keys=[from_attribute_id],
        backref=backref("foreign_keys",
                        cascade="all, delete-orphan"))
    to_attribute_id = Column(Integer,
                             ForeignKey("attribute.id",
                                        name="fk_attribute_foreign_key_to_attribute_id"),
                             nullable=False,
                             index=True)
    to_attribute = relationship(
        "Attribute",
        overlaps='references',
        foreign_keys=[to_attribute_id],
        backref=backref("references",
                        cascade="all, delete-orphan"))

    def __str__(self):
        # FIX: __str__ must return a str; `order` is an integer, so
        # `return self.order` made str(instance) raise TypeError.
        return str(self.order)

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class AttributePrivacy(db.Model):
    """ Privacy configuration for an attribute. """
    __tablename__ = 'attribute_privacy'

    # Fields
    id = Column(Integer, primary_key=True)
    attribute_name = Column(String(200), nullable=False)
    data_type = Column(Enum(*list(DataType.values()),
                            name='DataTypeEnumType'))
    privacy_type = Column(Enum(*list(PrivacyType.values()),
                               name='PrivacyTypeEnumType'), nullable=False)
    category_technique = Column(String(100))
    anonymization_technique = Column(Enum(*list(AnonymizationTechnique.values()),
                                          name='AnonymizationTechniqueEnumType'), nullable=False)
    hierarchical_structure_type = Column(String(100))
    privacy_model_technique = Column(String(100))
    # Large serialized payloads — presumably JSON; confirm with the writer side.
    hierarchy = Column(LONGTEXT)
    category_model = Column(LONGTEXT)
    privacy_model = Column(LONGTEXT)
    privacy_model_parameters = Column(LONGTEXT)
    unlock_privacy_key = Column(String(400))
    is_global_law = Column(Boolean,
                           default=False)

    # Associations
    attribute_id = Column(Integer,
                          ForeignKey("attribute.id",
                                     name="fk_attribute_privacy_attribute_id"),
                          index=True)
    attribute = relationship(
        "Attribute",
        overlaps='attribute_privacy',
        foreign_keys=[attribute_id],
        back_populates="attribute_privacy")
    attribute_privacy_group_id = Column(Integer,
                                        ForeignKey("attribute_privacy_group.id",
                                                   name="fk_attribute_privacy_attribute_privacy_group_id"),
                                        index=True)
    attribute_privacy_group = relationship(
        "AttributePrivacyGroup",
        overlaps='attribute_privacy',
        foreign_keys=[attribute_privacy_group_id],
        backref=backref("attribute_privacy",
                        cascade="all, delete-orphan"))

    def __str__(self):
        return self.attribute_name

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class AttributePrivacyGroup(db.Model):
    """ Groups attributes with the same semantics so privacy settings can be shared. """
    __tablename__ = 'attribute_privacy_group'

    # Fields
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    # Owner of the group (no FK — user lives in another service).
    user_id = Column(Integer, nullable=False)

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class DataSource(db.Model):
    """ Data source in Lemonade system (anything that stores data). """
    __tablename__ = 'data_source'

    # Fields
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    description = Column(String(500))
    enabled = Column(Boolean,
                     default=True, nullable=False)
    statistics_process_counter = Column(Integer,
                                        default=0, nullable=False)
    read_only = Column(Boolean,
                       default=True, nullable=False)
    privacy_aware = Column(Boolean,
                           default=False, nullable=False)
    url = Column(String(200), nullable=False)
    # NOTE(review): `created` uses DB-side func.now() while `updated` uses
    # client-side utcnow — the two timestamps may come from different clocks.
    created = Column(DateTime,
                     default=func.now(), nullable=False)
    updated = Column(DateTime,
                     default=datetime.datetime.utcnow, nullable=False,
                     onupdate=datetime.datetime.utcnow)
    format = Column(Enum(*list(DataSourceFormat.values()),
                         name='DataSourceFormatEnumType'), nullable=False)
    initialization = Column(Enum(*list(DataSourceInitialization.values()),
                                 name='DataSourceInitializationEnumType'),
                            default=DataSourceInitialization.INITIALIZED, nullable=False)
    initialization_job_id = Column(String(200))
    provenience = Column(LONGTEXT)
    estimated_rows = Column(Integer,
                            default=0)
    estimated_size_in_mega_bytes = Column(Numeric(10, 2))
    expiration = Column(String(200))
    # Denormalized owner info (user lives in another service, hence no FK).
    user_id = Column(Integer)
    user_login = Column(String(50))
    user_name = Column(String(200))
    tags = Column(String(100))
    temporary = Column(Boolean,
                       default=False, nullable=False)
    workflow_id = Column(Integer)
    task_id = Column(String(200))
    # Parsing options — presumably for CSV/tabular formats; confirm with importer.
    attribute_delimiter = Column(String(20))
    record_delimiter = Column(String(20))
    text_delimiter = Column(String(20))
    is_public = Column(Boolean,
                       default=False, nullable=False)
    treat_as_missing = Column(LONGTEXT)
    encoding = Column(String(200))
    is_first_line_header = Column(Boolean,
                                  default=0, nullable=False)
    is_multiline = Column(Boolean,
                          default=0, nullable=False)
    command = Column(LONGTEXT)
    is_lookup = Column(Boolean,
                       default=0, nullable=False)
    use_in_workflow = Column(Boolean,
                             default=0, nullable=False, index=True)

    # Associations
    storage_id = Column(Integer,
                        ForeignKey("storage.id",
                                   name="fk_data_source_storage_id"),
                        nullable=False,
                        index=True)
    storage = relationship(
        "Storage",
        overlaps='storage',
        foreign_keys=[storage_id])

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class DataSourceForeignKey(db.Model):
    """ Foreign key between two data sources (attributes attach via AttributeForeignKey). """
    __tablename__ = 'data_source_foreign_key'

    # Fields
    id = Column(Integer, primary_key=True)

    # Associations
    from_source_id = Column(Integer,
                            ForeignKey("data_source.id",
                                       name="fk_data_source_foreign_key_from_source_id"),
                            nullable=False,
                            index=True)
    from_source = relationship(
        "DataSource",
        overlaps='foreign_keys',
        foreign_keys=[from_source_id],
        backref=backref("foreign_keys",
                        cascade="all, delete-orphan"))
    to_source_id = Column(Integer,
                          ForeignKey("data_source.id",
                                     name="fk_data_source_foreign_key_to_source_id"),
                          nullable=False,
                          index=True)
    to_source = relationship(
        "DataSource",
        overlaps='references',
        foreign_keys=[to_source_id],
        backref=backref("references",
                        cascade="all, delete-orphan"))

    def __str__(self):
        return 'DataSourceForeignKey'

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class DataSourcePermission(db.Model):
    """ Associates a user with a permission level on a data source. """
    __tablename__ = 'data_source_permission'

    # Fields
    id = Column(Integer, primary_key=True)
    permission = Column(Enum(*list(PermissionType.values()),
                             name='PermissionTypeEnumType'), nullable=False)
    # Denormalized user info (user lives in another service, hence no FK).
    user_id = Column(Integer, nullable=False)
    user_login = Column(String(50), nullable=False)
    user_name = Column(String(200), nullable=False)

    # Associations
    data_source_id = Column(Integer,
                            ForeignKey("data_source.id",
                                       name="fk_data_source_permission_data_source_id"),
                            nullable=False,
                            index=True)
    data_source = relationship(
        "DataSource",
        overlaps='permissions',
        foreign_keys=[data_source_id],
        backref=backref("permissions",
                        cascade="all, delete-orphan"))

    def __str__(self):
        return self.permission

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class Model(db.Model):
    """ Machine learning model registered in the platform. """
    __tablename__ = 'model'

    # Fields
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    enabled = Column(Boolean,
                     default=True, nullable=False)
    created = Column(DateTime,
                     default=func.now(), nullable=False)
    path = Column(String(500), nullable=False)
    class_name = Column(String(500), nullable=False)
    type = Column(Enum(*list(ModelType.values()),
                       name='ModelTypeEnumType'),
                  default=ModelType.UNSPECIFIED, nullable=False)
    deployment_status = Column(Enum(*list(DeploymentStatus.values()),
                                    name='DeploymentStatusEnumType'),
                               default=DeploymentStatus.NOT_DEPLOYED, nullable=False)
    # Denormalized owner info (user lives in another service, hence no FK).
    user_id = Column(Integer, nullable=False)
    user_login = Column(String(50), nullable=False)
    user_name = Column(String(200), nullable=False)
    # Workflow/job that produced the model (optional).
    workflow_id = Column(Integer)
    workflow_name = Column(String(200))
    task_id = Column(String(200))
    job_id = Column(Integer)

    # Associations
    storage_id = Column(Integer,
                        ForeignKey("storage.id",
                                   name="fk_model_storage_id"),
                        nullable=False,
                        index=True)
    storage = relationship(
        "Storage",
        overlaps='storage',
        foreign_keys=[storage_id])

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class ModelPermission(db.Model):
    """ Associates a user with a permission level on a model. """
    __tablename__ = 'model_permission'

    # Fields
    id = Column(Integer, primary_key=True)
    permission = Column(Enum(*list(PermissionType.values()),
                             name='PermissionTypeEnumType'), nullable=False)
    # Denormalized user info (user lives in another service, hence no FK).
    user_id = Column(Integer, nullable=False)
    user_login = Column(String(50), nullable=False)
    user_name = Column(String(200), nullable=False)

    # Associations
    model_id = Column(Integer,
                      ForeignKey("model.id",
                                 name="fk_model_permission_model_id"),
                      nullable=False,
                      index=True)
    model = relationship(
        "Model",
        overlaps='permissions',
        foreign_keys=[model_id],
        backref=backref("permissions",
                        cascade="all, delete-orphan"))

    def __str__(self):
        return self.permission

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class PrivacyRisk(db.Model):
    """ Privacy risk assessment associated with a data source. """
    __tablename__ = 'privacy_risk'

    # Fields
    id = Column(Integer, primary_key=True)
    type = Column(Enum(*list(PrivacyRiskType.values()),
                       name='PrivacyRiskTypeEnumType'), nullable=False)
    probability = Column(Float)
    impact = Column(Float)
    value = Column(Float, nullable=False)
    # Free-form details of the assessment (serialized payload).
    detail = Column(LONGTEXT, nullable=False)

    # Associations
    data_source_id = Column(Integer,
                            ForeignKey("data_source.id",
                                       name="fk_privacy_risk_data_source_id"),
                            nullable=False,
                            index=True)
    data_source = relationship(
        "DataSource",
        overlaps='risks',
        foreign_keys=[data_source_id],
        backref=backref("risks",
                        cascade="all, delete-orphan"))

    def __str__(self):
        return self.type

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class Storage(db.Model):
    """ Storage backend used by data sources (HDFS, JDBC, Mongo, ...). """
    __tablename__ = 'storage'

    # Fields
    id = Column(Integer, primary_key=True)
    name = Column(String(100), nullable=False)
    type = Column(Enum(*list(StorageType.values()),
                       name='StorageTypeEnumType'), nullable=False)
    enabled = Column(Boolean,
                     default=True, nullable=False)
    url = Column(String(1000), nullable=False)
    # Client-facing URL when it differs from the internal one.
    client_url = Column(String(1000))
    extra_params = Column(LONGTEXT)

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
class StoragePermission(db.Model):
    """ Associates a user with a permission level on a storage. """
    __tablename__ = 'storage_permission'

    # Fields
    id = Column(Integer, primary_key=True)
    permission = Column(Enum(*list(PermissionType.values()),
                             name='PermissionTypeEnumType'), nullable=False)
    # Only the user id is kept here (unlike DataSourcePermission/ModelPermission,
    # which also denormalize login and name).
    user_id = Column(Integer, nullable=False)

    # Associations
    storage_id = Column(Integer,
                        ForeignKey("storage.id",
                                   name="fk_storage_permission_storage_id"),
                        nullable=False,
                        index=True)
    storage = relationship(
        "Storage",
        overlaps='permissions',
        foreign_keys=[storage_id],
        backref=backref("permissions",
                        cascade="all, delete-orphan"))

    def __str__(self):
        return self.permission

    def __repr__(self):
        return '<Instance {}: {}>'.format(self.__class__, self.id)
| 22,751 | 6,828 |
"""
Przemienia liczbę na wartość binarną i zwraca sumę jedynek występującą w wartości binarnej
Example: The binary representation of 1234 is 10011010010, so the function should return 5 in this case
"""
def countBits(n):
    """Return how many '1' digits appear in the binary representation of n."""
    # bin(n) yields e.g. '0b10011010010'; counting '1' characters gives the
    # number of set bits (the '0b' prefix contributes none of them).
    return bin(n).count("1")


print(countBits(1234))
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
class DestinationType(Enum):
    """CDF resource types a transformation may write to."""
    assets = "assets"
    timeseries = "timeseries"
    asset_hierarchy = "asset_hierarchy"
    events = "events"
    datapoints = "datapoints"
    string_datapoints = "string_datapoints"
    sequences = "sequences"
    files = "files"
    labels = "labels"
    relationships = "relationships"
    raw = "raw"  # requires raw_database and raw_table (see DestinationConfig)
    data_sets = "data_sets"
    sequence_rows = "sequence_rows"
    alpha_data_model_instances = "alpha_data_model_instances"  # Experimental feature
class ActionType(Enum):
    """Write behavior of a transformation (how rows are applied to the destination)."""
    create = "create"
    abort = "abort"
    update = "update"
    upsert = "upsert"
    delete = "delete"
@dataclass
class AuthConfig:
    """Credentials for a CDF project: either an api_key or an OAuth client-credentials set."""
    api_key: Optional[str]
    client_id: Optional[str]
    client_secret: Optional[str]
    token_url: Optional[str]
    scopes: Optional[List[str]]
    cdf_project_name: Optional[str]
    audience: Optional[str]
@dataclass
class ReadWriteAuthentication:
    """Separate credential sets for reading the source and writing the destination."""
    read: AuthConfig
    write: AuthConfig
@dataclass
class DestinationConfig:
    """
    Valid type values are: assets, asset_hierarchy, events, timeseries, datapoints, string_datapoints, raw (needs database and table)
    """
    type: DestinationType
    # Only used when type is raw:
    raw_database: Optional[str] = None
    raw_table: Optional[str] = None
    external_id: Optional[str] = None
@dataclass
class QueryConfig:
    """Reference to a file containing the transformation's SQL query."""
    file: str
@dataclass
class ScheduleConfig:
    """Run schedule for a transformation."""
    interval: str  # cron-style interval expression — TODO confirm exact format
    is_paused: bool = False
@dataclass
class TransformationConfig:
    """
    Master configuration class of a transformation.
    """
    external_id: str
    name: str
    # Query may be given inline (str) or as a reference to a query file.
    query: Union[str, QueryConfig]
    # Either one credential set or separate read/write credentials.
    authentication: Union[AuthConfig, ReadWriteAuthentication]
    schedule: Optional[Union[str, ScheduleConfig]]
    destination: Union[DestinationType, DestinationConfig]
    data_set_id: Optional[int]
    data_set_external_id: Optional[str]
    notifications: List[str] = field(default_factory=list)
    shared: bool = True
    ignore_null_fields: bool = True
    action: ActionType = ActionType.upsert
    legacy: bool = False
class TransformationConfigError(Exception):
    """Raised by the config parser when a transformation configuration is invalid.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message: str):
        super().__init__(message)
        self.message = message
| 2,392 | 726 |
"""
Sphinx AutoAPI
"""
from .extension import setup
from ._version import __version__, __version_info__
| 105 | 34 |
import logging
from org.openbaton.v2.cmd import BaseObCmd
from org.openbaton.v2.utils import get_result_to_list, get_result_to_show, parse_path_or_json, result_to_str
class Events(BaseObCmd):
    """Command to manage event endpoints. Supported operations:
    * show details of a specific event endpoint passing an id
    * list all saved event endpoints
    * delete a specific event endpoint passing an id
    * create a specific event endpoint passing a path to a file or directly the json content
    """
    log = logging.getLogger(__name__)
    keys_to_list = ["id", "name", "description"]
    keys_to_exclude = []

    def find(self, params):
        """Show one event endpoint identified by its id."""
        if not params:
            return "ERROR: missing <event-id>"
        event_id = params[0]
        details = get_result_to_show(self.app.ob_client.get_event(event_id),
                                     excluded_keys=self.keys_to_exclude,
                                     _format=self.app.format)
        return result_to_str(details)

    def create(self, params):
        """Create an event endpoint from a json string or a path to a json file."""
        if not params:
            return "ERROR: missing <event> or <path-to-json>"
        payload = parse_path_or_json(params[0])
        created = get_result_to_show(self.app.ob_client.create_event(payload),
                                     excluded_keys=self.keys_to_exclude,
                                     _format=self.app.format)
        return result_to_str(created)

    def delete(self, params):
        """Delete one event endpoint identified by its id."""
        if not params:
            return "ERROR: missing <event-id>"
        event_id = params[0]
        self.app.ob_client.delete_event(event_id)
        return "INFO: Deleted event with id %s" % event_id

    def list(self, params=None):
        """List all saved event endpoints."""
        rows = get_result_to_list(self.app.ob_client.list_events(),
                                  keys=self.keys_to_list,
                                  _format=self.app.format)
        return result_to_str(rows, _format=self.app.format)
| 1,851 | 538 |
#coding:utf-8
#
# id: bugs.gh_5995
# title: Connection to server may hang when working with encrypted databases over non-TCP protocol [CORE5730]
# decription:
# https://github.com/FirebirdSQL/firebird/issues/5995
#
# Test implemented only to be run on Windows.
# It assumes that there are files keyholder.dll and keyholder.conf in the %FIREBIRD_HOME%\\plugins dir.
# These files were provided by IBSurgeon and added during fbt_run prepare phase by batch scenario (qa_rundaily).
# File keyholder.conf initially contains several keys.
#
# If we make this file EMPTY then usage of XNET and WNET protocols became improssible before this ticket was fixed.
# Great thanks to Alex for suggestions.
#
# Confirmed bug on 3.0.1.32609: ISQL hangs on attempt to connect to database when file plugins\\keyholder.conf is empty.
# In order to properly finish test, we have to kill hanging ISQL and change DB state to full shutdown (with subsequent
# returning it to online) - fortunately, connection using TCP remains avaliable in this case.
#
# Checked on:
# 5.0.0.181 SS; 5.0.0.169 CS;
# 4.0.1.2578 SS; 4.0.1.2578 CS;
# 3.0.8.33489 SS; 3.0.8.33476 CS.
#
# tracker_id:
# min_versions: ['3.0.4']
# versions: 3.0.4
# qmid: None
import pytest
from firebird.qa import db_factory, python_act, Action
# version: 3.0.4
# resources: None
# No output substitutions are needed for this test.
substitutions_1 = []

# The database starts empty: the test only exercises connection protocols.
init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# import os
# import subprocess
# from subprocess import Popen
# import datetime
# import time
# import shutil
# import re
# import fdb
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
# engine = db_conn.engine_version
# db_name = db_conn.database_name
# db_conn.close()
#
# svc = fdb.services.connect(host='localhost', user=user_name, password=user_password)
# FB_HOME = svc.get_home_directory()
# svc.close()
#
# #--------------------------------------------
#
# def flush_and_close( file_handle ):
# # https://docs.python.org/2/library/os.html#os.fsync
# # If you're starting with a Python file object f,
# # first do f.flush(), and
# # then do os.fsync(f.fileno()), to ensure that all internal buffers associated with f are written to disk.
# global os
#
# file_handle.flush()
# if file_handle.mode not in ('r', 'rb') and file_handle.name != os.devnull:
# # otherwise: "OSError: [Errno 9] Bad file descriptor"!
# os.fsync(file_handle.fileno())
# file_handle.close()
#
# #--------------------------------------------
#
# def cleanup( f_names_list ):
# global os
# for i in range(len( f_names_list )):
# if type(f_names_list[i]) == file:
# del_name = f_names_list[i].name
# elif type(f_names_list[i]) == str:
# del_name = f_names_list[i]
# else:
# print('Unrecognized type of element:', f_names_list[i], ' - can not be treated as file.')
# print('type(f_names_list[i])=',type(f_names_list[i]))
# del_name = None
#
# if del_name and os.path.isfile( del_name ):
# os.remove( del_name )
#
# #--------------------------------------------
#
#
# dts = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
#
# kholder_cur = os.path.join( FB_HOME, 'plugins', 'keyholder.conf')
# kholder_bak = os.path.join( context['temp_directory'], 'keyholder'+dts+'.bak')
#
# shutil.copy2( kholder_cur, kholder_bak)
#
# # Make file %FB_HOME%\\plugins\\keyholder.conf empty:
# with open(kholder_cur,'w') as f:
# pass
#
# MAX_SECONDS_TO_WAIT = 3
#
# # Trying to establish connection to database using WNET and XNET protocols.
# # Async. launch of ISQL with check that it will finished within some reasonable time (and w/o errors).
# # If it will hang - kill (this is bug dexcribed in the ticket)
# for p in ('wnet', 'xnet'):
# f_isql_sql=open(os.path.join(context['temp_directory'],'tmp_gh_5995.'+p+'.sql'),'w')
# f_isql_sql.write('set list on; select mon$remote_protocol from mon$attachments where mon$attachment_id = current_connection;')
# flush_and_close( f_isql_sql )
#
# protocol_conn_string = ''.join( (p, '://', db_name) )
# f_isql_log=open( os.path.join(context['temp_directory'],'tmp_gh_5995.'+p+'.log'), 'w')
# p_isql = Popen([ context['isql_path'], protocol_conn_string, "-i", f_isql_sql.name], stdout=f_isql_log, stderr=subprocess.STDOUT )
#
# time.sleep(0.2)
# for i in range(0,MAX_SECONDS_TO_WAIT):
# # Check if child process has terminated. Set and return returncode attribute. Otherwise, returns None.
# p_isql.poll()
# if p_isql.returncode is None:
# # A None value indicates that the process has not terminated yet.
# time.sleep(1)
# if i < MAX_SECONDS_TO_WAIT-1:
# continue
# else:
# f_isql_log.write( '\\nISQL process %d hangs for %d seconds and is forcedly killed.' % (p_isql.pid, MAX_SECONDS_TO_WAIT) )
# p_isql.terminate()
#
# flush_and_close(f_isql_log)
#
# with open(f_isql_log.name,'r') as f:
# for line in f:
# if line:
# print(line)
#
# cleanup((f_isql_sql,f_isql_log))
#
# shutil.move( kholder_bak, kholder_cur)
#
# # ::: NOTE ::: We have to change DB state to full shutdown and bring it back online
# # in order to prevent "Object in use" while fbtest will try to drop this DB
# #####################################
# runProgram('gfix',[dsn,'-shut','full','-force','0'])
# runProgram('gfix',[dsn,'-online'])
#
#
#---
act_1 = python_act('db_1', substitutions=substitutions_1)

# Expected ISQL output: one attachment over WNET and one over XNET.
expected_stdout_1 = """
MON$REMOTE_PROTOCOL WNET
MON$REMOTE_PROTOCOL XNET
"""
@pytest.mark.version('>=3.0.4')
@pytest.mark.platform('Windows')
@pytest.mark.xfail
def test_1(act_1: Action):
    # Placeholder: the original Python-2 test script (preserved in the comment
    # block above) has not been ported to the firebird-qa framework yet.
    pytest.fail("Test not IMPLEMENTED")
| 6,390 | 2,204 |
from mednickdb_pyapi.mednickdb_pyapi import MednickAPI
import pytest
import time
import pandas as pd
import pprint
pp = pprint.PrettyPrinter(indent=4)

# Live server these integration tests run against.
server_address = 'http://saclab.ss.uci.edu:8000'
# Seconds to wait for the file store / parsed data to propagate server-side.
file_update_time = 2
data_update_time = 10
data_upload_working = False
def dict_issubset(superset, subset, show_diffs=False):
    """True if every (key, value) pair of `subset` appears in `superset`.

    With show_diffs=True, instead return the list of (key, value) pairs of
    `subset` that are missing from `superset` (empty list means subset holds).
    """
    super_items = superset.items()
    if show_diffs:
        missing = []
        for pair in subset.items():
            if pair not in super_items:
                missing.append(pair)
        return missing
    for pair in subset.items():
        if pair not in super_items:
            return False
    return True
def pytest_namespace():
    """Legacy pytest hook: seed the cross-test shared state slot."""
    return dict(usecase_1_filedata=None)
def test_clear_test_study():
    """
    Clear all data and files with the studyid of "TEST". This essentially refreshes the database for new testing.
    """
    med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
    fids = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id')
    if fids:
        # Delete every file (all versions) plus any data extracted from it.
        for fid in fids:
            med_api.delete_file(fid, delete_all_versions=True)
            med_api.delete_data_from_single_file(fid)
        # Nothing under studyid TEST should remain ...
        fids2 = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id')
        assert fid not in fids2
        assert (fids2 == [])
        # ... and all deleted files should show up in the deleted list.
        deleted_fids = med_api.extract_var(med_api.get_deleted_files(), '_id')
        assert all([dfid in deleted_fids for dfid in fids])
    med_api.delete_data(studyid='TEST')
    assert len(med_api.get_data(studyid='TEST', format='nested_dict')) == 0  # TODO after clearing up sourceid bug
@pytest.mark.dependency(['test_clear_test_study'])
def test_usecase_1():
    """runs usecase one from the mednickdb_usecase document (fid=)"""
    # a) Upload a sleep EEG file and verify the download round-trips byte-for-byte.
    med_api = MednickAPI(server_address, 'test_ra_account@uci.edu', 'pass1234')
    file_info_post = {
        'fileformat': 'eeg',
        'studyid': 'TEST',
        'versionid': 1,
        'subjectid': 1,
        'visitid': 1,
        'sessionid': 1,
        'filetype': 'sleep_eeg',
    }
    file_data_real = file_info_post.copy()
    with open('testfiles/sleepfile1.edf', 'rb') as sleepfile:
        file_info_returned = med_api.upload_file(fileobject=sleepfile, **file_info_post)
    with open('testfiles/sleepfile1.edf', 'rb') as sleepfile:
        downloaded_sleepfile = med_api.download_file(file_info_returned['_id'])
        assert (downloaded_sleepfile == sleepfile.read())
    # b) Verify stored file metadata, then the data parsed out of the file.
    time.sleep(file_update_time)  # give db 5 seconds to update
    file_info_get = med_api.get_file_by_fid(file_info_returned['_id'])
    file_info_post.update({'filename': 'sleepfile1.edf', 'filedir': 'uploads/TEST/1/1/1/1/sleep_eeg/'})
    assert dict_issubset(file_info_get, file_info_post)
    time.sleep(data_update_time - file_update_time)  # give db 5 seconds to update
    file_datas = med_api.get_data_from_single_file(filetype='sleep_eeg', fid=file_info_returned['_id'], format='flat_dict')
    file_data_real.pop('fileformat')
    file_data_real.pop('filetype')
    file_data_real.update({'sleep_eeg.eeg_nchan': 3, 'sleep_eeg.eeg_sfreq': 128, 'sleep_eeg.eeg_meas_date': 1041380737000, 'sleep_eeg.eeg_ch_names': ['C3A2', 'C4A1', 'ECG']})  # add actual data in file. # TODO add all
    # Stash expected values for the dependent usecases below.
    pytest.usecase_1_filedata = file_data_real
    pytest.usecase_1_filename_version = file_info_get['filename_version']
    assert(any([dict_issubset(file_data, file_data_real) for file_data in file_datas])), "Is pyparse running? (and working)"
@pytest.mark.dependency(['test_usecase_1'])
def test_usecase_2():
    """Upload a tabular demographics file and check it merges with usecase-1 data."""
    # a) Demographics spreadsheet upload metadata.
    file_info_post = {'filetype': 'demographics',
                      'fileformat': 'tabular',
                      'studyid': 'TEST',
                      'versionid': 1}
    med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
    with open('testfiles/TEST_Demographics.xlsx', 'rb') as demofile:
        # b) Upload and verify the download round-trips.
        file_info = med_api.upload_file(fileobject=demofile, **file_info_post)
        fid = file_info['_id']
        downloaded_demo = med_api.download_file(fid)
    with open('testfiles/TEST_Demographics.xlsx', 'rb') as demofile:
        assert downloaded_demo == demofile.read()
    # c) Verify stored file metadata.
    time.sleep(file_update_time)  # Give file db 5 seconds to update
    file_info_post.update({'filename': 'TEST_Demographics.xlsx', 'filedir': 'uploads/TEST/1/demographics/'})
    file_info_get = med_api.get_file_by_fid(fid)
    assert dict_issubset(file_info_get, file_info_post)
    # d) Verify the parsed rows merged with the sleep data from usecase 1.
    time.sleep(data_update_time - file_update_time)  # Give data db 50 seconds to update
    data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict')
    correct_row1 = {'studyid': 'TEST', 'versionid': 1, 'subjectid': 1,
                    'demographics.age': 23, 'demographics.sex': 'F', 'demographics.bmi': 23}
    correct_row1.update(pytest.usecase_1_filedata)
    correct_row2 = {'studyid': 'TEST', 'versionid': 1, 'subjectid': 2,
                    'demographics.age': 19, 'demographics.sex': 'M', 'demographics.bmi': 20}
    correct_rows = [correct_row1, correct_row2]
    # Stash for the dependent usecases below.
    pytest.usecase_2_row1 = correct_row1
    pytest.usecase_2_row2 = correct_row2
    pytest.usecase_2_filename_version = file_info_get['filename_version']
    for correct_row in correct_rows:
        assert any([dict_issubset(data_row, correct_row) for data_row in data_rows]), "demographics data downloaded does not match expected"
    # e) Filter by filetype should still return the usecase-1 sleep data.
    data_sleep_eeg = med_api.get_data(studyid='TEST', versionid=1, filetype='sleep_eeg')[0]  # FIXME will fail here until filetype is query-able
    assert dict_issubset(data_sleep_eeg, pytest.usecase_1_filedata), "sleep data downloaded does not match what was uploaded in usecase 1"
@pytest.mark.dependency(depends=['test_usecase_2'])
def test_usecase_3():
    """Manually attach memtest data to an existing file, then verify the file
    listing and the merged data rows.

    BUG FIX: ``depends=`` keyword added — pytest-dependency treats a positional
    list as the marker name, so the dependency was silently ignored.
    """
    # a) upload a manual data row tied to an arbitrary existing file
    med_api = MednickAPI(server_address, 'test_ra_account@uci.edu', 'Pass1234')
    fid_for_manual_upload = med_api.extract_var(med_api.get_files(studyid='TEST'), '_id')[0]  # get a random fid
    data_post = {'studyid': 'TEST',
                 'filetype': 'memtesta',
                 'data': {'accuracy': 0.9},
                 'versionid': 1,
                 'subjectid': 2,
                 'visitid': 1,
                 'sessionid': 1}
    med_api.upload_data(**data_post, fid=fid_for_manual_upload)
    # b) file listing should contain only the two previously uploaded files
    time.sleep(5)  # Give db 5 seconds to update
    correct_filename_versions = [pytest.usecase_1_filename_version, pytest.usecase_2_filename_version]
    filename_versions = med_api.extract_var(med_api.get_files(studyid='TEST', versionid=1), 'filename_version')
    assert all([fv in correct_filename_versions for fv in filename_versions]), "Missing expected filename versions from two previous usecases"
    # c) data rows should now include the manually uploaded accuracy
    time.sleep(5)  # Give db 5 seconds to update
    data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict')
    correct_row_2 = pytest.usecase_2_row2.copy()
    correct_row_2.update({'memtesta.accuracy': 0.9, 'visitid': 1})
    pytest.usecase_3_row2 = correct_row_2
    correct_rows = [pytest.usecase_2_row1, correct_row_2]
    for correct_row in correct_rows:
        assert any([dict_issubset(data_row, correct_row) for data_row in data_rows])
@pytest.mark.dependency(depends=['test_usecase_3'])
def test_usecase_4():
    """Upload two sleep-scoring files and verify their parsed contents merge
    with rows from the earlier usecases.

    BUG FIX: ``depends=`` keyword added — a positional list is treated as the
    marker name by pytest-dependency and the dependency was ignored.
    """
    # a)
    med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
    # b) upload scorefiles for visit 1 and visit 2
    file_info1_post = {
        'fileformat': 'sleep_scoring',
        'studyid': 'TEST',
        'versionid': 1,
        'subjectid': 2,
        'visitid': 1,
        'sessionid': 1,
        'filetype': 'sleep_scoring'
    }
    with open('testfiles/scorefile1.mat', 'rb') as scorefile1:
        fid1 = med_api.upload_file(scorefile1,
                                   **file_info1_post)
    file_info2_post = file_info1_post.copy()
    file_info2_post.update({'visitid': 2})
    with open('testfiles/scorefile2.mat', 'rb') as scorefile2:
        fid2 = med_api.upload_file(scorefile2,
                                   **file_info2_post)
    # NOTE(review): fid1/fid2 hold upload_file's return value (a file-info dict
    # per usecase 2) and are otherwise unused here.
    # expected parsed contents of the two scorefiles
    scorefile1_data = {'sleep_scoring.epochstage': [-1, -1, -1, 0, 0, 0, 0, 0, 0, 0],
                       'sleep_scoring.epochoffset': [0, 30, 60, 90, 120, 150, 180, 210, 240, 270],
                       'sleep_scoring.starttime': 1451635302000, 'sleep_scoring.mins_in_0': 3.5, 'sleep_scoring.mins_in_1': 0,
                       'sleep_scoring.mins_in_2': 0, 'sleep_scoring.mins_in_3': 0, 'sleep_scoring.mins_in_4': 0,
                       'sleep_scoring.sleep_efficiency': 0, 'sleep_scoring.total_sleep_time': 0}
    scorefile2_data = {'sleep_scoring.epochstage': [0, 0, 1, 1, 2, 2, 3, 3, 2, 2],
                       'sleep_scoring.epochoffset': [0, 30, 60, 90, 120, 150, 180, 210, 240, 270],
                       'sleep_scoring.starttime': 1451635302000, 'sleep_scoring.mins_in_0': 1, 'sleep_scoring.mins_in_1': 1,
                       'sleep_scoring.mins_in_2': 2, 'sleep_scoring.mins_in_3': 1, 'sleep_scoring.mins_in_4': 0,
                       'sleep_scoring.sleep_efficiency': 0.8, 'sleep_scoring.total_sleep_time': 4}
    # c) expected rows: usecase-2 row1 untouched; scoring merged into the others
    time.sleep(data_update_time)  # Give db 50 seconds to update
    data_rows = med_api.get_data(studyid='TEST', versionid=1, format='flat_dict')
    correct_row_1 = pytest.usecase_2_row1.copy()
    scorefile1_data.update(pytest.usecase_3_row2)
    correct_row_2 = scorefile1_data
    scorefile2_data.update(pytest.usecase_2_row2)
    correct_row_3 = scorefile2_data
    correct_rows = [correct_row_1, correct_row_2, correct_row_3]
    for correct_row in correct_rows:
        assert any([dict_issubset(data_row, correct_row) for data_row in data_rows])
    # stash expectations for usecase 5
    pytest.usecase_4_row1 = correct_row_1
    pytest.usecase_4_row2 = correct_row_2
    pytest.usecase_4_row3 = correct_row_3
@pytest.mark.dependency(depends=['test_usecase_4'])
def test_usecase_5():
    """Query with a compound expression and expect the usecase-3 row back.

    BUG FIX: ``depends=`` keyword added — a positional list is treated as the
    marker name by pytest-dependency and the dependency was ignored.
    """
    # a)
    med_api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
    data_rows = med_api.get_data(query='studyid=TEST and data.memtesta.accuracy>=0.9', format='flat_dict')
    assert any([dict_issubset(data_row, pytest.usecase_3_row2) for data_row in data_rows])
def test_get_specifiers():
    """The unique values of every data-store specifier match expectations."""
    api = MednickAPI(server_address, 'test_grad_account@uci.edu', 'Pass1234')
    study_ids = api.get_unique_var_values('studyid', store='data')
    assert 'TEST' in study_ids
    # each specifier restricted to the TEST study must match exactly
    expected_values = {
        'versionid': [1],
        'subjectid': [1, 2],
        'visitid': [1, 2],
        'sessionid': [1],
    }
    for specifier, expected in expected_values.items():
        actual = api.get_unique_var_values(specifier, studyid='TEST', store='data')
        assert actual == expected
    filetypes = api.get_unique_var_values('filetype', studyid='TEST', store='data')
    assert set(filetypes) == {'sleep_eeg', 'sleep_scoring', 'demographics', 'memtesta'}
| 10,799 | 4,064 |
import requests
from .enums import TransactionStatus
from .exceptions import InvalidPaymentException, SslcommerzAPIException
from .services import PayloadSchema, is_verify_sign_valid
# Endpoint configuration for the SSLCOMMERZ sandbox gateway.  Any key can be
# overridden per store via keyword arguments to SslcommerzStore(...).
DEFAULT_CONFIG = {
    "base_url": "https://sandbox.sslcommerz.com",
    "session_url": "/gwprocess/v4/api.php",
    "validation_url": "/validator/api/validationserverAPI.php",
    "transaction_url": "/validator/api/merchantTransIDvalidationAPI.php",
}
class SslcommerzStore:
    """Minimal client for the SSLCOMMERZ payment-gateway HTTP API.

    Holds one store's credentials and endpoint configuration, and wraps the
    session-creation, IPN-validation, transaction-query and refund calls.
    """

    def __init__(self, store_id, store_passwd, **kwargs):
        """Create a client for one store.

        :param store_id: merchant store id (also exposed as ``self.id``).
        :param store_passwd: merchant store password.
        :param kwargs: overrides for any key in ``DEFAULT_CONFIG``.
        """
        self.id = store_id
        self.credentials = dict(store_id=store_id, store_passwd=store_passwd)
        self.config = {**DEFAULT_CONFIG, **kwargs}

    def request(self, method, url, **kwargs):
        """Issue an HTTP request against ``base_url + url``; return the raw response."""
        url = self.config["base_url"] + url
        return requests.request(method, url, **kwargs)

    def _json_or_raise(self, response):
        """Decode a gateway response as JSON, raising on a non-200 status.

        Consolidates the status check that was previously duplicated in
        ``create_session`` and ``validate_transaction``.

        :raises SslcommerzAPIException: if the HTTP status is not 200.
        """
        if response.status_code != 200:
            raise SslcommerzAPIException(
                f"Unexpected status code: {response.status_code}"
            )
        return response.json()

    def create_session(self, **kwargs):
        """Start a payment session and return the gateway's JSON payload.

        :raises SslcommerzAPIException: on HTTP failure or a non-SUCCESS status.
        """
        response = self.request(
            method="POST",
            url=self.config["session_url"],
            data={**self.credentials, **kwargs},
        )
        response_json = self._json_or_raise(response)
        if response_json["status"] != "SUCCESS":
            raise SslcommerzAPIException(f"Error: {response_json['failedreason']}")
        return response_json

    def validate_ipn_payload(self, payload):
        """Verify an IPN callback payload and return the loaded, validated data.

        :raises InvalidPaymentException: on signature mismatch, a bad payment
            status, or a missing key in *payload*.
        """
        try:
            if not is_verify_sign_valid(
                store_passwd=self.credentials["store_passwd"],
                payload=payload["original"],
            ):
                raise InvalidPaymentException("verify_sign mismatch")
            if payload["status"] == TransactionStatus.VALID:
                validation_response = self.validate_transaction(payload["val_id"])
                if validation_response["status"] not in (
                    TransactionStatus.VALID,
                    TransactionStatus.VALIDATED,
                ):
                    raise InvalidPaymentException(
                        f"Payment status: {validation_response['status']}"
                    )
            # NOTE(review): if payload["status"] is not VALID,
            # `validation_response` is unbound here and this raises NameError —
            # confirm whether non-VALID payloads are expected to reach this point.
            return PayloadSchema().load(validation_response)
        except KeyError as key:
            raise InvalidPaymentException(f"{key} is missing in payload") from key

    def validate_transaction(self, val_id):
        """Look up a transaction by validation id; return the gateway JSON.

        :raises SslcommerzAPIException: if the HTTP status is not 200.
        """
        response = self.request(
            method="GET",
            url=self.config["validation_url"],
            params=dict(**self.credentials, val_id=val_id, format="json"),
        )
        return self._json_or_raise(response)

    def query_transaction_by_sessionkey(self, sessionkey):
        """Query a transaction by session key (no HTTP status check, as before)."""
        response = self.request(
            method="GET",
            url=self.config["transaction_url"],
            params=dict(**self.credentials, sessionkey=sessionkey, format="json"),
        )
        return response.json()

    def query_transaction_by_tran_id(self, tran_id):
        """Query a transaction by merchant transaction id."""
        response = self.request(
            method="GET",
            url=self.config["transaction_url"],
            params=dict(**self.credentials, tran_id=tran_id, format="json"),
        )
        return response.json()

    def init_refund(self, bank_tran_id, refund_amount, refund_remarks):
        """Initiate a refund against a bank transaction id."""
        response = self.request(
            method="GET",
            url=self.config["transaction_url"],
            params=dict(
                **self.credentials,
                bank_tran_id=bank_tran_id,
                refund_amount=refund_amount,
                refund_remarks=refund_remarks,
                format="json",
            ),
        )
        return response.json()

    def query_refund_status(self, refund_ref_id):
        """Query the status of a previously initiated refund."""
        response = self.request(
            method="GET",
            url=self.config["transaction_url"],
            params=dict(**self.credentials, refund_ref_id=refund_ref_id, format="json"),
        )
        return response.json()
| 4,078 | 1,120 |
import abc
import re
from .exceptions import FileSuffixError
from .stack import LineCounter, IndentStack
class ConverterBase(metaclass=abc.ABCMeta):
    """Base class that walks indentation-delimited source text.

    NOTE(review): despite the property's docstring, ``code_blocks`` does not
    return a tuple — it only print()s as it walks the source.  Presumably the
    prints emit a brace-delimited translation of the input; confirm against
    IndentStack/LineCounter before relying on this.
    """
    def __init__(self, src: str):
        self.src = src
        # NOTE(review): evaluates the property purely for its print side
        # effects; the (None) result is discarded.
        self.code_blocks
    @property
    def code_blocks(self):
        '''
        Aggregate code block into tuple.
        A code block could be determined by intentation.
        '''
        # stack of indentation strings; seeded with the empty (top-level) indent
        indent_stack = IndentStack([''])
        # counts blank/dedented lines per indent level (dict-like: .pop(key, 0))
        blankline_stack = LineCounter()
        def complete_brace(indent, cur_indent):
            # Recursively reconcile the indent stack with the current line's
            # indent, printing whatever push()/pop() return along the way
            # (presumably opening/closing braces — TODO confirm).
            if indent == cur_indent:
                print('\n' * blankline_stack.pop(cur_indent, 0))
                return
            if len(indent) > len(cur_indent):
                print(indent_stack.push(indent))
            elif len(indent) < len(cur_indent):
                print(indent_stack.pop())
            else:
                print('\n' * blankline_stack.pop(cur_indent, 0))
            try:
                # recurse until the stack top matches; IndexError ends recursion
                complete_brace(indent, indent_stack.top)
            except IndexError:
                return
        for line in self.src.split('\n'):
            # leading whitespace followed by a non-space char => indented code line
            indent_match = re.match('^([ \t]+)[\S]+', line)
            cur_indent = indent_stack[-1]
            if indent_match:
                indent = indent_match.group(1)
                complete_brace(indent, cur_indent)
                # NOTE(review): `sep` has no effect with a single argument
                print(line, sep=', ')
            else:
                indent = None
                if cur_indent:
                    # blank line while inside an indented block: defer printing
                    blankline_stack.push(cur_indent)
                else:
                    print(line)
            del line
        # handle eol
        # close the final open block with its indent + '}'
        print('{}}}'.format(indent_stack[-2]))
| 1,636 | 448 |
# import the necessary packages
import numpy as np
import argparse
import cv2
# Load a test image, threshold it, and draw its contours (outlined then filled).
im = cv2.imread('/var/www/test/test.jpg')
if im is None:
    # imread returns None instead of raising; fail loudly before cv2.imshow does
    raise IOError("Could not read image: /var/www/test/test.jpg")
cv2.imshow("im", im)
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
cv2.imshow("Thresh", thresh)
# BUG FIX: cv2.findContours returns (contours, hierarchy) on OpenCV 2.4/4.x but
# (image, contours, hierarchy) on OpenCV 3.x, so the original two-element
# unpack raised ValueError under 3.x.  Taking [-2] works on every version.
res = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = res[-2]
# draw contour outlines (thickness 3), then fill them (thickness -1)
cv2.drawContours(im, cnts, -1, (0, 255, 0), 3)
cv2.drawContours(im, cnts, -1, (0, 255, 0), -1)
cv2.imshow("Image", im)
cv2.waitKey(0)
| 541 | 271 |
"""empty message
Revision ID: 783682226c9b
Revises: b882b9ab026c
Create Date: 2019-10-19 10:07:14.923441
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "783682226c9b"
down_revision = "b882b9ab026c"
branch_labels = None
depends_on = None
def upgrade():
    """Widen prices.internal_product_id from INTEGER to a string column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "prices",
        "internal_product_id",
        existing_type=sa.INTEGER(),
        type_=sa.String(),
        existing_nullable=True,
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert prices.internal_product_id back to an INTEGER column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column(
        "prices",
        "internal_product_id",
        existing_type=sa.String(),
        type_=sa.INTEGER(),
        existing_nullable=True,
    )
    # ### end Alembic commands ###
| 810 | 317 |
# Generated by Django 3.1.4 on 2021-02-04 05:25
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    # Auto-generated: redefines HomePage.body as a StreamField with three
    # block types (title CharBlock, paragraph TextBlock, rich RichTextBlock).

    dependencies = [
        ('pages', '0001_add_homepage'),
    ]

    operations = [
        migrations.AlterField(
            model_name='homepage',
            name='body',
            field=wagtail.core.fields.StreamField([('title', wagtail.core.blocks.CharBlock(form_classname='title', required=False)), ('paragraph', wagtail.core.blocks.TextBlock(form_classname='full')), ('rich', wagtail.core.blocks.RichTextBlock(form_classname='full'))]),
        ),
    ]
| 645 | 221 |
#
# PySNMP MIB module HPN-ICF-FCOE-MODE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-FCOE-MODE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:26:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): `mibBuilder` is not imported in this module; pysmi-generated MIB
# modules are executed by the pysnmp MIB loader, which presumably injects it
# into the namespace — confirm before running this file standalone.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, Integer32, IpAddress, Bits, ModuleIdentity, Counter32, Unsigned32, TimeTicks, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, NotificationType, Gauge32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Integer32", "IpAddress", "Bits", "ModuleIdentity", "Counter32", "Unsigned32", "TimeTicks", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "NotificationType", "Gauge32", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Module identity for the HPN ICF FCoE-mode MIB (single revision, 2013-03-08).
hpnicfFcoeMode = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135))
hpnicfFcoeMode.setRevisions(('2013-03-08 11:00',))
if mibBuilder.loadTexts: hpnicfFcoeMode.setLastUpdated('201303081100Z')
if mibBuilder.loadTexts: hpnicfFcoeMode.setOrganization('')
# Two scalars: the configured FCoE mode (read-write) and the result of the
# last mode-change attempt (read-only enum success/noLicence/needReset/unknownFault).
hpnicfFcoeModeMibObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135, 1))
hpnicfFcoeModeCfgMode = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfFcoeModeCfgMode.setStatus('current')
hpnicfFcoeModeCfgLastResult = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 135, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("success", 1), ("noLicence", 2), ("needReset", 3), ("unknownFault", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfFcoeModeCfgLastResult.setStatus('current')
# Export the defined symbols so other MIB modules can import them by name.
mibBuilder.exportSymbols("HPN-ICF-FCOE-MODE-MIB", PYSNMP_MODULE_ID=hpnicfFcoeMode, hpnicfFcoeModeCfgLastResult=hpnicfFcoeModeCfgLastResult, hpnicfFcoeModeMibObjects=hpnicfFcoeModeMibObjects, hpnicfFcoeMode=hpnicfFcoeMode, hpnicfFcoeModeCfgMode=hpnicfFcoeModeCfgMode)
| 2,800 | 1,211 |
import argparse
import pandas as pd
from config_builder import build_config
from utils.helpers import load_ymal
def app_dep_graph(yml):
    """Build an edge list of service dependencies from a parsed app YAML.

    :param yml: dict with a top-level ``services`` mapping; each service may
        carry a ``dependencies`` mapping of ``{key: {"name": ...}}``.
    :return: pandas.DataFrame with ``source`` (service) and ``target``
        (dependency name) columns, one row per dependency edge.
    """
    # (debug print() calls and the unused `nodes` list were removed)
    source = []
    target = []
    for svc_name, service in yml["services"].items():
        # .get() tolerates services that declare no dependencies at all
        for dep in service.get("dependencies", {}).values():
            source.append(svc_name)
            target.append(dep["name"])
    return pd.DataFrame({'source': source,
                         'target': target, })
if __name__ == '__main__':
    # CLI entry point: build one dependency-edge CSV per app listed in the
    # simulation configuration file.
    # BUG FIX: corrected the user-facing typo "Kuberentes" -> "Kubernetes".
    parser = argparse.ArgumentParser(description='Run Kubernetes simulation')
    parser.add_argument(
        '--config_file_name',
        type=str,
        default="yamls/configurations/simple_run.yml",
        help='A configuration file that describe the test'
    )
    args = parser.parse_args()
    config_file_name = args.config_file_name
    config = build_config(config_file_name)
    apps = config["simulation_ymals"]["apps"]
    for app_file in apps:
        # derive the app name from the file stem, e.g. "a/b/app1.yml" -> "app1"
        app_name = app_file.split("/")[-1].split(".")[0]
        yml = load_ymal(app_file)
        graph = app_dep_graph(yml)
        graph.to_csv("{}.csv".format(app_name))
| 1,212 | 375 |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
# generate url for our django admin page
from django.urls import reverse
# allow us to make test requests to our app
class AdminSiteTests(TestCase):
    """Tests for the Django admin pages of the custom user model."""

    def setUp(self):
        """Create a client logged in as a superuser, plus a regular user fixture."""
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin.@gmail.com',
            password='password123'
        )
        # log the superuser in via Django's auth helper
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email="test@gmail.com",
            password="password123",
            name="Test"
        )

    def test_users_listed(self):
        """Test that users are listed on user page"""
        response = self.client.get(reverse('admin:core_user_changelist'))
        self.assertContains(response, self.user.name)
        self.assertContains(response, self.user.email)

    def test_user_change_page(self):
        """Test that the user edit page works"""
        # admin/core/user/id
        response = self.client.get(reverse('admin:core_user_change', args=[self.user.id]))
        self.assertEqual(response.status_code, 200)

    def test_create_user_page(self):
        """Test that create user page works"""
        response = self.client.get(reverse('admin:core_user_add'))
        self.assertEqual(response.status_code, 200)
| 1,896 | 538 |
import turtle
def circle():
    """Trace an approximate circle: unit steps with 1-degree left turns until
    the heading wraps back near its start, plus one final turn."""
    step, turn = 1, 1
    while turtle.heading() < 359:
        turtle.forward(step)
        turtle.left(turn)
    turtle.left(turn)
def poly(r, teta):
    """Draw a polygon with side length *r* and exterior angle *teta* degrees.

    Draws 360/teta sides (rounded up when teta does not divide 360 evenly).
    """
    sides = 360 / teta
    while sides > 0:
        turtle.forward(r)
        turtle.left(teta)
        sides -= 1
# Draw ten 12-sided polygons, shifting forward 40 units between each,
# then hand control to the turtle event loop.
for _ in range(10):
    poly(10, 30)
    turtle.forward(40)
turtle.done()
| 348 | 168 |
from ckan.plugins import toolkit
from ckan.lib.i18n import get_lang
import ckan.lib.i18n as i18n
from ckan.common import config, c
import ckan.logic as logic
import ckan.lib.base as base
import ckan.model as model
from ckan.model.package import Package
from ckan.lib.dictization.model_dictize import group_list_dictize
import logging
get_action = toolkit.get_action
NotFound = logic.NotFound
abort = base.abort
log = logging.getLogger(__name__)
def call_toolkit_function(fn, args, kwargs):
    """Look up toolkit.<fn> by name and call it with *args and **kwargs."""
    func = getattr(toolkit, fn)
    return func(*args, **kwargs)
def add_locale_to_source(kwargs, locale):
    """Return a copy of *kwargs* with ``data-module-source`` suffixed by ``_<locale>``.

    If the key is absent or falsy, the copy is returned unchanged.  The input
    dict is never mutated.  (The original had a redundant duplicated
    ``return copy`` in both branches.)
    """
    copy = kwargs.copy()
    source = copy.get('data-module-source')
    if source:
        copy['data-module-source'] = source + '_' + locale
    return copy
def get_current_lang():
    """Return the current request language via ckan.lib.i18n.get_lang()."""
    return get_lang()
def scheming_field_only_default_required(field, lang):
    """True when *field* is flagged only_default_lang_required and *lang* is
    the site's default locale; False otherwise (including a None field)."""
    default_lang = config.get('ckan.locale_default', 'en')
    return bool(field and field.get('only_default_lang_required') and lang == default_lang)
def get_current_date():
    """Return today's date formatted as dd.mm.yyyy."""
    from datetime import date
    return date.today().strftime("%d.%m.%Y")
def get_package_groups_by_type(package_id, group_type):
    """Return the groups of type *group_type* that the dataset belongs to.

    :param package_id: id or name of the dataset.
    :param group_type: CKAN group type used to filter both the global group
        list and the package's own group memberships.
    :return: list of group dicts (all fields, with extras).
    """
    context = {'model': model, 'session': model.Session,
               'for_view': True, 'use_cache': False}
    group_list = []
    data_dict = {
        'all_fields': True,
        'include_extras': True,
        'type': group_type
    }
    groups = logic.get_action('group_list')(context, data_dict)
    try:
        pkg_obj = Package.get(package_id)
        # intersect the package's own groups with the global list of this type
        pkg_group_ids = set(group['id'] for group in group_list_dictize(pkg_obj.get_groups(group_type, None), context))
        group_list = [group
                      for group in groups if
                      group['id'] in pkg_group_ids]
    except NotFound:
        # BUG FIX: `_` was undefined in this module and raised NameError when
        # this handler ran; use the translator exposed by the plugins toolkit.
        abort(404, toolkit._('Dataset not found'))
    return group_list
# locales that should be reported under a canonical alias
_LOCALE_ALIASES = {'en_GB': 'en'}

def get_lang_prefix():
    """Return the current UI language code with aliases (en_GB -> en) resolved."""
    language = i18n.get_lang()
    return _LOCALE_ALIASES.get(language, language)
def get_translated_or_default_locale(data_dict, field):
    """Return *field* translated to the current language, with fallbacks.

    Order: current-language translation if non-empty, then the default-locale
    translation, then the untranslated value, then ''.

    (Previously duplicated the locale-alias resolution inline; now reuses
    ``get_lang_prefix`` defined above.)
    """
    language = get_lang_prefix()
    try:
        value = data_dict[field + '_translated'][language]
        if value:
            return value
        # empty translation: fall back to the site default locale
        return data_dict[field + '_translated'][config.get('ckan.locale_default', 'en')]
    except KeyError:
        # no translations at all: use the plain field, defaulting to ''
        return data_dict.get(field, '')
def show_qa():
    """Return True when the CKAN `qa` plugin is loaded, else False."""
    from ckan.plugins import plugin_loaded
    return bool(plugin_loaded('qa'))
def scheming_category_list(args):
    """Return [{'value': group_name, 'label': group_title}, ...] for the groups
    the current user may edit, or None on lookup failure.

    NOTE(review): *args* is unused — presumably required by the scheming
    helper call signature; confirm.
    """
    from ckan.logic import NotFound
    # FIXME: sometimes this might return 0 categories if in development
    try:
        context = {'model': model, 'session': model.Session, 'ignore_auth': True}
        group_ids = get_action('group_list')(context, {})
    except NotFound:
        return None
    else:
        category_list = []
        # filter groups to those user is allowed to edit
        group_authz = get_action('group_list_authz')({
            'model': model, 'session': model.Session, 'user': c.user
        }, {})
        user_group_ids = set(group[u'name'] for group in group_authz)
        group_ids = [group for group in group_ids if group in user_group_ids]
        for group in group_ids:
            try:
                context = {'model': model, 'session': model.Session, 'ignore_auth': True}
                group_details = get_action('group_show')(context, {'id': group})
            except Exception as e:
                # NOTE(review): broad except — any failure on a single group
                # logs and aborts the whole listing with None.
                log.error(e)
                return None
            category_list.append({
                "value": group,
                "label": group_details.get('title')
            })
        return category_list
def check_group_selected(val, data):
    """Return True if any dict in *data* has 'name' equal to *val*.

    BUG FIX: the original tested ``if filter(...)``; on Python 3 ``filter``
    returns a lazy iterator which is always truthy, so the check always
    succeeded.  ``any()`` is correct on both Python 2 and 3.
    """
    log.info(val)
    log.info(data)
    return any(x['name'] == val for x in data)
def get_field_from_schema(schema, field_name):
    """Return the first dataset field dict whose 'field_name' matches.

    Raises StopIteration when no dataset field matches (same as the original
    ``next()`` call with no default).
    """
    matches = (f for f in schema.get('dataset_fields', [])
               if f.get('field_name') == field_name)
    return next(matches)
| 4,187 | 1,322 |
# function that heals the player
import variables as var
from helpers.message import message
def cast_heal():
    """Heal the player by HEAL_AMOUNT; cancel when already at full health."""
    fighter = var.player.fighter
    if fighter.hp == fighter.max_hp:
        message('You are already at full health.', 'red')
        return 'cancelled'
    message('Your wounds start to feel better!', 'light violet')
    fighter.heal(var.HEAL_AMOUNT)
| 389 | 126 |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# (removed a stray bare `all` expression — a no-op leftover from an edit)
from viabel import all_bounds
from viabel.vb import black_box_klvi, black_box_chivi, adagrad_optimize
from utils import Timer
from psis import psislw
## Display bounds information ##
def print_bounds(results):
    """Pretty-print the divergence/error bounds stored in *results*."""
    templates = [
        ('  2-Wasserstein {:.3g}', results['W2']),
        ('  2-divergence {:.3g}', results['d2']),
        ('  mean error {:.3g}', results['mean_error']),
        ('  stdev error {:.3g}', results['std_error']),
        ('  sqrt cov error {:.3g}', np.sqrt(results['cov_error'])),
        ('  cov error {:.3g}', results['cov_error']),
    ]
    print('Bounds on...')
    for template, value in templates:
        print(template.format(value))
## Check approximation accuracy ##
def check_accuracy(true_mean, true_cov, approx_mean, approx_cov, verbose=False,
                   method=None):
    """Compare an approximate mean/covariance against the ground truth.

    Returns a dict of error metrics (mean, covariance in spectral and nuclear
    norm, stdev, relative stdev), tagged with *method* when given.
    """
    true_std = np.sqrt(np.diag(true_cov))
    approx_std = np.sqrt(np.diag(approx_cov))
    results = {
        'mean_error': np.linalg.norm(true_mean - approx_mean),
        'cov_error_2': np.linalg.norm(true_cov - approx_cov, ord=2),
        'cov_norm_2': np.linalg.norm(true_cov, ord=2),
        'cov_error_nuc': np.linalg.norm(true_cov - approx_cov, ord='nuc'),
        'cov_norm_nuc': np.linalg.norm(true_cov, ord='nuc'),
        'std_error': np.linalg.norm(true_std - approx_std),
        'rel_std_error': np.linalg.norm(approx_std / true_std - 1),
    }
    if method is not None:
        results['method'] = method
    if verbose:
        print('mean =', approx_mean)
        print('stdevs =', approx_std)
        print()
        print('mean error = {:.3g}'.format(results['mean_error']))
        print('stdev error = {:.3g}'.format(results['std_error']))
        print('||cov error||_2^{{1/2}} = {:.3g}'.format(np.sqrt(results['cov_error_2'])))
        print('||true cov||_2^{{1/2}} = {:.3g}'.format(np.sqrt(results['cov_norm_2'])))
    return results
def check_approx_accuracy(var_family, var_param, true_mean, true_cov,
                          verbose=False, name=None):
    """Score a fitted variational approximation against the true mean/cov."""
    approx_mean, approx_cov = var_family.mean_and_cov(var_param)
    return check_accuracy(true_mean, true_cov, approx_mean, approx_cov,
                          verbose=verbose, method=name)
## Convenience functions and PSIS ##
def get_samples_and_log_weights(logdensity, var_family, var_param, n_samples):
    """Draw samples from q and compute log importance weights log p - log q."""
    samples = var_family.sample(var_param, n_samples)
    log_q = var_family.logdensity(samples, var_param)
    log_weights = logdensity(samples) - log_q
    return samples, log_weights
def psis_correction(logdensity, var_family, var_param, n_samples):
    """Pareto-smooth the importance weights of draws from q.

    Returns (samples transposed, smoothed log-weights, khat diagnostic).
    """
    samples, raw_log_weights = get_samples_and_log_weights(
        logdensity, var_family, var_param, n_samples)
    smoothed_log_weights, khat = psislw(raw_log_weights)
    return samples.T, smoothed_log_weights, khat
def improve_with_psis(logdensity, var_family, var_param, n_samples,
                      true_mean, true_cov, transform=None, verbose=False):
    """PSIS-correct draws from q, then score the reweighted moments.

    Returns (accuracy results with 'khat' added, corrected mean, corrected cov).
    """
    samples, slw, khat = psis_correction(logdensity, var_family,
                                         var_param, n_samples)
    if verbose:
        print('khat = {:.3g}'.format(khat))
        print()
    if transform is not None:
        samples = transform(samples)
    # normalize the smoothed log-weights into probabilities
    slw = slw - np.max(slw)
    weights = np.exp(slw)
    weights = weights / np.sum(weights)
    approx_mean = np.sum(weights[np.newaxis, :] * samples, axis=1)
    approx_cov = np.cov(samples, aweights=weights, ddof=0)
    res = check_accuracy(true_mean, true_cov, approx_mean, approx_cov, verbose)
    res['khat'] = khat
    return res, approx_mean, approx_cov
## Plotting ##
def plot_approx_and_exact_contours(logdensity, var_family, var_param,
                                   xlim=[-10,10], ylim=[-3, 3],
                                   cmap2='Reds', savepath=None):
    """Overlay contours of the target density (grey) and the variational
    approximation (cmap2) on a 100x100 grid; optionally save the figure."""
    xs = np.linspace(*xlim, 100)
    ys = np.linspace(*ylim, 100)
    X, Y = np.meshgrid(xs, ys)
    grid = np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T
    Z = np.exp(logdensity(grid)).reshape(X.shape)
    Zapprox = np.exp(var_family.logdensity(grid, var_param)).reshape(X.shape)
    plt.contour(X, Y, Z, cmap='Greys', linestyles='solid')
    plt.contour(X, Y, Zapprox, cmap=cmap2, linestyles='solid')
    if savepath is not None:
        plt.savefig(savepath, bbox_inches='tight')
    plt.show()
def plot_history(history, B=None, ylabel=None):
    """Plot a moving average (window *B*) of an optimization trace.

    B defaults to min(500, len(history)//10); the y axis is log-scaled when
    the smoothed trace is strictly positive.
    """
    if B is None:
        B = min(500, history.size//10)
    kernel = np.ones(B) / B
    smoothed = np.convolve(history, kernel, 'valid')
    plt.plot(smoothed)
    plt.yscale('log' if np.all(smoothed > 0) else 'linear')
    if ylabel is not None:
        plt.ylabel(ylabel)
    plt.xlabel('iteration')
    plt.show()
def plot_dist_to_opt_param(var_param_history, opt_param):
    """Plot the distance of each iterate to the final (optimal) parameter."""
    distances = np.linalg.norm(var_param_history - opt_param[np.newaxis, :], axis=1)
    plt.plot(distances)
    plt.title('iteration vs distance to optimal parameter')
    plt.xlabel('iteration')
    plt.ylabel('distance')
    sns.despine()
    plt.show()
## Run experiment with both KLVI and CHIVI ##
def _optimize_and_check_results(logdensity, var_family, objective_and_grad,
                                init_var_param, true_mean, true_cov,
                                plot_contours, ylabel, contour_kws=dict(),
                                elbo=None, n_iters=5000,
                                bound_w2=True, verbose=False, use_psis=True,
                                n_psis_samples=1000000, **kwargs):
    """Optimize a variational objective, score the result, and optionally
    compute W2/CUBO bounds, contour plots, and a PSIS-corrected estimate.

    NOTE(review): ``contour_kws=dict()`` is a mutable default — harmless while
    it is never mutated, but worth replacing with None at some point.

    :return: (accuracy_results dict, other_results dict with optimization
        history, bound entries, and optionally 'psis_results').
    """
    # extra **kwargs go straight to the adagrad optimizer
    opt_param, var_param_history, value_history, _ = \
        adagrad_optimize(n_iters, objective_and_grad, init_var_param, **kwargs)
    plot_dist_to_opt_param(var_param_history, opt_param)
    accuracy_results = check_approx_accuracy(var_family, opt_param,
                                             true_mean, true_cov, verbose);
    other_results = dict(opt_param=opt_param,
                         var_param_history=var_param_history,
                         value_history=value_history)
    # bound_w2 is overloaded: False/None disables, True uses 1e6 samples,
    # an int supplies the sample count directly
    if bound_w2 not in [False, None]:
        if bound_w2 is True:
            n_samples = 1000000
        else:
            n_samples = bound_w2
        print()
        with Timer('Computing CUBO and ELBO with {} samples'.format(n_samples)):
            _, log_weights = get_samples_and_log_weights(
                logdensity, var_family, opt_param, n_samples)
            var_dist_cov = var_family.mean_and_cov(opt_param)[1]
            moment_bound_fn = lambda p: var_family.pth_moment(p, opt_param)
            other_results.update(all_bounds(log_weights,
                                            q_var=var_dist_cov,
                                            moment_bound_fn=moment_bound_fn,
                                            log_norm_bound=elbo))
        if verbose:
            print()
            print_bounds(other_results)
    if plot_contours:
        plot_approx_and_exact_contours(logdensity, var_family, opt_param,
                                       **contour_kws)
    if use_psis:
        print()
        print('Results with PSIS correction')
        print('----------------------------')
        other_results['psis_results'], _, _ = \
            improve_with_psis(logdensity, var_family, opt_param, n_psis_samples,
                              true_mean, true_cov, verbose=verbose)
    return accuracy_results, other_results
def run_experiment(logdensity, var_family, init_param, true_mean, true_cov,
                   kl_n_samples=100, chivi_n_samples=500,
                   alpha=2, **kwargs):
    """Fit the same variational family with KLVI and CHIVI and compare both.

    Extra **kwargs are forwarded to ``_optimize_and_check_results`` (and from
    there to the optimizer).

    :return: (klvi objective, chivi objective, kl accuracy results, chivi
        accuracy results, other kl results, other chivi results).
    """
    klvi = black_box_klvi(var_family, logdensity, kl_n_samples)
    chivi = black_box_chivi(alpha, var_family, logdensity, chivi_n_samples)
    # contour plots only make sense for 2-D targets
    dim = true_mean.size
    plot_contours = dim == 2
    if plot_contours:
        plot_approx_and_exact_contours(logdensity, var_family, init_param,
                                       **kwargs.get('contour_kws', dict()))
    print('|--------------|')
    print('|     KLVI     |')
    print('|--------------|', flush=True)
    kl_results, other_kl_results = _optimize_and_check_results(
        logdensity, var_family, klvi, init_param,
        true_mean, true_cov, plot_contours, '-ELBO', **kwargs)
    kl_results['method'] = 'KLVI'
    print()
    print('|---------------|')
    print('|     CHIVI     |')
    print('|---------------|', flush=True)
    # reuse the KLVI ELBO as the log-normalizer bound for CHIVI
    elbo = other_kl_results['log_norm_bound']
    chivi_results, other_chivi_results = _optimize_and_check_results(
        logdensity, var_family, chivi, init_param, true_mean, true_cov,
        plot_contours, 'CUBO', elbo=elbo, **kwargs)
    chivi_results['method'] = 'CHIVI'
    return klvi, chivi, kl_results, chivi_results, other_kl_results, other_chivi_results
| 8,794 | 2,996 |
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from nova.objects.request_spec import RequestSpec
from nova.scheduler.host_manager import HostState
from oslo_log import log as logging
import nova.conf
from nova.scheduler.filters import BaseHostFilter
from latency_meter.server import start_server_on_other_thread
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
LOG_TAG = "GLLS"
class NetworkAwareFilter(BaseHostFilter):
    """Nova scheduler filter admitting hosts that satisfy the latency and
    bandwidth expectations passed via scheduler hints."""

    def __init__(self,
                 latency_filter=None,
                 bandwidth_filter=None
                 ):
        """
        :type bandwidth_filter: BandwidthFilter
        :type latency_filter: LatencyFilter
        """
        super(NetworkAwareFilter, self).__init__()
        if latency_filter is not None:
            self.latency_filter = latency_filter
        else:
            self.latency_filter = create_default_filter_backend()
        # BUG FIX: this guard previously re-tested `latency_filter`, so passing
        # a latency_filter without a bandwidth_filter assigned None to
        # self.bandwidth_filter, and a supplied bandwidth_filter alone was
        # ignored (copy/paste error).
        if bandwidth_filter is not None:
            self.bandwidth_filter = bandwidth_filter
        else:
            self.bandwidth_filter = create_default_bandwidth_filter()
        start_server_on_other_thread(LOG)

    def host_passes(self, host_state, spec_obj):
        """Return True when the host satisfies both latency and bandwidth hints.

        :type host_state: HostState
        :type spec_obj: RequestSpec
        """
        latency_passes = self.latency_filter.host_passes(host_state.host, hints=spec_obj.scheduler_hints)
        bandwidth_passes = self.bandwidth_filter.host_passes(host_state.host, self.get_bandwidth_hints(spec_obj))
        LOG.info(
            "GLLS " + host_state.host + " Latency passes: " + str(
                latency_passes) + ", Bandwidth passes: " + str(bandwidth_passes) + " with hints: " +
            str(spec_obj.scheduler_hints))
        return latency_passes and bandwidth_passes

    def get_bandwidth_hints(self, spec_obj):
        """Parse 'bandwidth_to' hints of the form "<kbps>,<host>" into
        BandwidthHint objects; returns [] when the hint is absent."""
        hints = []
        if 'bandwidth_to' in spec_obj.scheduler_hints:
            bandwidth_pairs = [hint.split(',') for hint in spec_obj.scheduler_hints['bandwidth_to']]
            hints = [BandwidthHint(float(hint[0]), hint[1].strip()) for hint in bandwidth_pairs]
        return hints
class HostLatencyService():
    # Abstract interface for latency lookups.
    # NOTE(review): `__metaclass__` is Python 2 syntax; under Python 3 it is
    # ignored and this class is not actually abstract — confirm target runtime.
    __metaclass__ = ABCMeta
    @abstractmethod
    def get_latencies_from_host(self, host):
        """Return a mapping of remote hostname -> latency measured from *host*."""
        pass
class HostBandwidthService():
    # Abstract interface for bandwidth lookups.
    # NOTE(review): `__metaclass__` is Python 2 syntax; ignored on Python 3.
    __metaclass__ = ABCMeta
    @abstractmethod
    def get_bandwidth_from_host(self, host):
        """Return a mapping of remote hostname -> bandwidth available from *host*."""
        pass
class StaticHostLatencyService(HostLatencyService, HostBandwidthService):
    """Hard-coded latency/bandwidth tables for a fixed three-node topology.

    Presumably latencies are milliseconds and bandwidth is kbps (cf.
    BandwidthHint.bandwidth_kbps) — TODO confirm units.
    """
    # symmetric host-to-host latency matrix; diagonal is zero
    latencies = {
        'node-2': {
            'node-2': 0,
            'node-3': 30,
            'node-4': 100
        },
        'node-3': {
            'node-2': 30,
            'node-3': 0,
            'node-4': 45
        },
        'node-4': {
            'node-2': 100,
            'node-3': 45,
            'node-4': 0
        }
    }
    # symmetric host-to-host bandwidth matrix; diagonal is the local maximum
    bandwidth = {
        'node-2': {
            'node-2': 1000000,
            'node-3': 100000,
            'node-4': 15000,
        },
        'node-3': {
            'node-2': 100000,
            'node-3': 1000000,
            'node-4': 50000,
        },
        'node-4': {
            'node-2': 15000,
            'node-3': 50000,
            'node-4': 1000000,
        },
    }
    def get_latencies_from_host(self, host):
        """Return the latency row for *host*; raises KeyError for unknown hosts."""
        return self.latencies[host]
    def get_bandwidth_from_host(self, host):
        """Return the bandwidth row for *host*; raises KeyError for unknown hosts."""
        return self.bandwidth[host]
class LatencyFilter():
    """Checks a host's measured latencies against 'latency_to' scheduler hints."""

    def __init__(self, measurements):
        """
        :type measurements: HostLatencyService
        """
        self.measurements = measurements

    def host_passes(self, hostname, hints):
        """Return True when every "<max_latency>,<remote_host>" hint is met.

        Hosts pass trivially when no 'latency_to' hint is present or the hint
        list is empty; a remote host missing from the measurements fails.
        """
        if 'latency_to' not in hints:
            return True
        latency_expectations = [hint.split(',') for hint in hints['latency_to']]
        self._log("Scheduling with expectations: " + str(latency_expectations))
        if not latency_expectations:
            return True
        latencies_to_host = self.measurements.get_latencies_from_host(host=hostname)
        self._log("Got latency list: " + str(latencies_to_host))
        for expected_latency, remote_host in latency_expectations:
            if remote_host not in latencies_to_host:
                self._log("Node " + str(remote_host) + " was not in nodes for " + hostname)
                return False
            latency_to_target = latencies_to_host[remote_host]
            self._log("Checking node " + remote_host + " expected latency: " + str(
                expected_latency) + " got latency " + str(latency_to_target))
            # the measured latency must be strictly below the expectation
            if latency_to_target >= float(expected_latency):
                return False
        return True

    def _log(self, log):
        LOG.info(LOG_TAG + " " + str(log))
class BandwidthHint():
    """A scheduler hint requiring at least *bandwidth_kbps* towards *to_host*."""

    def __init__(self, bandwidth_kbps, to_host):
        self.bandwidth_kbps = bandwidth_kbps
        self.to_host = to_host

    def __repr__(self):
        # aid debugging in scheduler logs
        return "BandwidthHint(%r, %r)" % (self.bandwidth_kbps, self.to_host)

    def __eq__(self, other):
        if isinstance(other, BandwidthHint):
            return other.bandwidth_kbps == self.bandwidth_kbps and other.to_host == self.to_host
        # NotImplemented lets Python try the reflected comparison instead of
        # silently claiming inequality with arbitrary types
        return NotImplemented

    def __hash__(self):
        # BUG FIX: defining __eq__ without __hash__ made instances unhashable
        # on Python 3; hash the same fields that __eq__ compares.
        return hash((self.bandwidth_kbps, self.to_host))
class BandwidthFilter():
    """Checks a host's measured bandwidth against a list of BandwidthHint."""

    def __init__(self, measurements):
        """
        :type measurements: HostBandwidthService
        """
        self.measurements = measurements

    def host_passes(self, hostname, hints):
        """Return True when every hint's bandwidth requirement is satisfied.

        :type hostname: str
        :type hints: list[BandwidthHint]
        """
        if not hints:
            return True
        bandwidths = self.measurements.get_bandwidth_from_host(hostname)
        LOG.info(LOG_TAG + " BANDWIDTH to host " + hostname + " -" + str(bandwidths))
        for hint in hints:
            # unknown targets fail; known targets need at least the hinted kbps
            if hint.to_host not in bandwidths:
                return False
            if bandwidths[hint.to_host] < hint.bandwidth_kbps:
                return False
        return True
def create_default_filter_backend():
    """Build a LatencyFilter backed by the static latency measurements."""
    measurements = StaticHostLatencyService()
    return LatencyFilter(measurements)
def create_default_bandwidth_filter():
    """Build a BandwidthFilter backed by the static bandwidth measurements.

    BUG FIX: this previously wrapped StaticHostLatencyService, but
    BandwidthFilter documents its dependency as a HostBandwidthService
    (it calls get_bandwidth_from_host), so a bandwidth service is needed.
    NOTE(review): confirm StaticHostBandwidthService is the concrete
    implementation's actual name in this project.
    """
    return BandwidthFilter(StaticHostBandwidthService())
| 6,269 | 1,949 |
import logging
import traceback
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import transaction
from django_rq import job
from . import email
from .r10_spreadsheet_converter import Region10SpreadsheetConverter
from contracts.loaders.region_10 import Region10Loader
from contracts.models import Contract, BulkUploadContractSource
contracts_logger = logging.getLogger('contracts')
@transaction.atomic
def _process_bulk_upload(upload_source):
    """Replace all Region 10 contracts with the rows in *upload_source*.

    Parses the uploaded spreadsheet, deletes every existing Contract tied
    to the Region 10 procurement center, and bulk-inserts the freshly
    parsed contracts -- all in one transaction, so a failure rolls the
    delete back.

    Returns a ``(num_contracts_created, num_rows_skipped)`` tuple.
    """
    spreadsheet_file = ContentFile(upload_source.original_file)
    converter = Region10SpreadsheetConverter(spreadsheet_file)

    contracts_logger.info("Deleting contract objects related to region 10.")
    # Delete existing contracts identified by the same procurement_center
    Contract.objects.filter(
        upload_source__procurement_center=BulkUploadContractSource.REGION_10
    ).delete()

    contracts = []
    bad_rows = []

    contracts_logger.info("Generating new contract objects.")
    for row in converter.convert_next():
        try:
            contracts.append(
                Region10Loader.make_contract(row, upload_source=upload_source)
            )
        except (ValueError, ValidationError) as e:
            # Previously the exception was bound but silently discarded;
            # record why the row was rejected so bad uploads can be diagnosed.
            contracts_logger.info("Skipping bad row %s: %s", row, e)
            bad_rows.append(row)

    contracts_logger.info("Saving new contract objects.")
    # Save new contracts
    Contract.objects.bulk_create(contracts)

    contracts_logger.info("Updating full-text search indexes.")
    # Update search field on Contract models
    Contract._fts_manager.update_search_field()

    # Update the upload_source
    upload_source.has_been_loaded = True
    upload_source.save()

    return len(contracts), len(bad_rows)
@job
def process_bulk_upload_and_send_email(upload_source_id):
    """Run the bulk upload for *upload_source_id* and email the outcome.

    django-rq background job.  On success, emails the contract/bad-row
    counts; on failure, logs the exception and emails the traceback.
    Either way the final "Ending" log line is emitted.
    """
    contracts_logger.info(
        "Starting bulk upload processing (pk=%d)." % upload_source_id
    )

    upload_source = BulkUploadContractSource.objects.get(
        pk=upload_source_id
    )

    try:
        num_contracts, num_bad_rows = _process_bulk_upload(upload_source)
        email.bulk_upload_succeeded(upload_source, num_contracts, num_bad_rows)
    except Exception:
        # Was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt; Exception is the broadest safe net here.
        contracts_logger.exception(
            'An exception occurred during bulk upload processing '
            '(pk=%d).' % upload_source_id
        )
        tb = traceback.format_exc()
        email.bulk_upload_failed(upload_source, tb)

    contracts_logger.info(
        "Ending bulk upload processing (pk=%d)." % upload_source_id
    )
| 2,456 | 739 |
import itertools
from django.conf import settings
from django.dispatch import receiver
from django.core.signals import setting_changed
from api import models
# Module-level caches for the unread-grace settings, filled in below.
_USER_UNREAD_GRACE_INTERVAL = None
_USER_UNREAD_GRACE_MIN_COUNT = None


@receiver(setting_changed)
def _load_global_settings(*args, **kwargs):
    """Cache the unread-grace settings in module globals.

    Connected to Django's ``setting_changed`` signal so the cached values
    stay in sync when settings are overridden; also invoked once at
    import time below.
    """
    global _USER_UNREAD_GRACE_INTERVAL, _USER_UNREAD_GRACE_MIN_COUNT
    _USER_UNREAD_GRACE_INTERVAL = settings.USER_UNREAD_GRACE_INTERVAL
    _USER_UNREAD_GRACE_MIN_COUNT = settings.USER_UNREAD_GRACE_MIN_COUNT


# Populate the globals at import time.
_load_global_settings()
def mark_archived_entries(read_mappings_generator, batch_size=768):
    """Drain *read_mappings_generator* and persist the mappings in batches.

    Consumes ReadFeedEntryUserMapping instances in chunks of *batch_size*
    and bulk-inserts each chunk, skipping rows that already exist
    (``ignore_conflicts=True``).  Batching keeps memory bounded for large
    feeds and keeps each INSERT statement a reasonable size.
    """
    while True:
        batch = list(itertools.islice(read_mappings_generator, batch_size))
        if not batch:  # generator exhausted
            break
        models.ReadFeedEntryUserMapping.objects.bulk_create(
            batch, batch_size=batch_size, ignore_conflicts=True)
def read_mapping_generator_fn(feed, user):
    """Yield ReadFeedEntryUserMapping objects marking older entries of
    *feed* as already-read for *user*.

    New users get a "grace" window of _USER_UNREAD_GRACE_INTERVAL after
    account creation.  If the feed published more than
    _USER_UNREAD_GRACE_MIN_COUNT entries after the window's cutoff, only
    entries from before the cutoff are marked read; otherwise all but
    _USER_UNREAD_GRACE_MIN_COUNT entries are marked read.
    """
    # Cutoff timestamp: user creation time plus the grace interval.
    grace_start = user.created_at + _USER_UNREAD_GRACE_INTERVAL
    feed_entries = None
    if models.FeedEntry.objects.filter(feed=feed, published_at__gte=grace_start).count() > _USER_UNREAD_GRACE_MIN_COUNT:
        # Active feed: plenty of entries after the cutoff, so everything
        # published before the cutoff is considered read.
        feed_entries = models.FeedEntry.objects.filter(
            feed=feed, published_at__lt=grace_start)
    else:
        # Quiet feed: leave only _USER_UNREAD_GRACE_MIN_COUNT entries unread.
        # NOTE(review): order_by('published_at') is ascending, so the slice
        # skips the OLDEST entries and marks the newer ones read -- confirm
        # that '-published_at' wasn't intended here.
        feed_entries = models.FeedEntry.objects.filter(feed=feed).order_by(
            'published_at')[_USER_UNREAD_GRACE_MIN_COUNT:]
    # iterator() streams rows instead of caching the whole queryset.
    for feed_entry in feed_entries.iterator():
        yield models.ReadFeedEntryUserMapping(feed_entry=feed_entry, user=user)
| 1,525 | 502 |