hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf444e38a6e54e7f70639c0425148b1acc2b373 | 1,471 | py | Python | openstack/tests/unit/workflow/test_workflow.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | [
"Apache-2.0"
] | 99 | 2018-03-28T15:41:45.000Z | 2022-01-23T17:22:13.000Z | openstack/tests/unit/workflow/test_workflow.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | [
"Apache-2.0"
] | 5 | 2018-05-25T16:54:23.000Z | 2021-11-21T02:27:16.000Z | openstack/tests/unit/workflow/test_workflow.py | horion/openstacksdk | cbb0e12e1dc944847f2ba0e67bf35b9c7a67b3a3 | [
"Apache-2.0"
] | 104 | 2018-04-06T14:33:54.000Z | 2022-03-01T01:58:09.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.tests.unit import base
from openstack.workflow.v2 import workflow

# Canned attribute payload used to exercise Workflow construction.
FAKE = {
    'scope': 'private',
    'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597',
    'definition': 'workflow_def',
}


class TestWorkflow(base.TestCase):
    """Unit tests for the v2 Workflow resource definition."""

    def setUp(self):
        super(TestWorkflow, self).setUp()

    def test_basic(self):
        """The resource exposes the expected static metadata."""
        wf = workflow.Workflow()
        self.assertEqual('workflow', wf.resource_key)
        self.assertEqual('workflows', wf.resources_key)
        self.assertEqual('/workflows', wf.base_path)
        # Operations supported against the workflows endpoint.
        self.assertTrue(wf.allow_fetch)
        self.assertTrue(wf.allow_list)
        self.assertTrue(wf.allow_create)
        self.assertTrue(wf.allow_delete)

    def test_instantiate(self):
        """Keyword arguments are mapped onto resource attributes."""
        wf = workflow.Workflow(**FAKE)
        for attr in ('id', 'scope', 'definition'):
            self.assertEqual(FAKE[attr], getattr(wf, attr))
| 33.431818 | 75 | 0.702243 |
acf4453b77600ba2a53dc2526751ac973d12c609 | 1,941 | py | Python | demosim.py | enacom/TechShot | 5c3a61be47ff9d8bff93dc72fe59c6d24281f462 | [
"MIT"
] | null | null | null | demosim.py | enacom/TechShot | 5c3a61be47ff9d8bff93dc72fe59c6d24281f462 | [
"MIT"
] | null | null | null | demosim.py | enacom/TechShot | 5c3a61be47ff9d8bff93dc72fe59c6d24281f462 | [
"MIT"
] | 1 | 2022-03-24T22:07:58.000Z | 2022-03-24T22:07:58.000Z | from des_model import DES_model
from des_simulator import DES_simulator
import numpy as np
import matplotlib.pyplot as plt
# build model
d = np.array([[320.0e3]]) # port-terminal distance (m)
tu = np.array([[4 * 3600.0]]) # unloading time (s)
tl = np.array([[8 * 3600.0]]) # loading time (s)
v = np.array([40 / 3.6]) # train speed (m/s)
L = np.array([5.0e6]) # train load (kg)
ntmax = 8 # maximum number of trains
tc = tu[0][0] + tl[0][0] + 2 * d[0][0] / v[0]
ntm = tc / tl[0][0]
# simulate model
T = 50 * 24 * 3600 # time horizon (s)
Pn = [0] # numerical productivity (kg/s)
Pa = [0] # analytical productivity (kg/s)
tq = [0] # queue time (s)
n = [0] # number of trains
for i in range(1, ntmax):
# model
nt = np.array([i], dtype=int) # train count of each model
model = DES_model(d, tu, tl, nt, v, L)
# simulation
simulator = DES_simulator()
simulator.simulate(model, T)
Pt, P, t = model.productivity() # [kg/s], [kg], [s]
tq.append(model.queue_time())
# log
n.append(i)
Pn.append(Pt[-1])
Pa.append(min(nt[0], ntm) * L[0] / tc)
# line command output
print('\n Numerical productivity {:.0f}'.format(Pt[-1] * 3.6))
print('Analytical productivity {:.0f}'.format(Pa[-1] * 3.6))
# graphical output
if False:
hf, ha = plt.subplots()
plt.plot(t / 3600, Pt * 3.6)
plt.xlabel('time (hours)')
plt.ylabel('productivity (ton/hour)')
plt.title('{} trains'.format(i))
# graphical ouptut
hf, ha = plt.subplots()
plt.plot(np.array(n), np.array(Pn) * 3.6, label='numerical')
plt.plot(np.array(n), np.array(Pa) * 3.6, label='analytical')
plt.xlabel('number of trains')
plt.ylabel('productivity (ton/hour)')
plt.title('{} trains'.format(i))
plt.legend()
hf, ha = plt.subplots()
plt.plot(np.array(n), np.array(tq) / 3600)
plt.xlabel('number of trains')
plt.ylabel('queue time (hours)')
plt.title('{} trains'.format(i))
plt.show() | 29.409091 | 66 | 0.608964 |
acf44552231ba5c417c5492a45f1205dc18cc4f4 | 2,001 | py | Python | medium-show-and-tell-caption-generator-master/medium_show_and_tell_caption_generator/inference1.py | mansa53/captions | 5340e24d55dda1264e02973967b984f32f056f85 | [
"MIT"
] | null | null | null | medium-show-and-tell-caption-generator-master/medium_show_and_tell_caption_generator/inference1.py | mansa53/captions | 5340e24d55dda1264e02973967b984f32f056f85 | [
"MIT"
] | null | null | null | medium-show-and-tell-caption-generator-master/medium_show_and_tell_caption_generator/inference1.py | mansa53/captions | 5340e24d55dda1264e02973967b984f32f056f85 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import logging
import math
import os
# Import the required module for text
# to speech conversion
import tensorflow as tf
from medium_show_and_tell_caption_generator.caption_generator import CaptionGenerator
from medium_show_and_tell_caption_generator.model import ShowAndTellModel
from medium_show_and_tell_caption_generator.vocabulary import Vocabulary
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("model_path", "", "Model graph def path")
tf.flags.DEFINE_string("vocab_file", "", "Text file containing the vocabulary.")
tf.flags.DEFINE_string("input_files", "",
"File pattern or comma-separated list of file patterns "
"of image files.")
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main(_):
model = ShowAndTellModel(FLAGS.model_path)
vocab = Vocabulary(FLAGS.vocab_file)
filenames = _load_filenames()
generator = CaptionGenerator(model, vocab)
for filename in filenames:
with tf.gfile.GFile(filename, "rb") as f:
image = f.read()
captions = generator.beam_search(image)
print("Captions for image %s:" % os.path.basename(filename))
for i, caption in enumerate(captions):
# Ignore begin and end tokens <S> and </S>.
sentence = [vocab.id_to_token(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
print(" %d) %s" % (i, sentence,))
def _load_filenames():
filenames = []
for file_pattern in FLAGS.input_files.split(","):
filenames.extend(tf.gfile.Glob(file_pattern))
logger.info("Running caption generation on %d files matching %s",
len(filenames), FLAGS.input_files)
return filenames
if __name__ == "__main__":
tf.app.run()
#
#print >> f, 'Filename:', filename # Python 2.x
| 26.68 | 85 | 0.682659 |
acf445554eeec87eddcba06304551eaab122c274 | 1,098 | py | Python | Python/Python For Absolute Beginner/84 Pickle module.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | Python/Python For Absolute Beginner/84 Pickle module.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | Python/Python For Absolute Beginner/84 Pickle module.py | omkarsutar1255/Python-Data | 169d0c54b23d9dd5a7f1aea41ab385121c3b3c63 | [
"CC-BY-3.0"
] | null | null | null | import pickle
# Pickling a python object
# Demonstrates pickle.dump/load (file-object based) and pickle.loads (bytes based).
cars = ["Audi", "BMW", "Maruti Suzuki", "Harryti Tuzuki"]  # it can be list, tuple
file = "picklefile.pkl"  # file name

# Serialize the list to disk. The context manager guarantees the handle is
# closed even if dump() raises — the original code leaked the read handles.
with open(file, 'wb') as fileobj:  # open file in binary mode
    pickle.dump(cars, fileobj)  # pickle function

# Deserialize straight from the open file object with pickle.load().
with open(file, 'rb') as fileob:
    print(type(fileob))  # class BufferedReader type
    picklefile = pickle.load(fileob)  # use load (not read) on a file object
print(picklefile)
print(type(picklefile))  # class list

# Deserialize from raw bytes instead: read the whole file, then loads().
with open(file, 'rb') as fileob:  # open file in binary mode
    print(type(fileob))  # class BufferedReader type
    f = fileob.read()  # read file
print(type(f))  # class bytes type
picklefile = pickle.loads(f)  # use loads; 's' stands for string/bytes input
print(picklefile)  # pickled file
print(type(picklefile))  # class list
acf44643a3c759d7c74fe036f3152a62cfee1990 | 6,038 | py | Python | src/elpde2.py | songqsh/foo1 | 536bf44cc4fb43a3ac0f2a64695f619ac7526651 | [
"MIT"
] | 1 | 2020-03-14T03:04:24.000Z | 2020-03-14T03:04:24.000Z | src/elpde2.py | songqsh/foo1 | 536bf44cc4fb43a3ac0f2a64695f619ac7526651 | [
"MIT"
] | 1 | 2019-07-01T20:35:39.000Z | 2019-07-04T22:07:50.000Z | src/elpde2.py | songqsh/foo1 | 536bf44cc4fb43a3ac0f2a64695f619ac7526651 | [
"MIT"
] | 2 | 2019-08-25T00:50:05.000Z | 2020-02-25T20:06:32.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 26 16:26:05 2019
@author: songqsh
"""
import ipdb
import time
import torch
import torch.nn as nn
import itertools
import matplotlib.pyplot as plt
def deep_iter(*shape):
    """Yield every index tuple of a grid with the given per-axis sizes."""
    return itertools.product(*map(range, shape))
class Pde:
    """Elliptic linear PDE problem data on the unit hypercube."""

    def __init__(
            self,
            n_dim_=2,
            lam_=0.,
            verbose=True
    ):
        self.n_dim_ = n_dim_  # spatial dimension
        self.lam_ = lam_      # discount / zeroth-order coefficient
        if verbose:
            print('>>>>Elliptic Linear PDE with ' + str(n_dim_) + '-dim')


def drift(self, s):
    """Zero drift in every coordinate."""
    return [0. for _ in range(self.n_dim_)]
Pde.drift = drift


def run(self, s):
    """Constant running cost, equal to -n_dim."""
    return -float(self.n_dim_)
Pde.run = run


def term(self, s):
    """Terminal cost: sum of (s_k - 1/2)^2 over coordinates."""
    return sum((a - .5) ** 2 for a in s)
Pde.term = term


def is_interior(self, s):  # domain
    """True iff s lies strictly inside the unit hypercube."""
    return all(0. < a < 1. for a in s)
Pde.is_interior = is_interior


def exact_soln(self, s):
    """Closed-form solution; coincides with the terminal cost."""
    return sum((a - .5) ** 2 for a in s)
Pde.exact_soln = exact_soln
###########MDP
class Mdp:
    """Finite-difference MDP approximation of the elliptic PDE on the unit
    hypercube, discretized on a uniform mesh with ``n_mesh_`` cells per axis.

    ``method`` selects the scheme: 'cfd' (central difference) or 'ufd'
    (upwind difference).
    """

    def __init__(
            self,
            pde,
            n_mesh_=8,
            method='cfd'
    ):
        ###### index domain
        self.pde = pde
        self.n_mesh_ = n_mesh_
        self.method = method
        self.n_dim_ = pde.n_dim_
        self.v_shape_ = tuple([n_mesh_ + 1] * self.n_dim_)  # value-grid shape
        self.v_size_ = (n_mesh_ + 1) ** self.n_dim_         # number of grid nodes
        self.h_ = 1. / n_mesh_                              # mesh width
        print(
            '>>>>MDP with ' + str(self.n_dim_)
            + '-dim ' + str(self.n_mesh_) + ' mesh num'
        )

    def i2s(self, ix):
        """Map a grid index tuple to its spatial coordinates."""
        return [x * self.h_ for x in ix]

    def is_interior(self, ix):
        """True iff the index lies strictly inside the mesh (off the boundary)."""
        return all(map(lambda a: 0 < a < self.n_mesh_, ix))

    #####transition
    #return:
    #  a float of discount rate
    #  a float of run cost
    #  a list of next indices
    #  a list of prob
    def step(self, ix):
        """One-step transition data of the discretized MDP at node ``ix``.

        BUG FIX: the original wrote ``ix1 = ix_list`` which *aliased* a single
        shared list, so the +1/-1 shifts accumulated across loop iterations and
        the returned neighbor indices were wrong (e.g. in 1-D the "down"
        neighbor came out as ``ix`` itself instead of ``ix - 1``). Each
        neighbor is now built from a fresh copy of ``ix``.
        """
        s = self.i2s(ix)
        b = self.pde.drift(s)
        discount_rate = 1; run_h = 0; ix_next = []; pr_next = []
        if self.method == 'cfd':
            discount_rate = (
                self.n_dim_ / (self.n_dim_ + self.pde.lam_ * (self.h_ ** 2))
            )
            run_h = self.pde.run(s) * self.h_ ** 2 / self.n_dim_
            for i in range(self.n_dim_):
                ix1 = list(ix); ix1[i] += 1; ix_next += [tuple(ix1), ]
                pr1 = (1 + 2. * self.h_ * b[i]) / self.n_dim_ / 2.0; pr_next += [pr1, ]
            for i in range(self.n_dim_):
                ix1 = list(ix); ix1[i] -= 1; ix_next += [tuple(ix1), ]
                pr1 = (1 - 2. * self.h_ * b[i]) / self.n_dim_ / 2.0; pr_next += [pr1, ]
        elif self.method == 'ufd':
            b_plus = [max(a, 0.) for a in b]
            b_minus = [min(-a, 0.) for a in b]
            c_ = self.n_dim_ + self.h_ * (sum(b_plus) + sum(b_minus))
            discount_rate = c_ / (c_ + self.h_ ** 2 * self.pde.lam_)
            run_h = self.pde.run(s) * self.h_ ** 2 / c_
            # NOTE(review): these 'ufd' weights do not obviously sum to one
            # (each term carries a "1 + ..." numerator) — confirm the intended
            # upwind normalization before relying on this branch.
            for i in range(self.n_dim_):
                ix1 = list(ix); ix1[i] += 1; ix_next += [tuple(ix1), ]
                pr1 = (1 + 2. * self.h_ * b_plus[i]) / c_; pr_next += [pr1, ]
            for i in range(self.n_dim_):
                ix1 = list(ix); ix1[i] -= 1; ix_next += [tuple(ix1), ]
                pr1 = (1 + 2. * self.h_ * b_minus[i]) / c_; pr_next += [pr1, ]
        return discount_rate, run_h, ix_next, pr_next

    def term_h(self, ix):
        """Terminal/boundary cost at a grid node."""
        return self.pde.term(self.i2s(ix))

    ####Bellman equation and total loss
    # v is a function with torch tensor as input
    def bellman(self, ix, v):
        """Bellman residual (rhs - lhs) at node ``ix`` for value function ``v``."""
        s = self.i2s(ix)
        disc, run_h, ix_next, pr_next = self.step(ix)
        lhs = v(torch.FloatTensor(s)); rhs = 0.
        if self.is_interior(ix):
            # Interior node: discounted expected value plus running cost.
            rhs += run_h
            for ix1, pr1 in zip(ix_next, pr_next):
                rhs += pr1 * v(torch.FloatTensor(self.i2s(ix1)))
            rhs *= disc
        else:
            # Boundary node: value is pinned to the terminal cost.
            rhs = self.term_h(ix)
        return (rhs - lhs)
def solver(mdp, n_epoch=500):
    """Fit a small neural network to the MDP value function by minimizing the
    summed squared Bellman residual over all grid nodes; returns the network.
    """
    ######### nn for value
    # One hidden layer of width 2*n_dim + 2 with ReLU activation.
    value = nn.Sequential(
        nn.Linear(mdp.n_dim_, 2 * mdp.n_dim_ + 2),
        nn.ReLU(),
        nn.Linear(2 * mdp.n_dim_ + 2, 1),
    )
    print(value)

    # optimizer
    optimizer = torch.optim.SGD(value.parameters(), lr=0.01, momentum=.8)

    # loss: sum of squared Bellman residuals over the whole grid
    def tot_loss():
        out = 0.
        for ix in deep_iter(*mdp.v_shape_):
            out += mdp.bellman(ix, value) ** 2
        return out  # /mdp.v_size_

    print_n = 10  # number of progress lines to emit
    epoch_per_print = int(n_epoch / print_n)
    start_time = time.time()
    for epoch in range(n_epoch):
        # ipdb.set_trace()
        loss = tot_loss()  # forward pass
        # backward propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (epoch + 1) % epoch_per_print == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, n_epoch, loss.item()))
    end_time = time.time()
    print('>>>time elapsed is: ' + str(end_time - start_time))
    return value
#####test
if __name__ == "__main__":
    # Solve the 1-D problem on an 8-cell mesh with the central scheme.
    p = Pde(n_dim_=1); m = Mdp(p, n_mesh_=8, method='cfd')
    value = solver(m, n_epoch=100)

    ###### check solution: mean squared error against the closed form
    err = 0
    for ix1 in deep_iter(*m.v_shape_):
        s1 = m.i2s(ix1)
        v1 = value(torch.FloatTensor(s1)).item()
        exact_v1 = p.exact_soln(s1)
        err1 = v1 - exact_v1
        err += err1 ** 2
        # print(ix1, i2s(ix1), v1, exact_soln(s1), err1)
    err = err / m.v_size_
    print('>>>L2-error-norm: ' + str(err))

    # In 1-D, plot the learned value against the exact solution.
    if p.n_dim_ == 1:
        cod_x = []; cod_y = []; cod_y_pred = []
        for ix1 in deep_iter(*m.v_shape_):
            s1 = m.i2s(ix1); cod_x += [s1, ]
            v1 = value(torch.FloatTensor(s1)).item(); cod_y_pred += [v1, ]
            exact_v1 = p.exact_soln(s1); cod_y += [exact_v1, ]
        plt.plot(cod_x, cod_y, cod_x, cod_y_pred)
        print(cod_y_pred)
| 27.198198 | 76 | 0.515402 |
acf4464eda9c8de9d2395a1da788bab41868390b | 3,372 | py | Python | data_analysis/ch04-time-series-visualizer/time_series_visualizer.py | chaudha4/python-projects | baba3235069b7d6b084f28904f0662c043762175 | [
"MIT"
] | null | null | null | data_analysis/ch04-time-series-visualizer/time_series_visualizer.py | chaudha4/python-projects | baba3235069b7d6b084f28904f0662c043762175 | [
"MIT"
] | 3 | 2021-11-23T22:19:19.000Z | 2022-03-12T00:52:34.000Z | data_analysis/ch04-time-series-visualizer/time_series_visualizer.py | chaudha4/python-projects | baba3235069b7d6b084f28904f0662c043762175 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

'''
Visualize time series data using a line chart, bar chart, and box plots. Using Pandas, matplotlib, and seaborn
to visualize a dataset containing the number of page views each day on the freeCodeCamp.org forum from 2016-05-09
to 2019-12-03. The data visualizations will help you understand the patterns in visits and identify yearly
and monthly growth.
'''

# Import data (Make sure to parse dates. Consider setting index column to 'date'.)
# Rebuild this script's directory by re-joining __file__'s path segments.
# NOTE(review): assumes '/' separators — may misbehave on Windows paths.
currDir = ""
for aa in __file__.split("/")[:-1]:
    currDir = currDir + aa + "/"
try:
    print("Reading file ", currDir + "fcc-forum-pageviews.csv")
    df = pd.read_csv(currDir + "fcc-forum-pageviews.csv",
                     parse_dates=True,
                     index_col=0)
except:
    # Bare except, but it re-raises — only adds the console message.
    print("\n\nCannot Open file\n")
    raise
#print(df.info())

# Clean data: keep only rows inside the 2.5%-97.5% page-view quantiles.
df = df[(df.value >= df.value.quantile(0.025)) & (df.value <= df.value.quantile(0.975))]
def draw_line_plot():
    """Draw the daily page-view line chart, save it, and return the figure."""
    plt.figure(figsize=(14, 8))
    fig, ax = plt.subplots(1, 1)
    ax.set_title('Daily freeCodeCamp Forum Page Views 5/2016-12/2019')
    # One line: date index on x, raw view counts on y.
    sns.lineplot(x=df.index, y="value", data=df, ax=ax)
    ax.set_xlabel('Date')
    ax.set_ylabel('Page Views')
    # Save image and return fig (don't change this part)
    fig.savefig('line_plot.png')
    return fig
def draw_bar_plot():
    """Draw average monthly page views grouped by year, save, return the figure."""
    # Copy and modify data for monthly bar plot: monthly means plus
    # year/month columns derived from the datetime index.
    monthly = df.copy().resample('M').mean()
    monthly["year"] = monthly.index.year
    monthly["month"] = monthly.index.month
    #print(monthly.info())
    month_names = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
    monthly["month"] = monthly["month"].apply(lambda x: month_names[x - 1])
    #print(monthly.head())
    #print(monthly.info())
    plt.figure(figsize=(14, 8))
    fig, ax = plt.subplots(1, 1)
    # One bar group per year, one hue per month (fixed calendar order).
    sns.barplot(x=monthly["year"], y=monthly.value, data=monthly, ax=ax,
                hue=monthly.month, hue_order=month_names,
                edgecolor=".2", palette="rainbow")
    plt.xlabel('Years')
    plt.ylabel('Average Page Views')
    # Save image and return fig (don't change this part)
    fig.savefig('bar_plot.png')
    return fig
def draw_box_plot():
    """Draw year-wise and month-wise box plots side by side, save, return fig."""
    # Prepare data for box plots (this part is done!)
    df_box = df.copy()
    df_box.reset_index(inplace=True)
    df_box['year'] = [d.year for d in df_box.date]
    df_box['month'] = [d.strftime('%b') for d in df_box.date]
    month_order = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

    plt.figure(figsize=(24, 18))
    fig, (ax_year, ax_month) = plt.subplots(1, 2)
    sns.boxplot(x="year", y="value", data=df_box, ax=ax_year)
    sns.boxplot(x="month", y="value", data=df_box, ax=ax_month, order=month_order)

    ax_year.set_xlabel('Year')
    ax_year.set_ylabel('Page Views')
    ax_year.set_title("Year-wise Box Plot (Trend)")
    ax_month.set_xlabel('Month')
    ax_month.set_ylabel('Page Views')
    ax_month.set_title("Month-wise Box Plot (Seasonality)")

    # Save image and return fig (don't change this part)
    fig.savefig('box_plot.png')
    return fig
| 28.1 | 131 | 0.624555 |
acf446d4288352fbd07f7629993756476e96759a | 2,690 | py | Python | spider_movie/movie_main.py | BenjaminChiu/Spider_MovieHome | d93a4038fb2d8d586f38c3514119d309aed4103b | [
"MIT"
] | 4 | 2021-10-30T08:19:30.000Z | 2022-02-23T05:47:15.000Z | spider_movie/movie_main.py | BenjaminChiu/Spider_MovieHome | d93a4038fb2d8d586f38c3514119d309aed4103b | [
"MIT"
] | null | null | null | spider_movie/movie_main.py | BenjaminChiu/Spider_MovieHome | d93a4038fb2d8d586f38c3514119d309aed4103b | [
"MIT"
] | 1 | 2021-09-16T12:10:35.000Z | 2021-09-16T12:10:35.000Z | # import sqlite3
from util import cfg
from my_thread.task_queue import TaskQueue
from my_thread.ThreadOne import ThreadOne
from my_thread.ThreadTwo import ThreadTwo
# cfg.py为自定义的项目总配置文件
'''
使用子线程请求,拿到response.text
借助适配规则,在本文件中的逻辑控制,完成任务
这样只需要修改适配规则,与逻辑控制就能完成任务,达到通用爬虫的目的
'''
def start_spider():
    """Crawl movie listing pages in two threaded stages and persist items to the DB."""
    # Determine the first and last pages of the listing.
    # dytt_Lastest.getMaxsize()
    # NOTE(review): dytt_Lastest is not imported in this file — confirm it is
    # made available elsewhere before running.
    LASTEST_MOIVE_TOTAL_SUM = dytt_Lastest.getMaxsize(cfg.WEBSITE + 'w.asp?p=1&f=3&l=t')
    # dyttlastest = dytt_Lastest('http://www.idyjy.com/w.asp?p=1&f=3&l=t', 'p=', '&f', LASTEST_MOIVE_TOTAL_SUM)
    dyttlastest = dytt_Lastest(cfg.WEBSITE + 'w.asp?p=1&f=3&l=t', 'p=', '&f', LASTEST_MOIVE_TOTAL_SUM)
    pagelist = dyttlastest.getPageUrlList()

    # ====== put pageList into queue 1 (queues are thread-safe) =========
    pageQueue = TaskQueue.getQueue_1()
    for item in pagelist:
        pageQueue.put(item, 3)  # timeout=3: wait up to 3 seconds

    # ======= fetch pageQueue (pageList) with worker threads (beware queue
    # exhaustion); results are stored into pageInfoList =========
    for i in range(cfg.THREAD_SUM):
        thread_one = ThreadOne(i, pageQueue)
        # thread_one.run()  # thread.run() would execute in the main thread only
        thread_one.start()
    pageQueue.join()  # newer API: wait for the queue; pairs with Queue.task_done()

    # Watch whether the thread_one workers have finished.
    # NOTE(review): busy-wait loop — spins without sleeping until queue 1 drains.
    while True:
        # The generated main-page links are exhausted (queue 1 empty).
        if TaskQueue.isQueue_1Empty():
            break
        # # queue 2 full (10 pages)
        # elif TaskQueue.isQueue_2Full():
        #     break
        else:
            pass
    # ============== requesting pageList finished ==============

    # Stage 3: drain itemQueue into the database.
    # NOTE(review): EntityService is not imported in this file either — confirm.
    service = EntityService('movie_home_210212')

    # === Stage 2 === fetch the entries of pageInfoList (MidQueue) and store
    # the parsed items into itemQueue.
    for i in range(cfg.THREAD_SUM):
        thread_two = ThreadTwo(TaskQueue.getQueue_2(), i)  # queue_2 was filled by thread_one
        thread_two.start()

    # Crawl counter.
    count = 1
    while True:
        # Queue 2 empty means crawling is done: flush remaining rows, close DB.
        if TaskQueue.isQueue_2Empty():
            service.finalSpider()
            # Queue exhausted; close the database connection.
            service.shutDownDB()
            break
        # Queue 3 full: flush to the database immediately to avoid running out of memory.
        elif TaskQueue.isQueue_3Full():
            service.doTable()
            service.finalSpider()
            print("当前分析页面的叠加数:" + str(count * 200))  # 200: queue 3's configured capacity
            count = count + 1
        else:
            pass
    # ============== requesting pageInfoList finished ==============
# Main entry point
if __name__ == '__main__':
    # read_proxy_json()  # load proxies (disabled)
    # NOTE: a single requests session could be shared across all requests.
    start_spider()
    # start_spider()  # queue 1
    # start_spider()  # queue 2 — would let stage 2 start as soon as queue 1
    #                 # has items, releasing queue-1 memory instead of waiting
    #                 # for stage 1 to finish completely.
    # if queue.join()
    # session.close()
| 28.617021 | 111 | 0.604833 |
acf44703ba60c701a30e598c54e9d3dd23eb60c0 | 9,863 | py | Python | src/etm/_old/data_fomc.py | jm4474/FOMCTextAnalysis | 0a039d9b197f487d8ba8c5d230b587c48cf865f6 | [
"MIT"
] | 6 | 2020-07-03T23:39:50.000Z | 2022-03-30T07:55:23.000Z | src/etm/_old/data_fomc.py | jm4474/FOMCTextAnalysis | 0a039d9b197f487d8ba8c5d230b587c48cf865f6 | [
"MIT"
] | null | null | null | src/etm/_old/data_fomc.py | jm4474/FOMCTextAnalysis | 0a039d9b197f487d8ba8c5d230b587c48cf865f6 | [
"MIT"
] | 12 | 2019-12-10T13:34:21.000Z | 2022-01-24T16:39:15.000Z | from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import pickle
import random
from scipy import sparse
import itertools
from scipy.io import savemat, loadmat
import os
import pandas as pd
import argparse
# Maximum / minimum document frequency
max_df = 1.0
min_df = 10  # choose desired value for min_df

parser = argparse.ArgumentParser(description='The Embedded Topic Model')
parser.add_argument('--dataset', type=str, default='both_full', help='data source in {bluebook, transcipt, both_subsampled} -- or anything else for both_full')
args = parser.parse_args()

# Read stopwords
with open('stops.txt', 'r') as f:
    stops = f.read().split('\n')

# Read data
print('reading text file...')
data_file = '../../analysis/python/output/lda_dataset.csv'
#with open(data_file, 'r') as f:
#docs = f.readlines()
docs = pd.read_csv(data_file)
print(docs.shape)

# Subset the corpus according to --dataset.
# FOMC_Section == '2.0' appears to mark transcript rows — TODO confirm.
if args.dataset=="bluebook":
    docs = docs.loc[~docs['FOMC_Section'].isin(['2.0'])]
    print(docs.shape)
    docs = docs.loc[docs['content'].str.contains(' ')]
if args.dataset=="transcript":
    docs = docs.loc[docs['FOMC_Section'].isin(['2.0'])]
    print(docs.shape)
    docs = docs.loc[docs['content'].str.contains(' ')]
if args.dataset=="both_subsampled":
    # Downsample the non-transcript rows to match the transcript count.
    tmp1 = docs.loc[docs['FOMC_Section'].isin(['2.0'])]
    tmp2 = docs.loc[~docs['FOMC_Section'].isin(['2.0'])]
    tmp1 = tmp1.loc[tmp1['content'].str.contains(' ')]
    tmp2 = tmp2.loc[tmp2['content'].str.contains(' ')]
    tmp1 = tmp1.sample(tmp2.shape[0])
    tmp1 = tmp1.append(tmp2, ignore_index=True)
    docs = tmp1
    print(docs.shape)
docs.to_csv("data_{}.csv".format(args.dataset), index=False)

# Keep documents with at least 3 whitespace-separated tokens, lowercase them,
# and fuse the "alternative a/b/c" bigrams into single tokens.
docs = list(docs['content'])
docs = [d for d in docs if len(d.split(" "))>2]
docs = [d.lower().replace('alternative a ', 'alternative_a ').replace('alternative b ', 'alternative_b ').replace('alternative c ', 'alternative_c ') \
        for d in docs]
print(len(docs))
# Create count vectorizer
print('counting document frequency of words...')
cvectorizer = CountVectorizer(min_df=min_df, max_df=max_df, stop_words=None)
# .sign() converts counts into a binary presence matrix (document frequency).
cvz = cvectorizer.fit_transform(docs).sign()

# Get vocabulary
print('building the vocabulary...')
sum_counts = cvz.sum(axis=0)
v_size = sum_counts.shape[1]
sum_counts_np = np.zeros(v_size, dtype=int)
for v in range(v_size):
    sum_counts_np[v] = sum_counts[0,v]
word2id = dict([(w, cvectorizer.vocabulary_.get(w)) for w in cvectorizer.vocabulary_])
id2word = dict([(cvectorizer.vocabulary_.get(w), w) for w in cvectorizer.vocabulary_])
del cvectorizer
print(' initial vocabulary size: {}'.format(v_size))

# Sort elements in vocabulary (ascending document frequency)
idx_sort = np.argsort(sum_counts_np)
vocab_aux = [id2word[idx_sort[cc]] for cc in range(v_size)]

# Filter out stopwords (if any)
vocab_aux = [w for w in vocab_aux if w not in stops]
print(' vocabulary size after removing stopwords from list: {}'.format(len(vocab_aux)))
print(' vocabulary after removing stopwords: {}'.format(len(vocab_aux)))

# Create dictionary and inverse dictionary
vocab = vocab_aux
del vocab_aux
word2id = dict([(w, j) for j, w in enumerate(vocab)])
id2word = dict([(j, w) for j, w in enumerate(vocab)])
# NOTE(review): dumps the entire vocabulary to stdout — debug leftover?
for i in word2id.keys():
    print(i)

# Split in train/test/valid (85/10/5 by random permutation)
print('tokenizing documents and splitting into train/test/valid...')
num_docs = cvz.shape[0]
trSize = int(np.floor(0.85*num_docs))
tsSize = int(np.floor(0.10*num_docs))
vaSize = int(num_docs - trSize - tsSize)
del cvz
idx_permute = np.random.permutation(num_docs).astype(int)

# Remove words not in train_data
vocab = list(set([w for idx_d in range(trSize) for w in docs[idx_permute[idx_d]].split() if w in word2id]))
word2id = dict([(w, j) for j, w in enumerate(vocab)])
id2word = dict([(j, w) for j, w in enumerate(vocab)])
print(' vocabulary after removing words not in train: {}'.format(len(vocab)))

# Tokenize each split into lists of vocabulary ids.
docs_tr = [[word2id[w] for w in docs[idx_permute[idx_d]].split() if w in word2id] for idx_d in range(trSize)]
docs_ts = [[word2id[w] for w in docs[idx_permute[idx_d+trSize]].split() if w in word2id] for idx_d in range(tsSize)]
docs_va = [[word2id[w] for w in docs[idx_permute[idx_d+trSize+tsSize]].split() if w in word2id] for idx_d in range(vaSize)]
del docs
print(' number of documents (train): {} [this should be equal to {}]'.format(len(docs_tr), trSize))
print(' number of documents (test): {} [this should be equal to {}]'.format(len(docs_ts), tsSize))
print(' number of documents (valid): {} [this should be equal to {}]'.format(len(docs_va), vaSize))

# Remove empty documents
print('removing empty documents...')
def remove_empty(in_docs):
    """Drop documents that ended up with no tokens after filtering."""
    return list(filter(lambda doc: doc != [], in_docs))
# Apply the empty-document filter to every split.
docs_tr = remove_empty(docs_tr)
docs_ts = remove_empty(docs_ts)
docs_va = remove_empty(docs_va)

# Remove test documents with length=1
docs_ts = [doc for doc in docs_ts if len(doc)>1]

# Split test set in 2 halves (first half / second half of each document)
print('splitting test documents in 2 halves...')
docs_ts_h1 = [[w for i,w in enumerate(doc) if i<=len(doc)/2.0-1] for doc in docs_ts]
docs_ts_h2 = [[w for i,w in enumerate(doc) if i>len(doc)/2.0-1] for doc in docs_ts]

# Getting lists of words and doc_indices
print('creating lists of words...')
def create_list_words(in_docs):
    """Flatten a list of token-id documents into one long token list."""
    flat = []
    for doc in in_docs:
        flat.extend(doc)
    return flat
# Flatten every split into a single token stream.
words_tr = create_list_words(docs_tr)
words_ts = create_list_words(docs_ts)
words_ts_h1 = create_list_words(docs_ts_h1)
words_ts_h2 = create_list_words(docs_ts_h2)
words_va = create_list_words(docs_va)
print(' len(words_tr): ', len(words_tr))
print(' len(words_ts): ', len(words_ts))
print(' len(words_ts_h1): ', len(words_ts_h1))
print(' len(words_ts_h2): ', len(words_ts_h2))
print(' len(words_va): ', len(words_va))

# Get doc indices
print('getting doc indices...')
def create_doc_indices(in_docs):
    """Emit, for every token, the index of the document it came from."""
    return [j for j, doc in enumerate(in_docs) for _ in doc]
# One document index per token, aligned with the words_* lists above.
doc_indices_tr = create_doc_indices(docs_tr)
doc_indices_ts = create_doc_indices(docs_ts)
doc_indices_ts_h1 = create_doc_indices(docs_ts_h1)
doc_indices_ts_h2 = create_doc_indices(docs_ts_h2)
doc_indices_va = create_doc_indices(docs_va)
print(' len(np.unique(doc_indices_tr)): {} [this should be {}]'.format(len(np.unique(doc_indices_tr)), len(docs_tr)))
print(' len(np.unique(doc_indices_ts)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts)), len(docs_ts)))
print(' len(np.unique(doc_indices_ts_h1)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts_h1)), len(docs_ts_h1)))
print(' len(np.unique(doc_indices_ts_h2)): {} [this should be {}]'.format(len(np.unique(doc_indices_ts_h2)), len(docs_ts_h2)))
print(' len(np.unique(doc_indices_va)): {} [this should be {}]'.format(len(np.unique(doc_indices_va)), len(docs_va)))

# Number of documents in each set
n_docs_tr = len(docs_tr)
n_docs_ts = len(docs_ts)
n_docs_ts_h1 = len(docs_ts_h1)
n_docs_ts_h2 = len(docs_ts_h2)
n_docs_va = len(docs_va)

# Remove unused variables
del docs_tr
del docs_ts
del docs_ts_h1
del docs_ts_h2
del docs_va

# Create bow representation
print('creating bow representation...')
def create_bow(doc_indices, words, n_docs, vocab_size):
    """Build a CSR document-term count matrix from parallel (doc, word) lists."""
    ones = [1] * len(doc_indices)
    coo = sparse.coo_matrix((ones, (doc_indices, words)), shape=(n_docs, vocab_size))
    return coo.tocsr()
# Sparse bag-of-words matrix for each split.
bow_tr = create_bow(doc_indices_tr, words_tr, n_docs_tr, len(vocab))
bow_ts = create_bow(doc_indices_ts, words_ts, n_docs_ts, len(vocab))
bow_ts_h1 = create_bow(doc_indices_ts_h1, words_ts_h1, n_docs_ts_h1, len(vocab))
bow_ts_h2 = create_bow(doc_indices_ts_h2, words_ts_h2, n_docs_ts_h2, len(vocab))
bow_va = create_bow(doc_indices_va, words_va, n_docs_va, len(vocab))

del words_tr
del words_ts
del words_ts_h1
del words_ts_h2
del words_va
del doc_indices_tr
del doc_indices_ts
del doc_indices_ts_h1
del doc_indices_ts_h2
del doc_indices_va

# Save vocabulary to file
path_save = '../data/fomc/{}/min_df_'.format(args.dataset) + str(min_df) + '/'
if not os.path.isdir(path_save):
    os.system('mkdir -p ' + path_save)  # NOTE(review): os.makedirs would be portable
with open(path_save + 'vocab.pkl', 'wb') as f:
    pickle.dump(vocab, f)
del vocab

# Split bow intro token/value pairs
print('splitting bow intro token/value pairs and saving to disk...')
def split_bow(bow_in, n_docs):
    """Split a CSR bow matrix into per-document token-id and count lists."""
    indices = []
    counts = []
    for doc in range(n_docs):
        row = bow_in[doc, :]
        indices.append(list(row.indices))
        counts.append(list(row.data))
    return indices, counts
# Persist each split as paired token/count .mat files, freeing memory as we go.
bow_tr_tokens, bow_tr_counts = split_bow(bow_tr, n_docs_tr)
savemat(path_save + 'bow_tr_tokens.mat', {'tokens': bow_tr_tokens}, do_compression=True)
savemat(path_save + 'bow_tr_counts.mat', {'counts': bow_tr_counts}, do_compression=True)
del bow_tr
del bow_tr_tokens
del bow_tr_counts

bow_ts_tokens, bow_ts_counts = split_bow(bow_ts, n_docs_ts)
savemat(path_save + 'bow_ts_tokens.mat', {'tokens': bow_ts_tokens}, do_compression=True)
savemat(path_save + 'bow_ts_counts.mat', {'counts': bow_ts_counts}, do_compression=True)
del bow_ts
del bow_ts_tokens
del bow_ts_counts

bow_ts_h1_tokens, bow_ts_h1_counts = split_bow(bow_ts_h1, n_docs_ts_h1)
savemat(path_save + 'bow_ts_h1_tokens.mat', {'tokens': bow_ts_h1_tokens}, do_compression=True)
savemat(path_save + 'bow_ts_h1_counts.mat', {'counts': bow_ts_h1_counts}, do_compression=True)
del bow_ts_h1
del bow_ts_h1_tokens
del bow_ts_h1_counts

bow_ts_h2_tokens, bow_ts_h2_counts = split_bow(bow_ts_h2, n_docs_ts_h2)
savemat(path_save + 'bow_ts_h2_tokens.mat', {'tokens': bow_ts_h2_tokens}, do_compression=True)
savemat(path_save + 'bow_ts_h2_counts.mat', {'counts': bow_ts_h2_counts}, do_compression=True)
del bow_ts_h2
del bow_ts_h2_tokens
del bow_ts_h2_counts

bow_va_tokens, bow_va_counts = split_bow(bow_va, n_docs_va)
savemat(path_save + 'bow_va_tokens.mat', {'tokens': bow_va_tokens}, do_compression=True)
savemat(path_save + 'bow_va_counts.mat', {'counts': bow_va_counts}, do_compression=True)
del bow_va
del bow_va_tokens
del bow_va_counts

print('Data ready !!')
print('*************')
| 36.802239 | 159 | 0.735882 |
acf447d8d5db2a78eb70ba30d7fb8be05f7417a4 | 89 | py | Python | edx_rest_framework_extensions/__init__.py | CredoEducation/edx-drf-extensions | 853fb5ec6392d57693008e1a1c1620b79cb8343b | [
"Apache-2.0"
] | null | null | null | edx_rest_framework_extensions/__init__.py | CredoEducation/edx-drf-extensions | 853fb5ec6392d57693008e1a1c1620b79cb8343b | [
"Apache-2.0"
] | null | null | null | edx_rest_framework_extensions/__init__.py | CredoEducation/edx-drf-extensions | 853fb5ec6392d57693008e1a1c1620b79cb8343b | [
"Apache-2.0"
] | null | null | null | """ edx Django REST Framework extensions. """
# Package version string, consumed by packaging tooling; excluded from coverage.
__version__ = '6.5.0'  # pragma: no cover
| 22.25 | 45 | 0.662921 |
acf447fbd465081ed6719ef1dbeb76dc5a511ecd | 2,418 | py | Python | DeepLearning AI/Introduction to TF/Code/Week 4/test/main.py | Ace5584/Machine-Learning-Notes | 8d721895165833f6ea2ac3c75326ec5ed29111eb | [
"Apache-2.0"
] | 2 | 2021-10-01T07:28:58.000Z | 2022-01-23T00:20:34.000Z | DeepLearning AI/Introduction to TF/Code/Week 4/test/main.py | Ace5584/Machine-Learning-Notes | 8d721895165833f6ea2ac3c75326ec5ed29111eb | [
"Apache-2.0"
] | null | null | null | DeepLearning AI/Introduction to TF/Code/Week 4/test/main.py | Ace5584/Machine-Learning-Notes | 8d721895165833f6ea2ac3c75326ec5ed29111eb | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import os
from os import path, getcwd, chdir
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop
# GRADED FUNCTION: train_happy_sad_model
def train_happy_sad_model():
# Please write your code only where you are indicated.
# please do not remove # model fitting inline comments.
DESIRED_ACCURACY = 0.999
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, log={}):
if log.get('accuracy') > DESIRED_ACCURACY:
print('Reached 99.9% accuracy so cancelling training!')
model.stop_training = True
callbacks = myCallback()
# This Code Block should Define and Compile the Model. Please assume the images are 150 X 150 in your implementation.
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(40, activation='relu'),
tf.keras.layers.Dense(1, 'sigmoid')
])
model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['accuracy'])
# This code block should create an instance of an ImageDataGenerator called train_datagen
# And a train_generator by calling train_datagen.flow_from_directory
train_datagen = ImageDataGenerator(rescale=1./255)
# Please use a target_size of 150 X 150.
train_generator = train_datagen.flow_from_directory(
'C:/Users/Alex Lai.DESKTOP-AJOHRHM/Desktop/Deep Learning AI course/Week 4/test/happy-or-sad',
target_size=(150, 150), batch_size=10, class_mode='binary'
)
# Expected output: 'Found 80 images belonging to 2 classes'
# This code block should call model.fit_generator and train for
# a number of epochs.
# model fitting
history = model.fit_generator(train_generator, steps_per_epoch=8, epochs=15, callbacks=[callbacks], verbose=1)
# model fitting
return history.history['accuracy'][-1]
# The Expected output: "Reached 99.9% accuracy so cancelling training!""
train_happy_sad_model()
| 36.089552 | 121 | 0.696857 |
acf44822bad57b68a7492750baa5fa03e4227cb8 | 387 | py | Python | tests/tests/wsgi.py | jaap3/django-replay | 8fd192a2f404608b97c4ff5d6236a415a62a2e0f | [
"Apache-2.0"
] | 18 | 2015-11-08T16:22:19.000Z | 2021-07-01T10:05:02.000Z | tests/tests/wsgi.py | jaap3/django-replay | 8fd192a2f404608b97c4ff5d6236a415a62a2e0f | [
"Apache-2.0"
] | 5 | 2017-10-24T07:45:40.000Z | 2021-03-08T16:58:59.000Z | tests/tests/wsgi.py | jaap3/django-replay | 8fd192a2f404608b97c4ff5d6236a415a62a2e0f | [
"Apache-2.0"
] | 5 | 2015-04-07T10:39:45.000Z | 2019-01-10T12:53:24.000Z | """
WSGI config for tests project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the test settings before building the WSGI handler
# (setdefault keeps any value already exported in the environment).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
# Module-level WSGI callable expected by WSGI servers.
application = get_wsgi_application()
| 22.764706 | 78 | 0.782946 |
acf4484ebbbab8f04f605d964c1cca5dcdfd3bec | 399 | py | Python | misclientes/migrations/0019_enterprise_pic.py | mrbrazzi/django-misclientes | 8017cc67e243e4384c3f52ae73d06e16f8fb8d5b | [
"Apache-2.0"
] | null | null | null | misclientes/migrations/0019_enterprise_pic.py | mrbrazzi/django-misclientes | 8017cc67e243e4384c3f52ae73d06e16f8fb8d5b | [
"Apache-2.0"
] | null | null | null | misclientes/migrations/0019_enterprise_pic.py | mrbrazzi/django-misclientes | 8017cc67e243e4384c3f52ae73d06e16f8fb8d5b | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.6 on 2018-10-12 18:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add an optional `pic` image field to the
    `enterprise` model."""

    dependencies = [
        ('misclientes', '0018_cliente_signature'),
    ]
    operations = [
        migrations.AddField(
            model_name='enterprise',
            name='pic',
            # null=True keeps existing rows valid; upload_to='' stores files
            # directly under MEDIA_ROOT.
            field=models.ImageField(null=True, upload_to=''),
        ),
    ]
| 21 | 61 | 0.598997 |
acf449024bc98fdace56a9905e3afde13f6c3879 | 14,134 | py | Python | nff/nn/models/cp3d.py | jkaraguesian/NeuralForceField | 4ca4f4c7edc0ed1f70952db9e42d8ef9bbe109d8 | [
"MIT"
] | null | null | null | nff/nn/models/cp3d.py | jkaraguesian/NeuralForceField | 4ca4f4c7edc0ed1f70952db9e42d8ef9bbe109d8 | [
"MIT"
] | null | null | null | nff/nn/models/cp3d.py | jkaraguesian/NeuralForceField | 4ca4f4c7edc0ed1f70952db9e42d8ef9bbe109d8 | [
"MIT"
] | null | null | null | from torch import nn
import torch
import numpy as np
import math
from nff.data.graphs import get_bond_idx
from nff.nn.models.conformers import WeightedConformers
from nff.nn.modules import (ChemPropConv, ChemPropMsgToNode,
ChemPropInit, SchNetEdgeFilter,
CpSchNetConv)
from nff.utils.tools import make_directed
from nff.utils.confs import split_batch
REINDEX_KEYS = ["nbr_list", "bonded_nbr_list"]
class ChemProp3D(WeightedConformers):
    """
    Model that uses a representation of a molecule in terms of different 3D
    conformers to predict properties. The fingerprints of each conformer are
    generated using a 3D extension of the ChemProp model to include distance
    information. The 3D information is featurized using a SchNet Gaussian
    filter.
    """

    def __init__(self, modelparams):
        """
        Initialize model.
        Args:
            modelparams (dict): dictionary of parameters for the model
        Returns:
            None
        """
        WeightedConformers.__init__(self, modelparams)
        # get rid of the atom embedding, as we'll be using graph-based
        # atom features instead of atomic number embeddings
        delattr(self, "atom_embed")

        cp_input_layers = modelparams["cp_input_layers"]
        schnet_input_layers = modelparams["schnet_input_layers"]
        output_layers = modelparams["output_layers"]

        # make the convolutions, the input networks W_i for both
        # SchNet and ChemProp, and the output network W_o
        self.W_i_cp = ChemPropInit(input_layers=cp_input_layers)
        self.W_i_schnet = ChemPropInit(input_layers=schnet_input_layers)
        self.convolutions = self.make_convs(modelparams)
        self.W_o = ChemPropMsgToNode(output_layers=output_layers)

        # dimension of the hidden SchNet distance edge vector
        self.n_filters = modelparams["n_filters"]

        # edge filter to convert distances to SchNet feature vectors
        self.edge_filter = SchNetEdgeFilter(
            cutoff=modelparams["cutoff"],
            n_gaussians=modelparams["n_gaussians"],
            trainable_gauss=modelparams["trainable_gauss"],
            n_filters=modelparams["n_filters"],
            dropout_rate=modelparams["dropout_rate"],
            activation=modelparams["activation"])

    def make_convs(self, modelparams):
        """
        Make the convolution layers.
        Args:
            modelparams (dict): dictionary of parameters for the model
        Returns:
            convs (nn.ModuleList): list of networks for each convolution
        """
        num_conv = modelparams["n_convolutions"]
        # NOTE: mutates the caller's `modelparams` dict in place before
        # forwarding it to each convolution layer.
        modelparams.update({"n_edge_hidden": modelparams["mol_basis"]})

        # call `ChemPropConv` to make the convolution layers
        convs = nn.ModuleList([ChemPropConv(**modelparams)
                               for _ in range(num_conv)])
        return convs

    def get_distance_feats(self,
                           batch,
                           xyz,
                           offsets,
                           bond_nbrs):
        """
        Get distance features.
        Args:
            batch (dict): batched sample of species
            xyz (torch.Tensor): xyz of the batch
            offsets (float): periodic boundary condition offsets
            bond_nbrs (torch.LongTensor): directed bonded neighbor list
        Returns:
            nbr_list (torch.LongTensor): directed neighbor list
            distance_feats (torch.Tensor): distance-based edge features
            bond_idx (torch.LongTensor): indices that map bonded atom pairs
                to their location in the neighbor list.
        """
        # get directed neighbor list
        nbr_list, nbr_was_directed = make_directed(batch["nbr_list"])

        # distances
        distances = (xyz[nbr_list[:, 0]] - xyz[nbr_list[:, 1]] -
                     offsets).pow(2).sum(1).sqrt()[:, None]

        # put through Gaussian filter and dense layer to get features
        distance_feats = self.edge_filter(distances)

        # get the bond indices, and adjust as necessary if the neighbor list
        # wasn't directed before
        if "bond_idx" in batch:
            bond_idx = batch["bond_idx"]
            if not nbr_was_directed:
                # `make_directed` appends the reversed pairs after the
                # originals, so reverse edges live half a list further down
                nbr_dim = nbr_list.shape[0]
                bond_idx = torch.cat([bond_idx,
                                      bond_idx + nbr_dim // 2])
        else:
            bond_idx = get_bond_idx(bond_nbrs, nbr_list)
        return nbr_list, distance_feats, bond_idx

    def make_h(self,
               batch,
               r,
               xyz,
               offsets):
        """
        Initialize the hidden edge features.
        Args:
            batch (dict): batched sample of species
            r (torch.Tensor): initial atom features
            xyz (torch.Tensor): xyz of the batch
            offsets (float): periodic boundary condition offsets
        Returns:
            h_0 (torch.Tensor): initial hidden edge features
        """
        # get the directed bond list and bond features
        bond_nbrs, was_directed = make_directed(batch["bonded_nbr_list"])
        bond_feats = batch["bond_features"]
        device = bond_nbrs.device

        # if it wasn't directed before, repeat the bond features twice
        if not was_directed:
            bond_feats = torch.cat([bond_feats] * 2, dim=0)

        # get the distance-based edge features
        nbr_list, distance_feats, bond_idx = self.get_distance_feats(
            batch=batch,
            xyz=xyz,
            offsets=offsets,
            bond_nbrs=bond_nbrs)

        # combine node and bonded edge features to get the bond component
        # of h_0
        cp_bond_feats = self.W_i_cp(r=r,
                                    bond_feats=bond_feats,
                                    bond_nbrs=bond_nbrs)
        # zero everywhere except bonded pairs, whose ChemProp features are
        # scattered in via `bond_idx`
        h_0_bond = torch.zeros((nbr_list.shape[0], cp_bond_feats.shape[1]))
        h_0_bond = h_0_bond.to(device)
        h_0_bond[bond_idx] = cp_bond_feats

        # combine node and distance edge features to get the schnet component
        # of h_0
        h_0_distance = self.W_i_schnet(r=r,
                                       bond_feats=distance_feats,
                                       bond_nbrs=nbr_list)

        # concatenate the two together
        h_0 = torch.cat([h_0_bond, h_0_distance], dim=-1)

        return h_0

    def convolve_sub_batch(self,
                           batch,
                           xyz=None,
                           xyz_grad=False):
        """
        Apply the convolution layers to a sub-batch.
        Args:
            batch (dict): batched sample of species
            xyz (torch.Tensor): xyz of the batch
            xyz_grad (bool): whether to set xyz.requires_grad = True
        Returns:
            new_node_feats (torch.Tensor): new node features after
                the convolutions.
            xyz (torch.Tensor): xyz of the batch
        """
        if xyz is None:
            xyz = batch["nxyz"][:, 1:4]
        if xyz_grad:
            xyz.requires_grad = True

        # get the directed neighbor list
        a, _ = make_directed(batch["nbr_list"])

        # get the atom features
        r = batch["atom_features"]

        # offsets for periodic boundary conditions
        offsets = batch.get("offsets", 0)
        # to deal with any shape mismatches
        if hasattr(offsets, 'max') and offsets.max() == 0:
            offsets = 0

        # initialize hidden bond features
        h_0 = self.make_h(batch=batch,
                          r=r,
                          xyz=xyz,
                          offsets=offsets)
        h_new = h_0.clone()

        # update edge features
        for conv in self.convolutions:
            h_new = conv(h_0=h_0,
                         h_new=h_new,
                         nbrs=a,
                         kj_idx=batch.get("kj_idx"),
                         ji_idx=batch.get("ji_idx"))

        # convert back to node features
        new_node_feats = self.W_o(r=r,
                                  h=h_new,
                                  nbrs=a)
        return new_node_feats, xyz
class OnlyBondUpdateCP3D(ChemProp3D):
    """
    ChemProp3D variant in which, per the class name, the convolutions update
    hidden features for bonded edges only; raw interatomic distances `e`
    are passed to each `CpSchNetConv` alongside the bonded-edge indices.
    """

    def __init__(self, modelparams):
        """
        Initialize model.
        Args:
            modelparams (dict): dictionary of parameters for the model
        Returns:
            None
        """
        # NOTE: calls WeightedConformers.__init__ directly, bypassing
        # ChemProp3D.__init__ (so, unlike the parent, `atom_embed` is not
        # removed and no SchNet edge filter is created here).
        WeightedConformers.__init__(self, modelparams)

        input_layers = modelparams["input_layers"]
        output_layers = modelparams["output_layers"]

        # make the convolutions, the input network W_i, and the output
        # network W_o
        self.W_i = ChemPropInit(input_layers=input_layers)
        self.convolutions = self.make_convs(modelparams)
        self.W_o = ChemPropMsgToNode(
            output_layers=output_layers)

        # dimension of the hidden bond vector
        self.n_bond_hidden = modelparams["n_bond_hidden"]

    def make_convs(self, modelparams):
        """
        Make the convolution layers.
        Args:
            modelparams (dict): dictionary of parameters for the model
        Returns:
            convs (nn.ModuleList): list of networks for each convolution
        """
        num_conv = modelparams["n_convolutions"]
        same_filters = modelparams["same_filters"]

        # call `CpSchNetConv` to make the convolution layers
        convs = nn.ModuleList([CpSchNetConv(**modelparams)
                               for _ in range(num_conv)])

        # if you want to use the same filters for every convolution, repeat
        # the initial network and delete all the others
        if same_filters:
            convs = nn.ModuleList([convs[0] for _ in range(num_conv)])
        return convs

    def make_h(self,
               batch,
               nbr_list,
               r,
               nbr_was_directed):
        """
        Initialize the hidden bond features.
        Args:
            batch (dict): batched sample of species
            nbr_list (torch.LongTensor): neighbor list
            r (torch.Tensor): initial atom features
            nbr_was_directed (bool): whether the old neighbor list
                was directed or not
        Returns:
            h_0 (torch.Tensor): initial hidden bond features
            bond_nbrs (torch.LongTensor): bonded neighbor list
            bond_idx (torch.LongTensor): indices that map
                an element of `bond_nbrs` to the corresponding
                element in `nbr_list`.
        """
        # get the directed bond list and bond features
        bond_nbrs, was_directed = make_directed(batch["bonded_nbr_list"])
        bond_feats = batch["bond_features"]
        device = bond_nbrs.device

        # if it wasn't directed before, repeat the bond features twice
        if not was_directed:
            bond_feats = torch.cat([bond_feats] * 2, dim=0)

        # initialize hidden bond features
        h_0_bond = self.W_i(r=r,
                            bond_feats=bond_feats,
                            bond_nbrs=bond_nbrs)

        # initialize `h_0`, the features of all edges
        # (including bonded ones), to zero
        nbr_dim = nbr_list.shape[0]
        h_0 = torch.zeros((nbr_dim, self.n_bond_hidden))
        h_0 = h_0.to(device)

        # set the features of bonded edges equal to the bond
        # features
        if "bond_idx" in batch:
            bond_idx = batch["bond_idx"]
            if not nbr_was_directed:
                # reverse edges were appended after the originals, so they
                # sit half a (directed) list further down
                nbr_dim = nbr_list.shape[0]
                bond_idx = torch.cat([bond_idx,
                                      bond_idx + nbr_dim // 2])
        else:
            bond_idx = get_bond_idx(bond_nbrs, nbr_list)
        bond_idx = bond_idx.to(device)
        h_0[bond_idx] = h_0_bond
        return h_0, bond_nbrs, bond_idx

    def convolve_sub_batch(self,
                           batch,
                           xyz=None,
                           xyz_grad=False):
        """
        Apply the convolution layers to a sub-batch.
        Args:
            batch (dict): batched sample of species
            xyz (torch.Tensor): xyz of the batch
            xyz_grad (bool): whether to set xyz.requires_grad = True
        Returns:
            new_node_feats (torch.Tensor): new node features after
                the convolutions.
            xyz (torch.Tensor): xyz of the batch
        """
        if xyz is None:
            xyz = batch["nxyz"][:, 1:4]
        if xyz_grad:
            xyz.requires_grad = True

        a, nbr_was_directed = make_directed(batch["nbr_list"])

        # get the atom features
        r = batch["atom_features"]
        offsets = batch.get("offsets", 0)
        # to deal with any shape mismatches
        if hasattr(offsets, "max") and offsets.max() == 0:
            offsets = 0

        # get the distances between neighbors
        e = (xyz[a[:, 0]] - xyz[a[:, 1]] -
             offsets).pow(2).sum(1).sqrt()[:, None]

        # initialize hidden bond features
        h_0, bond_nbrs, bond_idx = self.make_h(
            batch=batch,
            nbr_list=a,
            r=r,
            nbr_was_directed=nbr_was_directed)
        h_new = h_0.clone()

        # update edge features
        for conv in self.convolutions:
            # don't use any kj_idx or ji_idx
            # because they are only relevant when
            # you're doing updates with all neighbors,
            # not with just the bonded neighbors like
            # we do here
            h_new = conv(h_0=h_0,
                         h_new=h_new,
                         all_nbrs=a,
                         bond_nbrs=bond_nbrs,
                         bond_idx=bond_idx,
                         e=e,
                         kj_idx=None,
                         ji_idx=None)

        # convert back to node features
        new_node_feats = self.W_o(r=r,
                                  h=h_new,
                                  nbrs=a)
        return new_node_feats, xyz
| 34.057831 | 77 | 0.563039 |
acf449ddc88d2e9068678a5a0860decdc7858015 | 1,058 | py | Python | package/awesome_panel/application/components/gallery_page_component.py | Jhsmit/awesome-panel | 53f7754f7c505a2666f6724df26c851ae942ec40 | [
"Apache-2.0"
] | null | null | null | package/awesome_panel/application/components/gallery_page_component.py | Jhsmit/awesome-panel | 53f7754f7c505a2666f6724df26c851ae942ec40 | [
"Apache-2.0"
] | null | null | null | package/awesome_panel/application/components/gallery_page_component.py | Jhsmit/awesome-panel | 53f7754f7c505a2666f6724df26c851ae942ec40 | [
"Apache-2.0"
] | null | null | null | """In this module we define the GalleryPageComponent"""
import panel as pn
import param
from awesome_panel.application.models import Page
from awesome_panel.application.services import PageService
from awesome_panel.application.views.gallery_page_view import GalleryPageView
class GalleryPageComponent(param.Parameterized):
    """The GalleryPageComponent shows a thumbnail of the page and enables the
    user to navigate to the page.

    Clicking the view (tracked via ``view.clicks``) makes ``page`` the active
    page of ``page_service``.
    """

    page = param.ClassSelector(class_=Page, constant=True)
    page_service = param.ClassSelector(class_=PageService, constant=True)
    view = param.ClassSelector(class_=pn.Column)

    def __init__(self, page, **params):
        """Create the component for `page`, building its view and defaulting
        `page_service` to a fresh PageService if none was provided."""
        params["view"] = GalleryPageView(page=page)
        params["page"] = page
        if "page_service" not in params:
            params["page_service"] = PageService()
        super().__init__(**params)

    @param.depends("view.clicks", watch=True)
    def _load_page(self, _=None):
        # Navigate: make this component's page the service's active page.
        # FIX: removed a stray debug `print(self.page.name)` left in the
        # click handler.
        self.page_service.page = self.page
acf449de30c697b4a73f7d764c27db178a664907 | 552 | py | Python | tests/base/test_endpoint_config.py | saleweaver/rapid_rest_client | 6b249d9476487a89d09f78006d3422432490403e | [
"MIT"
] | 5 | 2022-01-11T00:59:45.000Z | 2022-01-16T20:26:51.000Z | tests/base/test_endpoint_config.py | saleweaver/rapid_rest_client | 6b249d9476487a89d09f78006d3422432490403e | [
"MIT"
] | null | null | null | tests/base/test_endpoint_config.py | saleweaver/rapid_rest_client | 6b249d9476487a89d09f78006d3422432490403e | [
"MIT"
] | null | null | null | from dataclasses import FrozenInstanceError
import pytest
from rest_client.base.config import BaseUrlConfig
# Shared fixture value for the tests below.
base_url = 'https://www.saleweaver.com/'
def test_create_endpoint_config():
    """A BaseUrlConfig stores the given base_url and defaults sandbox_url to None."""
    endpoint_config = BaseUrlConfig(base_url)
    assert endpoint_config.base_url == base_url
    assert endpoint_config.sandbox_url is None
def test_fail_on_assign():
    """BaseUrlConfig is a frozen dataclass: attribute assignment must raise."""
    endpoint_config = BaseUrlConfig(base_url)
    with pytest.raises(FrozenInstanceError) as excinfo:
        endpoint_config.sandbox_url = 'Foo'
    # redundant with pytest.raises, but asserts the exact exception type
    assert excinfo.type == FrozenInstanceError
| 26.285714 | 55 | 0.78442 |
acf44a42d4a016e9df42dfae4c00c1c12ebbca68 | 3,050 | py | Python | st2common/tests/unit/base.py | ekhavana/st2 | 2b47b0e317a2dfd7d92d63ec6dcf706493148890 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/base.py | ekhavana/st2 | 2b47b0e317a2dfd7d92d63ec6dcf706493148890 | [
"Apache-2.0"
] | null | null | null | st2common/tests/unit/base.py | ekhavana/st2 | 2b47b0e317a2dfd7d92d63ec6dcf706493148890 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import mongoengine
from st2common.models import db
from st2common.models.db import stormbase
from st2common.persistence.base import Access
from st2common.exceptions.db import StackStormDBObjectNotFoundError
# Public API of this test-helper module.
__all__ = [
    'BaseDBModelCRUDTestCase',
    'FakeModelDB',
    # FIX: was a duplicate 'FakeModelDB'; the third export defined below is
    # the FakeModel accessor class.
    'FakeModel'
]
class BaseDBModelCRUDTestCase(object):
    """Reusable mixin exercising create / read / update / delete for a DB
    model. Subclasses (combined with a TestCase base) configure the class
    attributes below."""

    # DB model class under test
    model_class = None
    # persistence (Access) class used to save / retrieve the model
    persistance_class = None
    # kwargs used to instantiate model_class in the create step
    model_class_kwargs = {}
    # name of the attribute mutated during the update step
    update_attribute_name = None
    # attribute names excluded from the post-create equality checks
    skip_check_attribute_names = []

    def test_crud_operations(self):
        # 1. Test create
        model_db = self.model_class(**self.model_class_kwargs)
        saved_db = self.persistance_class.add_or_update(model_db)
        retrieved_db = self.persistance_class.get_by_id(saved_db.id)
        self.assertEqual(saved_db.id, retrieved_db.id)

        # verify every provided attribute round-trips through the DB
        for attribute_name, attribute_value in self.model_class_kwargs.items():
            if attribute_name in self.skip_check_attribute_names:
                continue
            self.assertEqual(getattr(saved_db, attribute_name), attribute_value)
            self.assertEqual(getattr(retrieved_db, attribute_name), attribute_value)

        # 2. Test update (time-based value guarantees it differs per run)
        updated_attribute_value = 'updated-%s' % (str(time.time()))
        setattr(model_db, self.update_attribute_name, updated_attribute_value)
        saved_db = self.persistance_class.add_or_update(model_db)
        self.assertEqual(getattr(saved_db, self.update_attribute_name), updated_attribute_value)

        retrieved_db = self.persistance_class.get_by_id(saved_db.id)
        self.assertEqual(saved_db.id, retrieved_db.id)
        self.assertEqual(getattr(retrieved_db, self.update_attribute_name), updated_attribute_value)

        # 3. Test delete -- a subsequent lookup must raise not-found
        self.persistance_class.delete(model_db)
        self.assertRaises(StackStormDBObjectNotFoundError, self.persistance_class.get_by_id,
                          model_db.id)
class FakeModelDB(stormbase.StormBaseDB):
    """Minimal mongoengine document used as a stand-in model in DB tests."""

    context = stormbase.EscapedDictField()
    index = mongoengine.IntField(min_value=0)
    category = mongoengine.StringField()
    timestamp = mongoengine.DateTimeField()
class FakeModel(Access):
    """Persistence accessor for FakeModelDB documents."""

    impl = db.MongoDBAccess(FakeModelDB)

    @classmethod
    def _get_impl(cls):
        # Access subclasses expose their MongoDBAccess implementation here.
        return cls.impl
| 36.309524 | 100 | 0.738689 |
acf44a512128279581f122ea891e6a1d6519378d | 2,501 | py | Python | Examples/ImageRegistrationMethod1/ImageRegistrationMethod1.py | HongdaZ/SimpleITK | c4bc2f9beb25f7c9bbc2daa934c08072a04949d6 | [
"Apache-2.0"
] | 1 | 2021-03-30T19:29:34.000Z | 2021-03-30T19:29:34.000Z | Examples/ImageRegistrationMethod1/ImageRegistrationMethod1.py | resace3/SimpleITK | 4e04ab7936038d91c5dc8bac991833becb88a69e | [
"Apache-2.0"
] | null | null | null | Examples/ImageRegistrationMethod1/ImageRegistrationMethod1.py | resace3/SimpleITK | 4e04ab7936038d91c5dc8bac991833becb88a69e | [
"Apache-2.0"
] | 1 | 2021-03-09T07:13:26.000Z | 2021-03-09T07:13:26.000Z | #!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
from __future__ import print_function
import SimpleITK as sitk
import sys
import os
def command_iteration(method):
    """Print one progress line for the registration: iteration, metric, position."""
    iteration = method.GetOptimizerIteration()
    metric = method.GetMetricValue()
    position = method.GetOptimizerPosition()
    print("%3d = %10.5f : %s" % (iteration, metric, position))
# Require fixed image, moving image, and output transform paths on argv.
if len(sys.argv) < 4:
    print("Usage:", sys.argv[0], "<fixedImageFilter> <movingImageFile>",
          "<outputTransformFile>")
    sys.exit(1)

fixed = sitk.ReadImage(sys.argv[1], sitk.sitkFloat32)
moving = sitk.ReadImage(sys.argv[2], sitk.sitkFloat32)

# Translation-only registration minimizing mean squared intensity difference.
R = sitk.ImageRegistrationMethod()
R.SetMetricAsMeanSquares()
R.SetOptimizerAsRegularStepGradientDescent(4.0, .01, 200)
R.SetInitialTransform(sitk.TranslationTransform(fixed.GetDimension()))
R.SetInterpolator(sitk.sitkLinear)

# Log progress after every optimizer iteration.
R.AddCommand(sitk.sitkIterationEvent, lambda: command_iteration(R))

outTx = R.Execute(fixed, moving)

print("-------")
print(outTx)
print("Optimizer stop condition: {0}"
      .format(R.GetOptimizerStopConditionDescription()))
print(" Iteration: {0}".format(R.GetOptimizerIteration()))
print(" Metric value: {0}".format(R.GetMetricValue()))

sitk.WriteTransform(outTx, sys.argv[3])

# Unless suppressed, resample the moving image with the found transform and
# display a red/green/blend composition of fixed vs. registered images.
if ("SITK_NOSHOW" not in os.environ):
    resampler = sitk.ResampleImageFilter()
    resampler.SetReferenceImage(fixed)
    resampler.SetInterpolator(sitk.sitkLinear)
    resampler.SetDefaultPixelValue(100)
    resampler.SetTransform(outTx)
    out = resampler.Execute(moving)
    simg1 = sitk.Cast(sitk.RescaleIntensity(fixed), sitk.sitkUInt8)
    simg2 = sitk.Cast(sitk.RescaleIntensity(out), sitk.sitkUInt8)
    cimg = sitk.Compose(simg1, simg2, simg1 // 2. + simg2 // 2.)
    sitk.Show(cimg, "ImageRegistration1 Composition")
| 34.260274 | 75 | 0.668133 |
acf44b2ca63bf1f30d00623208b467d1c3435c13 | 572 | py | Python | python/062_Unique_Paths.py | dvlpsh/leetcode-1 | f965328af72113ac8a5a9d6624868c1502be937b | [
"MIT"
] | 4,416 | 2016-03-30T15:02:26.000Z | 2022-03-31T16:31:03.000Z | python/062_Unique_Paths.py | YinpuLi/leetcode-6 | 1371de2631d745efba39de41b51c3424e35da434 | [
"MIT"
] | 20 | 2018-11-17T13:46:25.000Z | 2022-03-13T05:37:06.000Z | python/062_Unique_Paths.py | YinpuLi/leetcode-6 | 1371de2631d745efba39de41b51c3424e35da434 | [
"MIT"
] | 1,374 | 2017-05-26T15:44:30.000Z | 2022-03-30T19:21:02.000Z | class Solution:
def uniquePaths(self, m, n):
"""
:type m: int
:type n: int
:rtype: int
"""
dmap = [[0] * n for _ in range(m)]
for i in range(m):
dmap[i][0] = 1
for j in range(n):
dmap[0][j] = 1
for i in range(1, m):
for j in range(1, n):
l = u = 0
if i-1 >= 0:
u = dmap[i-1][j]
if j-1>= 0:
l = dmap[i][j-1]
dmap[i][j] = l + u
return dmap[m-1][n-1]
| 26 | 42 | 0.332168 |
acf44b8832e156297e89831470a4a0cacac738b2 | 1,381 | py | Python | class-notes/chapter_4/c4-4.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/chapter_4/c4-4.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | class-notes/chapter_4/c4-4.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 09:42:24 2021
@author: user15
"""
# =============================================================================
# s = "Apple iPhone と Google Android"
# print(s.upper())
#
# print(s.lower())
#
# print(s.swapcase())
# print(s)
#
# s1 = "may the force be with you!"
# print(s1.capitalize())
# print(s1.title())
# =============================================================================
# =============================================================================
# # p87
# s1 ="どっどどどどどうど"
# print("count Do", s1.count("どど"))
# s = "apple pie"
# print("count p",s.count("p"))
#
# print("count p up to 4th char:", s.count("p", 0 , 4))
#
# print("find e",s.find("e"))
# print("find x",s.find("x"))
# print("reverse find e",s.rfind("e"))
#
# s3 = "愛知県半田市"
#
# ken_index = s3.find("県")
#
# print(s3[:ken_index + 1])
#
# s4 = 'employee'
# print(s4.replace("e", "x"))
#
# print(s4.replace("e", "x", 2))
#
# sj = "サクラ咲く"
# print(sj.replace("咲く", "舞う風"))
#
# =============================================================================
# strip()/rstrip() demos. Key point: the argument to (r)strip() is a SET of
# characters to remove, not a suffix/prefix string.
t = " Hello \n"
print(t.strip())  # -> "Hello" (leading/trailing whitespace removed)
t = 'abc .....'
print(t.rstrip("."))  # -> "abc " (only trailing dots removed; space kept)
t1 = '2, 3, 4,'
print(t1.rstrip(".,\n"))  # -> "2, 3, 4"
t2 = "Hello World \n"
print(t2.rstrip(".,\n"))  # -> "Hello World " (space is not in the char set)
t = "dog.peg.jp"
# Gotcha: this strips any of {'.', 'j', 'p', 'e', 'g'} from the right, so it
# prints "do" -- it does NOT remove a ".jpeg" suffix (use removesuffix for that).
print(t.rstrip(".jpeg"))
| 20.014493 | 80 | 0.384504 |
acf44bf9c6a6c1239e6d5cf9cd812d3aa504b5ac | 1,717 | py | Python | src/rest-api/app/main.py | geometry-labs/craft-multi-token-api | e533fd02c928c4857076ee11e14d8c0608bf367d | [
"Apache-2.0"
] | null | null | null | src/rest-api/app/main.py | geometry-labs/craft-multi-token-api | e533fd02c928c4857076ee11e14d8c0608bf367d | [
"Apache-2.0"
] | null | null | null | src/rest-api/app/main.py | geometry-labs/craft-multi-token-api | e533fd02c928c4857076ee11e14d8c0608bf367d | [
"Apache-2.0"
] | null | null | null | import logging
import uvicorn
from fastapi import FastAPI
from starlette_exporter import PrometheusMiddleware, handle_metrics
from multiprocessing.pool import ThreadPool
from app.core.config import settings
from app.routes.v1.router import api_router
from app.db.setup import index_mongo_collections
from prometheus_client import start_http_server
# Map the configured textual level to the stdlib logging constant,
# defaulting to INFO for unknown values.
_LOG_LEVELS = {
    "CRITICAL": logging.CRITICAL,
    "ERROR": logging.ERROR,
    "WARNING": logging.WARNING,
    "INFO": logging.INFO,
    "DEBUG": logging.DEBUG,
}
logging_level = _LOG_LEVELS.get(settings.LOGGING_LEVEL, logging.INFO)

# FIX: basicConfig previously hardcoded level=logging.INFO, silently ignoring
# the level computed from settings.LOGGING_LEVEL above.
logging.basicConfig(
    level=logging_level, format="%(asctime)s :: %(levelname)s :: %(message)s"
)
# OpenAPI tag shown in the generated docs; its description surfaces the
# deployed contract address from settings.
tags_metadata = [
    {
        "name": "craft-multi-token",
        "description": settings.CRAFT_MULTI_TOKEN_CONTRACT_ADDRESS,
    },
]

# FastAPI application; the docs/openapi endpoints live under settings.PREFIX.
app = FastAPI(
    title="CraftMultiToken REST API",
    description="...",
    version="v0.1.0",
    openapi_tags=tags_metadata,
    openapi_url=f"{settings.PREFIX}/openapi.json",
    docs_url=f"{settings.PREFIX}/docs",
)
@app.on_event("startup")
async def setup():
    """Startup hook: ensure Mongo indexes exist and launch the Prometheus
    metrics HTTP server from a background thread."""
    # set up mongo
    index_mongo_collections()
    # Start prom server
    logging.info("Starting metrics server.")
    pool = ThreadPool(1)
    # NOTE(review): `pool` is a local that is never closed/joined; confirm the
    # metrics-server thread survives as intended after this coroutine returns.
    pool.apply_async(start_http_server, (settings.METRICS_PORT,settings.METRICS_ADDRESS))
# Mount the versioned API under the configured prefix.
app.include_router(api_router, prefix=settings.PREFIX)

# NOTE(review): prefix/app_name "balanced_rest" look copied from another
# service (this one is craft-multi-token) -- confirm the intended metric names.
app.add_middleware(
    PrometheusMiddleware, prefix="balanced_rest", app_name="balanced_rest", group_paths=True
)
app.add_route("/metrics", handle_metrics)
| 26.415385 | 92 | 0.747234 |
acf44ccaffb3d7eacf59316111b9861770ac7d46 | 7,628 | py | Python | test/functional/mining_pos_reorg.py | INFAQCOIN/INFAQ | 487de82c26135eb8ac93c9393e7fdb29bbc2822c | [
"MIT"
] | 1 | 2022-01-18T14:48:23.000Z | 2022-01-18T14:48:23.000Z | test/functional/mining_pos_reorg.py | martin-braun/INFAQ | fca6db067b8079fbedf4e9160180424c95470fed | [
"MIT"
] | null | null | null | test/functional/mining_pos_reorg.py | martin-braun/INFAQ | fca6db067b8079fbedf4e9160180424c95470fed | [
"MIT"
] | 1 | 2022-01-18T14:48:28.000Z | 2022-01-18T14:48:28.000Z | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import infaqcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
connect_nodes_clique,
disconnect_nodes,
set_node_times,
DecimalAmt,
)
class ReorgStakeTest(infaqcoinTestFramework):
    def set_test_params(self):
        # 3 nodes, each started with PoS (v1 and v2) activating at height 201
        self.num_nodes = 3
        self.extra_args = [['-nuparams=PoS:201', '-nuparams=PoS_v2:201']] * self.num_nodes
    def setup_chain(self):
        # Build the regtest chain and enable mocktime so block/stake
        # timestamps are deterministic across runs.
        self.log.info("Initializing test directory " + self.options.tmpdir)
        self._initialize_chain()
        self.enable_mocktime()
    def setup_network(self):
        # connect all nodes between each other (full clique), then wait for
        # the nodes to be in sync
        self.setup_nodes()
        connect_nodes_clique(self.nodes)
        self.sync_all()
def log_title(self):
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
description = "Tests reorganisation for PoS blocks."
self.log.info("\n\n%s\n%s\n%s\n", title, underline, description)
def disconnect_all(self):
self.log.info("Disconnecting nodes...")
for i in range(self.num_nodes):
for j in range(self.num_nodes):
if j != i:
disconnect_nodes(self.nodes[i], j)
self.log.info("Nodes disconnected")
def get_tot_balance(self, nodeid):
wi = self.nodes[nodeid].getwalletinfo()
assert_equal(self.nodes[nodeid].getblockcount(), wi['last_processed_block'])
return wi['balance'] + wi['immature_balance']
def check_money_supply(self, expected_iqc):
# verify that nodes have the expected IQC supply
iqc_supply = [self.nodes[i].getsupplyinfo(True)['transparentsupply']
for i in range(self.num_nodes)]
assert_equal(iqc_supply, [DecimalAmt(expected_iqc)] * self.num_nodes)
def run_test(self):
def findUtxoInList(txid, vout, utxo_list):
for x in utxo_list:
if x["txid"] == txid and x["vout"] == vout:
return True, x
return False, None
# IQC supply: block rewards
expected_money_supply = 250.0 * 200
self.check_money_supply(expected_money_supply)
block_time_0 = block_time_1 = self.mocktime
# Check balances
self.log.info("Checking balances...")
initial_balance = [self.get_tot_balance(i) for i in range(self.num_nodes)]
# -- 50 pow blocks each
assert_equal(initial_balance, [DecimalAmt(250.0 * 50)] * self.num_nodes)
self.log.info("Balances ok.")
# Disconnect nodes
self.disconnect_all()
# Stake one block with node-0 and save the stake input
self.log.info("Staking 1 block with node 0...")
initial_unspent_0 = self.nodes[0].listunspent()
self.nodes[0].generate(1)
block_time_0 += 60
set_node_times(self.nodes, block_time_0)
last_block = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
assert(len(last_block["tx"]) > 1) # a PoS block has at least two txes
coinstake_txid = last_block["tx"][1]
coinstake_tx = self.nodes[0].getrawtransaction(coinstake_txid, True)
assert (coinstake_tx["vout"][0]["scriptPubKey"]["hex"] == "") # first output of coinstake is empty
stakeinput = coinstake_tx["vin"][0]
# The stake input was unspent 1 block ago, now it's not
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], initial_unspent_0)
assert (res)
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res)
self.log.info("Coinstake input %s...%s-%d is no longer spendable." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
# Stake 10 more blocks with node-0 and check balances
self.log.info("Staking 10 more blocks with node 0...")
for i in range(10):
block_time_0 = self.generate_pos(0, block_time_0)
expected_balance_0 = initial_balance[0] + DecimalAmt(11 * 250.0)
assert_equal(self.get_tot_balance(0), expected_balance_0)
self.log.info("Balance for node 0 checks out.")
# Connect with node 2 and sync
self.log.info("Reconnecting node 0 and node 2")
connect_nodes(self.nodes[0], 2)
self.sync_blocks([self.nodes[i] for i in [0, 2]])
# verify that the stakeinput can't be spent
stakeinput_tx_json = self.nodes[0].getrawtransaction(stakeinput["txid"], True)
stakeinput_amount = float(stakeinput_tx_json["vout"][int(stakeinput["vout"])]["value"])
rawtx_unsigned = self.nodes[0].createrawtransaction(
[{"txid": stakeinput["txid"], "vout": int(stakeinput["vout"])}],
{"xxncEuJK27ygNh7imNfaX8JV6ZQUnoBqzN": (stakeinput_amount-0.01)})
rawtx = self.nodes[0].signrawtransaction(rawtx_unsigned)
assert(rawtx["complete"])
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[0].sendrawtransaction, rawtx["hex"])
txid = self.nodes[0].decoderawtransaction(rawtx["hex"])["txid"]
assert_raises_rpc_error(-5, "No such mempool or blockchain transaction",
self.nodes[0].getrawtransaction, txid)
self.log.info("GOOD: spending the stake input was not possible.")
# Stake 12 blocks with node-1
set_node_times(self.nodes, block_time_1)
self.log.info("Staking 12 blocks with node 1...")
for i in range(12):
block_time_1 = self.generate_pos(1, block_time_1)
expected_balance_1 = initial_balance[1] + DecimalAmt(12 * 250.0)
assert_equal(self.get_tot_balance(1), expected_balance_1)
self.log.info("Balance for node 1 checks out.")
# re-connect and sync nodes and check that node-0 and node-2 get on the other chain
new_best_hash = self.nodes[1].getbestblockhash()
self.log.info("Connecting and syncing nodes...")
set_node_times(self.nodes, block_time_1)
connect_nodes_clique(self.nodes)
self.sync_blocks()
for i in [0, 2]:
assert_equal(self.nodes[i].getbestblockhash(), new_best_hash)
# check balance of node-0
assert_equal(self.get_tot_balance(0), initial_balance[0])
self.log.info("Balance for node 0 checks out.")
# check that NOW the original stakeinput is present and spendable
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (res and utxo["spendable"])
self.log.info("Coinstake input %s...%s-%d is spendable again." % (
stakeinput["txid"][:9], stakeinput["txid"][-4:], stakeinput["vout"]))
self.nodes[0].sendrawtransaction(rawtx["hex"])
self.nodes[1].generate(1)
self.sync_blocks()
res, utxo = findUtxoInList(stakeinput["txid"], stakeinput["vout"], self.nodes[0].listunspent())
assert (not res or not utxo["spendable"])
# Verify that IQC supply was properly updated after the reorgs
self.log.info("Check IQC supply...")
expected_money_supply += 250.0 * (self.nodes[1].getblockcount() - 200)
self.check_money_supply(expected_money_supply)
self.log.info("Supply checks out.")
if __name__ == '__main__':
ReorgStakeTest().main() | 44.348837 | 107 | 0.643681 |
acf44cdc260f5064d69f563556e7d700cfdc08ff | 10,194 | py | Python | release/scripts/startup/bl_ui/properties_material_gpencil.py | wangyxuan/blender | d09289ff7a8e8fe6d4da6b46dd153033d7cfd426 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2019-06-27T09:30:33.000Z | 2019-11-05T12:41:21.000Z | release/scripts/startup/bl_ui/properties_material_gpencil.py | tin2tin/blender | 42e0cf1a026bbde7e3a65157de5c54106e948cd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/startup/bl_ui/properties_material_gpencil.py | tin2tin/blender | 42e0cf1a026bbde7e3a65157de5c54106e948cd8 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
from rna_prop_ui import PropertyPanel
from bl_ui.utils import PresetPanel
from .properties_grease_pencil_common import (
GreasePencilMaterialsPanel,
)
class GPENCIL_MT_color_context_menu(Menu):
    """Context menu for grease-pencil color/layer visibility and locking."""
    bl_label = "Layer"

    def draw(self, _context):
        layout = self.layout

        # Visibility operators.
        layout.operator("gpencil.color_reveal", icon='RESTRICT_VIEW_OFF', text="Show All")
        layout.operator("gpencil.color_hide", icon='RESTRICT_VIEW_ON', text="Hide Others").unselected = True

        layout.separator()

        # Locking operators.
        layout.operator("gpencil.color_lock_all", icon='LOCKED', text="Lock All")
        layout.operator("gpencil.color_unlock_all", icon='UNLOCKED', text="UnLock All")
        layout.operator("gpencil.stroke_lock_color", text="Lock Unselected")
        layout.operator("gpencil.lock_layer", text="Lock Unused")
class GPENCIL_UL_matslots(UIList):
    """UIList row renderer for grease-pencil material slots."""

    def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, _index):
        slot = item
        ma = slot.material
        # Only draw controls for slots holding a grease-pencil material.
        if (ma is not None) and (ma.grease_pencil is not None):
            gpcolor = ma.grease_pencil
            if self.layout_type in {'DEFAULT', 'COMPACT'}:
                if gpcolor.lock:
                    layout.active = False
                row = layout.row(align=True)
                row.enabled = not gpcolor.lock
                row.prop(ma, "name", text="", emboss=False, icon_value=icon)
                row = layout.row(align=True)
                row.prop(gpcolor, "lock", text="", emboss=False)
                row.prop(gpcolor, "hide", text="", emboss=False)
                # Toggle icon reflects the current onion-skin ("ghost") state.
                if gpcolor.ghost is True:
                    icon = 'ONIONSKIN_OFF'
                else:
                    icon = 'ONIONSKIN_ON'
                row.prop(gpcolor, "ghost", text="", icon=icon, emboss=False)
            elif self.layout_type == 'GRID':
                layout.alignment = 'CENTER'
                layout.label(text="", icon_value=icon)
class GPMaterialButtonsPanel:
    """Mixin placing material panels in the Properties editor and limiting
    them to materials that carry grease-pencil settings."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"

    @classmethod
    def poll(cls, context):
        """Show the panel only for a material with grease-pencil settings."""
        ma = context.material
        if not ma:
            return ma
        return ma.grease_pencil
class MATERIAL_PT_gpencil_slots(GreasePencilMaterialsPanel, Panel):
    """Material slot list panel for grease-pencil data (header hidden)."""
    bl_label = "Grease Pencil Material Slots"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"
    bl_options = {'HIDE_HEADER'}

    @classmethod
    def poll(cls, context):
        """Visible for a grease-pencil material or a grease-pencil object."""
        ob = context.object
        ma = context.material
        gp_settings = ma and ma.grease_pencil
        if gp_settings:
            return gp_settings
        return ob and ob.type == 'GPENCIL'
# Used as parent for "Stroke" and "Fill" panels
# Used as parent for "Stroke" and "Fill" panels
class MATERIAL_PT_gpencil_surface(GPMaterialButtonsPanel, Panel):
    """Container panel; its header exposes the material preset menu."""
    bl_label = "Surface"

    def draw_header_preset(self, _context):
        MATERIAL_PT_gpencil_material_presets.draw_panel_header(self.layout)

    def draw(self, _context):
        # No body of its own; child panels draw the actual settings.
        layout = self.layout
        layout.use_property_split = True
class MATERIAL_PT_gpencil_strokecolor(GPMaterialButtonsPanel, Panel):
    """Stroke color/style sub-panel of the Surface panel."""
    bl_label = "Stroke"
    bl_parent_id = 'MATERIAL_PT_gpencil_surface'

    def draw_header(self, context):
        ma = context.material
        if ma is not None and ma.grease_pencil is not None:
            gpcolor = ma.grease_pencil
            # Checkbox in the header toggles stroke drawing on/off.
            self.layout.prop(gpcolor, "show_stroke", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        ma = context.material
        if ma is not None and ma.grease_pencil is not None:
            gpcolor = ma.grease_pencil
            col = layout.column()
            # Grey out (but keep visible) when the color is locked.
            col.active = not gpcolor.lock
            col.prop(gpcolor, "mode")
            col.prop(gpcolor, "stroke_style", text="Style")
            # Texture-specific options.
            if gpcolor.stroke_style == 'TEXTURE':
                row = col.row()
                row.enabled = not gpcolor.lock
                col = row.column(align=True)
                col.template_ID(gpcolor, "stroke_image", open="image.open")
                if gpcolor.mode == 'LINE':
                    col.prop(gpcolor, "pixel_size", text="UV Factor")
                col.prop(gpcolor, "use_stroke_pattern", text="Use As Pattern")
                if gpcolor.use_stroke_pattern is False:
                    col.prop(gpcolor, "use_stroke_texture_mix", text="Mix Color")
                    if gpcolor.use_stroke_texture_mix is True:
                        col.prop(gpcolor, "mix_stroke_factor", text="Factor")
            # Plain color is shown for solid style, or whenever the texture
            # is used as a pattern / mixed with a color.
            if gpcolor.stroke_style == 'SOLID' or \
                    gpcolor.use_stroke_pattern is True or \
                    gpcolor.use_stroke_texture_mix is True:
                col.prop(gpcolor, "color", text="Color")
            if gpcolor.mode in {'DOTS', 'BOX'}:
                col.prop(gpcolor, "alignment_mode")
class MATERIAL_PT_gpencil_fillcolor(GPMaterialButtonsPanel, Panel):
    """Fill color/style sub-panel of the Surface panel."""
    bl_label = "Fill"
    bl_parent_id = 'MATERIAL_PT_gpencil_surface'

    def draw_header(self, context):
        ma = context.material
        gpcolor = ma.grease_pencil
        # Checkbox in the header toggles fill drawing on/off.
        self.layout.prop(gpcolor, "show_fill", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        ma = context.material
        gpcolor = ma.grease_pencil

        # color settings
        col = layout.column()
        col.active = not gpcolor.lock
        col.prop(gpcolor, "fill_style", text="Style")
        if gpcolor.fill_style == 'GRADIENT':
            col.prop(gpcolor, "gradient_type")
        # Non-texture styles expose direct color / pattern options.
        if gpcolor.fill_style != 'TEXTURE':
            col.prop(gpcolor, "fill_color", text="Color")
            if gpcolor.fill_style in {'GRADIENT', 'CHESSBOARD'}:
                col.prop(gpcolor, "mix_color", text="Secondary Color")
            if gpcolor.fill_style == 'GRADIENT':
                col.prop(gpcolor, "mix_factor", text="Mix Factor", slider=True)
            if gpcolor.fill_style in {'GRADIENT', 'CHESSBOARD'}:
                col.prop(gpcolor, "flip", text="Flip Colors")
                col.prop(gpcolor, "pattern_shift", text="Location")
                col.prop(gpcolor, "pattern_scale", text="Scale")
                # Radial gradients use a radius; other patterns use an angle.
                if gpcolor.gradient_type == 'RADIAL' and gpcolor.fill_style not in {'SOLID', 'CHESSBOARD'}:
                    col.prop(gpcolor, "pattern_radius", text="Radius")
                else:
                    if gpcolor.fill_style != 'SOLID':
                        col.prop(gpcolor, "pattern_angle", text="Angle")
                if gpcolor.fill_style == 'CHESSBOARD':
                    col.prop(gpcolor, "pattern_gridsize", text="Box Size")

        # Texture
        if gpcolor.fill_style == 'TEXTURE' or (gpcolor.use_fill_texture_mix is True and gpcolor.fill_style == 'SOLID'):
            col.template_ID(gpcolor, "fill_image", open="image.open")
            if gpcolor.fill_style == 'TEXTURE':
                col.prop(gpcolor, "use_fill_pattern", text="Use As Pattern")
                if gpcolor.use_fill_pattern is True:
                    col.prop(gpcolor, "fill_color", text="Color")
            col.prop(gpcolor, "texture_offset", text="Offset")
            col.prop(gpcolor, "texture_scale", text="Scale")
            col.prop(gpcolor, "texture_angle")
            col.prop(gpcolor, "texture_opacity")
            col.prop(gpcolor, "texture_clamp", text="Clip Image")
            if gpcolor.use_fill_pattern is False:
                col.prop(gpcolor, "use_fill_texture_mix", text="Mix With Color")
                if gpcolor.use_fill_texture_mix is True:
                    col.prop(gpcolor, "fill_color", text="Mix Color")
                    col.prop(gpcolor, "mix_factor", text="Mix Factor", slider=True)
class MATERIAL_PT_gpencil_preview(GPMaterialButtonsPanel, Panel):
    """Collapsed-by-default preview panel showing the material render."""
    bl_label = "Preview"
    COMPAT_ENGINES = {'BLENDER_EEVEE'}
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        ma = context.material
        self.layout.label(text=ma.name)
        self.layout.template_preview(ma)
class MATERIAL_PT_gpencil_custom_props(GPMaterialButtonsPanel, PropertyPanel, Panel):
    """Generic custom-properties panel for the active material;
    the UI itself is provided by PropertyPanel."""
    COMPAT_ENGINES = {'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    _context_path = "object.active_material"
    _property_type = bpy.types.Material
class MATERIAL_PT_gpencil_options(GPMaterialButtonsPanel, Panel):
    """Miscellaneous material options (currently only the pass index)."""
    bl_label = "Options"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        ma = context.material
        gpcolor = ma.grease_pencil
        layout.prop(gpcolor, "pass_index")
class MATERIAL_PT_gpencil_material_presets(PresetPanel, Panel):
    """Material settings"""
    bl_label = "Material Presets"
    # Preset machinery: directory, executor operator, and "add" operator.
    preset_subdir = "gpencil_material"
    preset_operator = "script.execute_preset"
    preset_add_operator = "scene.gpencil_material_preset_add"
# Classes registered when this module is run directly (normal registration
# presumably happens through bl_ui's own machinery — confirm upstream).
classes = (
    GPENCIL_UL_matslots,
    GPENCIL_MT_color_context_menu,
    MATERIAL_PT_gpencil_slots,
    MATERIAL_PT_gpencil_preview,
    MATERIAL_PT_gpencil_material_presets,
    MATERIAL_PT_gpencil_surface,
    MATERIAL_PT_gpencil_strokecolor,
    MATERIAL_PT_gpencil_fillcolor,
    MATERIAL_PT_gpencil_options,
    MATERIAL_PT_gpencil_custom_props,
)
if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    # Register every UI class above with Blender's type system.
    for cls in classes:
        register_class(cls)
| 35.273356 | 119 | 0.637924 |
acf44cfaf1a317111e7f043eda2013ee0cbb88c5 | 811 | py | Python | platon_utils/units.py | shinnng/platon-utils | 50f2b95279ab12bf295b430b83827d9db440da74 | [
"MIT"
] | null | null | null | platon_utils/units.py | shinnng/platon-utils | 50f2b95279ab12bf295b430b83827d9db440da74 | [
"MIT"
] | null | null | null | platon_utils/units.py | shinnng/platon-utils | 50f2b95279ab12bf295b430b83827d9db440da74 | [
"MIT"
] | null | null | null | import decimal
# Units are in their own module here, so that they can keep this
# formatting, as this module is excluded from black in pyproject.toml
# fmt: off
# Denomination table: unit name -> multiplier in 'von' (the base unit),
# each a power of ten, smallest to largest.
_SCALE = {
    'von': 0,
    'kvon': 3,
    'mvon': 6,
    'gvon': 9,
    'microlat': 12,
    'millilat': 15,
    'lat': 18,
    'klat': 21,
    'mlat': 24,
    'glat': 27,
    'tlat': 30,
}
units = {name: decimal.Decimal(10 ** exponent) for name, exponent in _SCALE.items()}
# fmt: on
| 40.55 | 71 | 0.633785 |
acf44d0add0a848dfaa10473d9801a6c8d000d50 | 2,218 | py | Python | parse.py | Fantop/wickedQuotes | f430a3b7c921cd7e71cc7555425f81e311c206cc | [
"MIT"
] | 14 | 2019-03-27T06:36:12.000Z | 2022-01-17T21:37:38.000Z | parse.py | Fantop/wickedQuotes | f430a3b7c921cd7e71cc7555425f81e311c206cc | [
"MIT"
] | 1 | 2019-07-17T18:45:33.000Z | 2019-07-17T18:47:54.000Z | parse.py | Fantop/wickedQuotes | f430a3b7c921cd7e71cc7555425f81e311c206cc | [
"MIT"
] | 4 | 2018-08-13T07:29:02.000Z | 2022-01-17T00:51:00.000Z | import re
import sys
import json
import unwiki
import xmltodict
from langdetect import detect
from xml.dom.minidom import parse
# Accumulates {page_title: [quotes]} across the whole dump.
quotesObject = {}

# CLI: argv[1] = wikiquote XML dump (required),
#      argv[2] = max quote length in chars (default 50),
#      argv[3] = language code for langdetect (default "en").
if (len(sys.argv) == 1):
    print("You must specify an input file.")
    sys.exit()
if (len(sys.argv) == 2):
    cutoffArg = 50
    langArg = "en"
if (len(sys.argv) == 3):
    cutoffArg = int(sys.argv[2])
    langArg = "en"
if (len(sys.argv) > 3):
    cutoffArg = int(sys.argv[2])
    langArg = str(sys.argv[3])
def writeQuotes(content):
    """Extract short quotes from the '==Quotes==' section of one wiki page.

    Args:
        content: the page's wikitext, already split into lines.

    Returns:
        List of cleaned quote strings whose raw line length was below
        cutoffArg + 3 and whose detected language matches langArg.
    """
    global langArg
    global cutoffArg
    quoteList = []
    write = False
    i = 0
    while i < len(content):
        line = content[i]
        # Any new top-level section heading ('==...' but not '===...')
        # ends the quotes section.  NOTE: the original indexed line[2]
        # directly, which raised IndexError on a bare '==' line (silently
        # swallowed by the caller, dropping the whole page).
        if line.startswith('==') and not line.startswith('==='):
            write = False
        # Bullet lines ('* ') inside the quotes section are candidate quotes;
        # +3 allows for the leading '* ' marker plus a newline.
        if write and line.startswith('* ') and len(line) < (cutoffArg + 3):
            # would optimize, but since the program only needs to be run once, not really a priority
            cleaned_line = unwiki.loads(line) + '\n'
            cleaned_line = multireplace(cleaned_line, {"\\u2018": "'", "\\u2019": "'", "\\u2026": "...", "\\u2013": "-", "\\u2014": "-", "\\u201c": '"', "\\u201d": '"', "\\'": "'", "'''": "", "\n": ""})
            cleaned_line = re.sub(r"<.*>|'('+)|\\\\x..|\\u....", "", cleaned_line)
            cleaned_line = re.sub(r' +', ' ', cleaned_line)
            cleaned_line = cleaned_line[2:]  # strip the leading '* '
            if (detect(cleaned_line) == langArg and "://" not in cleaned_line):
                quoteList.append(cleaned_line)
        if line == '==Quotes==' or line == '== Quotes ==':
            write = True
        i += 1
    return quoteList
def handle(_, value):
    """xmltodict item callback: collect quotes from one parsed wiki page.

    Args:
        _: path of ancestor elements supplied by xmltodict (unused).
        value: dict-like representation of the page element.

    Returns:
        True, so xmltodict continues streaming further items.
    """
    global quotesObject
    try:
        quoteList = writeQuotes(str(value['revision']['text']).split('\\n'))
        if quoteList:
            quotesObject[str(value['title'])] = quoteList
    except Exception:
        # Best-effort: pages without the expected structure (e.g. missing
        # revision text) are skipped rather than aborting the whole parse.
        pass
    return True
def multireplace(string, replacements):
    """Replace every key of *replacements* found in *string* with its value.

    Longer keys take precedence over shorter ones at the same position,
    and all replacements happen in a single pass.
    """
    keys_longest_first = sorted(replacements, key=len, reverse=True)
    pattern = re.compile('|'.join(re.escape(key) for key in keys_longest_first))
    return pattern.sub(lambda match: replacements[match.group(0)], string)
xmltodict.parse(open(str(sys.argv[1]), "rbU"), item_depth=2, item_callback=handle)
with open('quotes-' + str(cutoffArg) + '-' + str(langArg) + '.json', 'w') as outfile:
json.dump(quotesObject, outfile, sort_keys = True, indent = 4, ensure_ascii = False) | 30.383562 | 194 | 0.612263 |
acf44e99276e63906ad47325db0410eed33f46dc | 7,890 | py | Python | main.py | AgamChopra/MNIST | f3e0d8953785b8660ea92a682370920ffe55c683 | [
"MIT"
] | null | null | null | main.py | AgamChopra/MNIST | f3e0d8953785b8660ea92a682370920ffe55c683 | [
"MIT"
] | null | null | null | main.py | AgamChopra/MNIST | f3e0d8953785b8660ea92a682370920ffe55c683 | [
"MIT"
] | null | null | null | import sys
sys.path.append('R:\classes 2020-22\Fall 2021\mnist')
import my_dataset as db
import models
import torch
#%%
# Load MNIST-style splits and flatten each 28x28 image into a 784-vector.
# tr/ts are numpy arrays with a trailing channel axis; vl ("custom dataset")
# apparently has no channel axis — confirm against my_dataset.dataset().
tr,ts,vl = db.dataset(True)
x = tr[0][:,:,:,0].reshape(tr[0].shape[0],28*28)
y = tr[1]
xv = ts[0][:,:,:,0].reshape(ts[0].shape[0],28*28)
yv = ts[1]
xt = vl[0].reshape(vl[0].shape[0],28*28)
yt = vl[1]
#%%
#LogisticRegression
model = models.Logistic_Regression()
model.fit(x, tr[1])
print('LogisticRegression')
print('Train Accuracy:',models.accuracy(y,model.predict(x)))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Observations:
The model fails to converge.
Reported accuracy statistics:
LogisticRegression
Test Accuracy: tensor(0.9339)
Evaluation Accuracy: tensor(0.9255)
Test(custom dataset) Accuracy: tensor(0.3600)
'''
#%%
#SVM
model = models.SVM()
model.fit(x, tr[1])
print('SVM')
print('Train Accuracy:',models.accuracy(y,model.predict(x)))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Observations:
Reported accuracy statistics:
SVM
Test Accuracy: tensor(0.9899)
Evaluation Accuracy: tensor(0.9792)
Test(custom dataset) Accuracy: tensor(0.6000)
'''
#%%
# Convert all splits to torch tensors for the neural-network models below.
x=torch.from_numpy(x)
xv=torch.from_numpy(xv)
xt=torch.from_numpy(xt)
y=torch.from_numpy(y)
yv=torch.from_numpy(yv)
yt=torch.from_numpy(yt)
#%%
#1 Layvr NN with reg
model = models.NN1layer(device='cuda')
losses = model.fit(x, y, xv, yv,regularize=True,print_losses=50,eps=100,lr=1E-4,batch_size=32)#100,1E-5,16
models.plot_loss(losses,title='3 Layvr NN, w/ Reg&dropout')
print('Dense NN 1 layers(perceptron)')
print('Train Accuracy:',models.accuracy(y,model.predict(x)))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Epoch 1 :
Training loss: 1.9810410239537557 , Evaluation loss: 1.7682199802918313
Epoch 50 :
Training loss: 1.5525813439687093 , Evaluation loss: 1.540137346738424
Epoch 100 :
Training loss: 1.545045236269633 , Evaluation loss: 1.533633223328835
Observations:
Reported accuracy statistics:
Dense NN 1 layers(perceptron)
Test Accuracy: tensor(0.9285)
Evaluation/Validation Accuracy: tensor(0.9272)
Test(custom dataset) Accuracy: tensor(0.4600)
'''
#%%
#3 Layvr DNN with reg & dropout
model = models.NN3layer(dropout=0.5,device='cuda')
losses = model.fit(x, y, xv, yv,regularize=True,print_losses=50,eps=100,lr=1E-4,batch_size=32)#100,1E-5,16
models.plot_loss(losses,title='3 Layvr NN, w/ Reg&dropout')
print('Dense NN 3 layers')
print('Train Accuracy:',models.accuracy(y,model.predict(x)))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Epoch 1 :
Training loss: 1.7473403388341269 , Evaluation loss: 1.559548319914402
Epoch 50 :
Training loss: 1.51746822903951 , Evaluation loss: 1.4876713236937156
Epoch 100 :
Training loss: 1.509052510579427 , Evaluation loss: 1.4844706054681387
Observations:
Reported accuracy statistics:
Dense NN 3 layers
Test Accuracy: tensor(0.9821)
Evaluation/Validation Accuracy: tensor(0.9768)
Test(custom dataset) Accuracy: tensor(0.6600)
'''
#%%
#3 layer CNN 3 + layer DNN reg & dropout
model = models.CNN3NN3layer(dropout=0.5,device='cuda')
losses = model.fit(x, y, xv, yv,regularize=True,print_losses=50,eps=100,lr=1E-4,batch_size=32)#100,1E-5,16
models.plot_loss(losses,title='3 Layvr CNN + 3 layer NN, w/ Reg&dropout')
print('3 layer CNN 3 + layer DNN')
print('Train Accuracy:',models.accuracy(y,model.predict(x)))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Epoch 1 :
Training loss: 1.8404939883550009 , Evaluation loss: 1.5636011086977446
Epoch 50 :
Training loss: 1.5114175075531007 , Evaluation loss: 1.4820362539627614
Epoch 100 :
Training loss: 1.5021543897628784 , Evaluation loss: 1.477834382882485
Observations:
Reported accuracy statistics:
3 layer CNN 3 + layer DNN
Test Accuracy: tensor(0.9855)
Evaluation/Validation Accuracy: tensor(0.9833)
Test(custom dataset) Accuracy: tensor(0.8600)
'''
#%%
#10 layer FCNN with reg & dropout
model = models.CNN10(dropout=0.5,device='cuda')
losses = model.fit(x, y, xv, yv,regularize=True,print_losses=50,eps=100,lr=1E-4,batch_size=32)#100,1E-5,16
models.plot_loss(losses,title='10 layer FCNN, w/ Reg&dropout')
print('FCNN 10 layers')
print('Train Accuracy:',models.accuracy(y,model.predict(x)))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Epoch 1 :
Training loss: 2.022997138977051 , Evaluation loss: 1.6752129124525266
Epoch 50 :
Training loss: 1.508178492863973 , Evaluation loss: 1.4789460106537893
Epoch 100 :
Training loss: 1.499118377304077 , Evaluation loss: 1.4755478910146616
Observations:
Reported accuracy statistics:
FCNN 10 layers
Test Accuracy: tensor(0.9869)
Evaluation/Validation Accuracy: tensor(0.9855)
Test(custom dataset) Accuracy: tensor(0.8400)
'''
#%%
#10 Layvr NN with reg & dropout
model = models.NN10layer(dropout=0.5,device='cuda')
losses = model.fit(x, y, xv, yv,regularize=True,print_losses=50,eps=100,lr=1E-4,batch_size=32)#100,1E-5,16
models.plot_loss(losses,title='Dense NN 10 layers, w/ Reg&dropout')
print('Dense NN 10 layers')
# NOTE(review): train accuracy is computed only on samples 30000+ here,
# unlike the other cells — presumably to save time; confirm intent.
print('Train Accuracy:',models.accuracy(y[30000:],model.predict(x[30000:])))
print('Validation Accuracy:',models.accuracy(yv,model.predict(xv)))
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Epoch 1 :
Training loss: 2.3038975012461345 , Evaluation loss: 2.2993134390085173
Epoch 50 :
Training loss: 1.7635783201853434 , Evaluation loss: 1.6921955935465984
Epoch 100 :
Training loss: 1.6673925411224366 , Evaluation loss: 1.5521868937290633
Observations:
The model is too complex to converge in 100 epochs...
Reported accuracy statistics:
Dense NN 10 layers
Test Accuracy: tensor(0.9036)
Evaluation/Validation Accuracy: tensor(0.9087)
Test(custom dataset) Accuracy: tensor(0.4400)
'''
#%%
#4 layer Vison Transformer with reg & dropout
model = models.VisionTransformer(dropout=0.5,device='cuda',depth=4,patch_size=14)
losses = model.fit(x, y, xv, yv,regularize=True,print_losses=1,eps=50,lr=1E-5,batch_size=32)
models.plot_loss(losses,title='4 layer Vison Transformer, w/ Reg&dropout')
print('Vison Transformer')
# Accuracy is averaged over mini-batches of k samples (memory-friendly).
k = 100
a = []
for i in range(0,x.shape[0]-k, k):
    a.append(models.accuracy(y[i:i+k],model.predict(x[i:i+k])))
a = sum(a)/len(a)
print('Train Accuracy:',a)
a = []
for i in range(0,xv.shape[0]-k, k):
    # NOTE(review): slices of 10 here while stepping by k=100 — looks like a
    # leftover from a smaller batch size; confirm whether i+k was intended.
    a.append(models.accuracy(yv[i:i+10],model.predict(xv[i:i+10])))
a = sum(a)/len(a)
print('Validation Accuracy:', a)
print('Test(custom dataset) Accuracy:',models.accuracy(yt,model.predict(xt)))
'''
Epoch 1 :
Training loss: 1.6465816840489707 , Evaluation loss: 1.5319998199358964
Epoch 50 :
Training loss: 1.4974361883163452 , Evaluation loss: 1.4868688782056172
Epoch 100 :
Training loss: 1.4819398591995239 , Evaluation loss: 1.4781510864312832
Observations:
The model is too complex to converge in 100 epochs...
Reported accuracy statistics:
Vison Transformer
Train Accuracy: tensor(0.9906)
Validation Accuracy: tensor(0.9838)
Test(custom dataset) Accuracy: tensor(0.6800)
'''
#%%
| 37.751196 | 107 | 0.707605 |
acf4501201d81e5055960c6bf311f9f43debbd15 | 26,118 | py | Python | Data Analysis/variation_analysis.py | YuJames/Python | 5212be2431e1693d0fc73a883d0b01673a5079b8 | [
"MIT"
] | 1 | 2017-05-01T10:41:35.000Z | 2017-05-01T10:41:35.000Z | Data Analysis/variation_analysis.py | YuJames/Python | 5212be2431e1693d0fc73a883d0b01673a5079b8 | [
"MIT"
] | 5 | 2018-05-10T01:40:45.000Z | 2018-05-20T01:19:54.000Z | Data Analysis/variation_analysis.py | YuJames/Python | 5212be2431e1693d0fc73a883d0b01673a5079b8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Visualization of Data Variation.
This module uses Bokeh v.0.12.12 to perform an interactive variation analysis on
data.
ToDo:
~~~~NOW~~~~
control data types better
fix HoverTool
~~~~CONSIDERATION~~~~
add moving range graph
add option to pick preprocessing steps
drop row in dataframe if not fitting a data type (type check)
add more checks/preprocessing (including json file)
fix "float division by zero error" (when sigma = 0)
impossible? Give warning instead?
Look into static type checking
float vs np.float64
work on class methods
~~~~PERIODICALLY~~~~
improve docstrings
improve modularity (globals, fxns, variables)
improve naming
return vs return None vs nothing
"""
#~~~~ IMPORTS ~~~~#
import analysis_core as ac
import enum
import functools
import json
import logging
import math
import subprocess # not currently used
import sys
import time # not currently used
import typing
import bokeh.client as bkc # not currently used
import bokeh.core.enums as bkce
import bokeh.io.doc as bkiod
import bokeh.layouts as bkl
import bokeh.models.annotations as bkma
import bokeh.models.sources as bkms
import bokeh.models.tools as bkmt
import bokeh.models.widgets.buttons as bkmwb
import bokeh.models.widgets.groups as bkmwg
import bokeh.models.widgets.inputs as bkmwi
import bokeh.models.widgets.sliders as bkmws
import bokeh.models.widgets.tables as bkmwt
import bokeh.plotting as bkp
import numpy as np
import pandas as pd
import tools
#~~~~ PRIVATE (GLOBAL CONSTANTS and ENUMS) ~~~~#
class _DataKey(enum.Enum):
    """keys to access general data"""
    # static: captured from the source spreadsheet at construction time
    XCL_DF = "excel dataframe"
    XCL_Y_MAX = "excel y max"
    XCL_Y_MIN = "excel y min"
    XCL_X_MAX = "excel x max"
    XCL_X_MIN = "excel x min"
    # dynamic: updated as the user filters/zooms the plot
    PLT_Y_MAX = "plot y max"
    PLT_Y_MIN = "plot y min"
    PLT_X_MAX = "plot x max"
    PLT_X_MIN = "plot x min"
    PLT_CDS = "plot source"
    TBL_CDS = "table source"
"""keys to access json data"""
_JsonKey = None
class _TableCdsKey(enum.Enum):
    """keys to access output data"""
    # Values double as row labels in the results table ColumnDataSource.
    # general
    AVG = "average"
    PASS_MAX = "max passing threshold"
    PASS_MIN = "min passing threshold"
    FAIL_NUM = "failures (num)"
    FAIL_RATIO = "failures (%)"
    # analysis-specific
    CPK = "cpk"
    VAR_MAX = "max variation threshold"
    VAR_MIN = "min variation threshold"
class _WidgetKey(enum.Enum):
    """keys to access widgets"""
    # Values double as widget titles / table column headers in the UI.
    # x inputs
    X_IN_PART = "X Partitions"
    X_IN_CURR = "X Current Partitions"
    X_IN_PREC = "X Precision"
    X_IN = "X Index Range"
    # y inputs
    Y_IN_PART = "Y Partitions"
    Y_IN_CURR = "Y Current Partitions"
    Y_IN_PREC = "Y Precision"
    Y_IN = "Y Value Range"
    # outputs
    COL_NAME_OUT = "Calculation"
    COL_VALUE_OUT = "Value"
    # checkboxes
    BOXES = "test title"
    GEN_BOX = "General Analysis"
    VAR_BOX = "Variation Analysis"
#~~~~ PUBLIC (GLOBAL CONSTANTS and ENUMS) ~~~~#
#~~~~ PRIVATE GLOBAL VARIABLES ~~~~#
#~~~~ PUBLIC GLOBAL VARIABLES ~~~~#
#~~~~ PRIVATE CLASSES ~~~~#
class _VariationAnalysisFigure(ac.AnalysisFigure):
class _VariationAnalysisData(ac.AnalysisData):
        def __init__(self, data):
            """Container for variation analysis data.

            Builds the plot ColumnDataSource directly from *data* and a
            two-column (name, value) table source holding the statistics,
            then caches axis extremes for both the raw data and the plot.

            Args:
                data: data to analyze (pd.DataFrame); must contain the
                    columns named by _JsonKey.X_NAME / _JsonKey.Y_NAME.

            Returns:
                None
            """
            super().__init__()
            y_axis = data.loc[:, _JsonKey.Y_NAME.value]
            x_axis = data.loc[:, _JsonKey.X_NAME.value]
            col_1_key = _WidgetKey.COL_NAME_OUT.value
            col_2_key = _WidgetKey.COL_VALUE_OUT.value
            # init sources: each table row pairs a _TableCdsKey label with
            # its computed statistic from analysis_core.
            table_dict = {col_1_key: [_TableCdsKey.AVG.value,
                                      _TableCdsKey.PASS_MAX.value,
                                      _TableCdsKey.PASS_MIN.value,
                                      _TableCdsKey.FAIL_NUM.value,
                                      _TableCdsKey.FAIL_RATIO.value,
                                      _TableCdsKey.CPK.value,
                                      _TableCdsKey.VAR_MAX.value,
                                      _TableCdsKey.VAR_MIN.value],
                          col_2_key: [ac.calc_avg(data=y_axis, prec=_JsonKey.PREC.value),
                                      _JsonKey.PASS_MAX.value,
                                      _JsonKey.PASS_MIN.value,
                                      ac.calc_failures(data=y_axis,
                                                       lower=_JsonKey.PASS_MIN.value,
                                                       upper=_JsonKey.PASS_MAX.value,
                                                       prec=_JsonKey.PREC.value)[0],
                                      ac.calc_failures(data=y_axis,
                                                       lower=_JsonKey.PASS_MIN.value,
                                                       upper=_JsonKey.PASS_MAX.value,
                                                       prec=_JsonKey.PREC.value)[1],
                                      ac.calc_cpk(data=y_axis,
                                                  lower=_JsonKey.PASS_MIN.value,
                                                  upper=_JsonKey.PASS_MAX.value,
                                                  prec=_JsonKey.PREC.value),
                                      ac.calc_var_limits(data=y_axis, prec=_JsonKey.PREC.value)[1],
                                      ac.calc_var_limits(data=y_axis, prec=_JsonKey.PREC.value)[0]]}
            self._sources[_DataKey.PLT_CDS] = bkms.ColumnDataSource(data=data)
            self._sources[_DataKey.TBL_CDS] = bkms.ColumnDataSource(data=table_dict)
            # init data: XCL_* hold the immutable spreadsheet extremes,
            # PLT_* start identical and are narrowed as the user filters.
            self._data[_DataKey.XCL_DF] = data
            self._data[_DataKey.XCL_Y_MAX] = y_axis.max()
            self._data[_DataKey.XCL_Y_MIN] = y_axis.min()
            self._data[_DataKey.XCL_X_MAX] = x_axis.size - 1
            self._data[_DataKey.XCL_X_MIN] = 0
            self._data[_DataKey.PLT_Y_MAX] = y_axis.max()
            self._data[_DataKey.PLT_Y_MIN] = y_axis.min()
            self._data[_DataKey.PLT_X_MAX] = x_axis.size - 1
            self._data[_DataKey.PLT_X_MIN] = 0
def __getitem__(self, key):
try:
if key in _TableCdsKey:
print("first")
cds = self._sources[_DataKey.TBL_CDS]
print("second")
index_of_key = cds.data[_WidgetKey.COL_NAME_OUT.value].index(key.value)
print("third")
value_at_index = cds.data[_WidgetKey.COL_VALUE_OUT.value][index_of_key]
return value_at_index
elif key is _DataKey.PLT_CDS or key is _DataKey.TBL_CDS:
value_at_index = self._sources[key]
return value_at_index
elif key in _DataKey:
value_at_index = self._data[key]
return value_at_index
else:
self.__missing__(key)
return None
except:
print("in except")
return None
        def __setitem__(self, key, value):
            """Store *value* under *key*, mirroring __getitem__'s routing.

            _TableCdsKey members overwrite the matching table row's value;
            PLT_CDS / TBL_CDS replace the whole ColumnDataSource .data dict
            (triggering a Bokeh update); other _DataKey members write the
            plain data dict.
            """
            if key in _TableCdsKey:
                cds = self._sources[_DataKey.TBL_CDS]
                index_of_key = cds.data[_WidgetKey.COL_NAME_OUT.value].index(key.value)
                cds.data[_WidgetKey.COL_VALUE_OUT.value][index_of_key] = value
            elif key is _DataKey.PLT_CDS or key is _DataKey.TBL_CDS:
                self._sources[key].data = value
            elif key in _DataKey:
                self._data[key] = value
            else:
                self.__missing__(key)
def __missing__(self, key):
print("Class {} does not use key '{}'.".format(self.__class__, key))
        def __repr__(self):
            """Return the class name plus every data entry except the
            (potentially huge) source DataFrame."""
            result = ""
            result += "Class: {}\n".format(self.__class__.__name__)
            # result += "Sources\n"
            result += "Data\n"
            for key, val in self._data.items():
                if key is not _DataKey.XCL_DF:
                    result += "    {}: {}\n".format(key, val)
            return result
def update_plot_cds(self):
"""Update plot source based on current data.
Args:
None
Returns:
None
"""
x_min = int(self._data[_DataKey.PLT_X_MIN])
x_max = int(self._data[_DataKey.PLT_X_MAX])
y_min = self._data[_DataKey.PLT_Y_MIN]
y_max = self._data[_DataKey.PLT_Y_MAX]
col_x_key = _JsonKey.X_NAME.value
col_y_key = _JsonKey.Y_NAME.value
# filter source
xcl_df = self[_DataKey.XCL_DF]
new_df = xcl_df.iloc[x_min: x_max + 1, :]
y_axis = new_df[_JsonKey.Y_NAME.value]
new_df = new_df[(y_axis <= y_max) & (y_axis >= y_min)]
# update source
new_dict = {col_x_key: new_df[_JsonKey.X_NAME.value].as_matrix(),
col_y_key: new_df[_JsonKey.Y_NAME.value].as_matrix()}
self[_DataKey.PLT_CDS] = new_dict
def update_table_cds(self, checkboxes):
"""Update table source based on current data.
Args:
None
Returns:
None
"""
y_axis = self._sources[_DataKey.PLT_CDS].data[_JsonKey.Y_NAME.value]
col_1_key = _WidgetKey.COL_NAME_OUT.value
col_2_key = _WidgetKey.COL_VALUE_OUT.value
# update source
table_dict = {col_1_key: [],
col_2_key: []}
if 0 in checkboxes:
table_dict[col_1_key].extend([_TableCdsKey.AVG.value,
_TableCdsKey.PASS_MAX.value,
_TableCdsKey.PASS_MIN.value])
table_dict[col_2_key].extend([ac.calc_avg(data=y_axis, prec=_JsonKey.PREC.value),
_JsonKey.PASS_MAX.value,
_JsonKey.PASS_MIN.value])
if 1 in checkboxes:
table_dict[col_1_key].extend([_TableCdsKey.FAIL_NUM.value,
_TableCdsKey.FAIL_RATIO.value,
_TableCdsKey.CPK.value,
_TableCdsKey.VAR_MAX.value,
_TableCdsKey.VAR_MIN.value])
table_dict[col_2_key].extend([ac.calc_failures(data=y_axis,
lower=_JsonKey.PASS_MIN.value,
upper=_JsonKey.PASS_MAX.value,
prec=_JsonKey.PREC.value)[0],
ac.calc_failures(data=y_axis,
lower=_JsonKey.PASS_MIN.value,
upper=_JsonKey.PASS_MAX.value,
prec=_JsonKey.PREC.value)[1],
ac.calc_cpk(data=y_axis,
lower=_JsonKey.PASS_MIN.value,
upper=_JsonKey.PASS_MAX.value,
prec=_JsonKey.PREC.value),
ac.calc_var_limits(data=y_axis,
prec=_JsonKey.PREC.value)[1],
ac.calc_var_limits(data=y_axis,
prec=_JsonKey.PREC.value)[0]])
self[_DataKey.TBL_CDS] = table_dict
def __init__(self, data):
"""Container for variation analysis.
Args:
data: data container (pd.DataFrame)
Returns:
None
"""
super().__init__()
self._data = self._VariationAnalysisData(data=data)
self._figure = bkp.figure(title=_JsonKey.TITLE.value,
x_axis_label=_JsonKey.X_NAME.value,
y_axis_label=_JsonKey.Y_NAME.value,
x_axis_type="datetime",
y_axis_type="linear",
plot_width=1200)
# add tools
self._figure.add_tools(bkmt.HoverTool(tooltips=[("x", "@{}".format(_JsonKey.X_NAME.value)),
("y", "@{}".format(_JsonKey.Y_NAME.value)),
("index", "$index")],
formatters={"datetime": "datetime"}))
# add plot data glyphs
self._figure.circle(x=_JsonKey.X_NAME.value, y=_JsonKey.Y_NAME.value,
fill_color="white", legend="points", size=5,
source=self._data[_DataKey.PLT_CDS])
self._figure.line(x=_JsonKey.X_NAME.value, y=_JsonKey.Y_NAME.value,
legend="lines", source=self._data[_DataKey.PLT_CDS])
# add legend
self._figure.legend.background_fill_alpha = 0
self._figure.legend.border_line_color = "navy"
self._figure.legend.border_line_width = 3
self._figure.legend.click_policy="hide"
# init lines
_avg = bkma.Span(dimension="width", line_color="#000000", line_dash="dashed",
line_width=3, location=self._data[_TableCdsKey.AVG])
_pass_min = bkma.Span(dimension="width", line_color="#FF0000", line_dash="dashed",
line_width=3, location=_JsonKey.PASS_MIN.value)
_pass_max = bkma.Span(dimension="width", line_color="#FF0000", line_dash="dashed",
line_width=3, location=_JsonKey.PASS_MAX.value)
_var_min = bkma.Span(dimension="width", line_color="#FFA500", line_dash="dashed",
line_width=3, location=self._data[_TableCdsKey.VAR_MIN])
_var_max = bkma.Span(dimension="width", line_color="#FFA500", line_dash="dashed",
line_width=3, location=self._data[_TableCdsKey.VAR_MAX])
self._figure.add_layout(obj=_avg)
self._figure.add_layout(obj=_pass_max)
self._figure.add_layout(obj=_pass_min)
self._figure.add_layout(obj=_var_max)
self._figure.add_layout(obj=_var_min)
self._annotations = {_TableCdsKey.AVG: _avg,
_TableCdsKey.VAR_MAX: _var_max,
_TableCdsKey.VAR_MIN: _var_min,
_TableCdsKey.PASS_MAX: _pass_max,
_TableCdsKey.PASS_MIN: _pass_min}
# init input widgets
_x_in_partitions = bkmws.Slider(title=_WidgetKey.X_IN_PART.value,
start=2, end=10, value=2, step=1)
_x_in_current = bkmws.RangeSlider(title=_WidgetKey.X_IN_CURR.value,
start=1, end=2, step=1, value=(1,2))
_x_in_precision = bkmwi.Select(title=_WidgetKey.X_IN_PREC.value,
options=["1", "10", "100"], value="1")
_x_in = bkmws.RangeSlider(title=_WidgetKey.X_IN.value, start=self._data[_DataKey.XCL_X_MIN],
end=self._data[_DataKey.XCL_X_MAX], step=1,
value=(self._data[_DataKey.XCL_X_MIN], self._data[_DataKey.XCL_X_MAX]))
_y_in_partitions = None
_y_in_current = None
_Y_IN_PREC = None
_y_in = bkmws.RangeSlider(title=_WidgetKey.Y_IN.value,
start=self._data[_DataKey.XCL_Y_MIN],
end=self._data[_DataKey.XCL_Y_MAX], step=1,
value=(self._data[_DataKey.XCL_Y_MIN], self._data[_DataKey.XCL_Y_MAX]))
# self._save_data = bkmwb.Button(label="save data", button_type="success")
_x_in_partitions.on_change("value", functools.partial(self._cb_input_settings, widget=_x_in_partitions))
_x_in_current.on_change("value", functools.partial(self._cb_input_settings, widget=_x_in_current))
_x_in_precision.on_change("value", functools.partial(self._cb_input_settings, widget=_x_in_precision))
_x_in.on_change("value", functools.partial(self._cb_input_settings, widget=_x_in))
# _Y_IN_PREC.on_change("value", functools.partial(self._callback_select, widget=_Y_IN_PREC))
# _y_in.on_change("value", functools.partial(self._callback_slider, widget=_y_in))
# self._save_data.on_click(callback_save_output)
in_table_display = bkmwg.CheckboxGroup(labels=[_WidgetKey.GEN_BOX.value, _WidgetKey.VAR_BOX.value], active=[0, 1], name=_WidgetKey.BOXES.value)
in_table_display.on_click(self._cb_table_settings)
# init output widgets
_tbl_col1 = bkmwt.TableColumn(field=_WidgetKey.COL_NAME_OUT.value,
title=_WidgetKey.COL_NAME_OUT.value)
_tbl_col2 = bkmwt.TableColumn(field=_WidgetKey.COL_VALUE_OUT.value,
title=_WidgetKey.COL_VALUE_OUT.value)
_tbl_out = bkmwt.DataTable(source=self._data[_DataKey.TBL_CDS],
columns=[_tbl_col1, _tbl_col2],
fit_columns=False, row_headers=False, sizing_mode="scale_width",
sortable=True, selectable=True, scroll_to_selection=True)
# init widgets
self._widgets = {_WidgetKey.X_IN_PART: _x_in_partitions,
_WidgetKey.X_IN_CURR: _x_in_current,
_WidgetKey.X_IN_PREC: _x_in_precision,
_WidgetKey.X_IN: _x_in,
_WidgetKey.COL_NAME_OUT: _tbl_col1,
_WidgetKey.COL_VALUE_OUT: _tbl_col2,
_WidgetKey.BOXES: in_table_display}
# init layout
input_left = bkl.column(children=[_x_in_partitions, _x_in_current, _x_in_precision, _x_in])
input_right = bkl.column(children=[])
text_input = bkl.row(children=[input_left, input_right])
input = bkl.column(children=[text_input])
widgets = bkl.row(children=[input, in_table_display, _tbl_out])
plot_and_io = bkl.column(children=[self._figure, widgets])
bkiod.curdoc().add_root(model=plot_and_io)
self._flag = False
def __getitem__(self, key):
if key in _WidgetKey:
return self._widgets[key]
elif key in _TableCdsKey:
return self._annotations[key]
def __setitem__(self, key, value):
if key in _WidgetKey:
self._widgets[key].value = value
elif key in _TableCdsKey:
self._annotations[key].location = value
def _update_limits(self):
"""Update data range with input widget values.
Args:
None
Returns:
None
"""
x_min, x_max = self[_WidgetKey.X_IN].value
self._data[_DataKey.PLT_X_MIN] = x_min
self._data[_DataKey.PLT_X_MAX] = x_max
def _update_plot_lines(self):
"""Update the horizontal plot lines based on current data.
Args:
None
Returns:
None
"""
self[_TableCdsKey.AVG] = self._data[_TableCdsKey.AVG]
self[_TableCdsKey.VAR_MAX] = self._data[_TableCdsKey.VAR_MAX]
self[_TableCdsKey.VAR_MIN] = self._data[_TableCdsKey.VAR_MIN]
def _cb_input_settings(self, attr, old, new, widget):
"""
"""
widget_enum = _WidgetKey(widget.title)
# terminate early
if self._flag is True:
return
self._flag = True
print("Callback from input: {}".format(widget.title))
# get widgets
partitions = self[_WidgetKey.X_IN_PART]
current = self[_WidgetKey.X_IN_CURR]
prec = self[_WidgetKey.X_IN_PREC]
input = self[_WidgetKey.X_IN]
# basic calcs
print("basic calcs")
size = math.floor((self._data[_DataKey.XCL_X_MAX] + 1) / partitions.value)
remainder = (self._data[_DataKey.XCL_X_MAX] + 1) % partitions.value
start_part, end_part = current.value
# interact with widgets
print("interaction")
if widget_enum == _WidgetKey.X_IN_PART:
current.start = 1
current.end = new
current.value = (current.start, current.end)
prec.value = "1"
input.start = 0
input.end = self._data[_DataKey.XCL_X_MAX]
input.step = 1
input.value = (input.start, input.end)
elif widget_enum == _WidgetKey.X_IN_CURR:
prec.value = "1"
# calcs
input.start = size * (start_part - 1)
if end_part == partitions.value:
input.end = size * end_part + remainder - 1
else:
input.end = size * end_part - 1
# calcs
input.step = 1
input.value = (input.start, input.end)
elif widget_enum == _WidgetKey.X_IN_PREC:
input.step = int(new)
input.value = (input.start, input.end)
elif widget_enum == _WidgetKey.X_IN:
pass
self._flag = False
# data calcs
self._update_limits()
self._data.update_plot_cds()
self._data.update_table_cds(list(self[_WidgetKey.BOXES].active))
self._update_plot_lines()
def _cb_table_settings(self, new):
print("checkbox clicked: {}".format(new))
self._data.update_table_cds(tuple(new))
# def callback_save_output():
# tools.create_timestamp(output_file_path)
# with open(output_file_path, "a") as f:
# dict = {"input": [(item, value[0]) for item, value in input_source.data.items() if len(value) == 1],
# "output": [(item, value) for item, value in zip(output_data.data["calculation"], output_data.data["value"])]}
# JSON_STRING = json.dumps(dict, indent = 2, sort_keys = True)
# f.write(JSON_STRING + "\n\n")
#~~~~ PUBLIC CLASSES ~~~~#
#~~~~ PRIVATE FUNCTIONS ~~~~#
def _create_json_enum(file_path: str) -> None:
"""Parse configuration json file.
Args:
file_path: json file path (str)
Returns:
None
"""
global _JsonKey
with open(file=file_path, mode="r") as f:
json_obj = json.load(fp=f)
_JsonKey = enum.Enum(value="_JsonKey",
names={"XCL_FILE_PATH": json_obj["excel file path"],
"LOG_FILE_PATH": json_obj["logging file path"],
"PREC": json_obj["rounding precision"],
"SHEET_NAME": json_obj["data sheet name"],
"TITLE": json_obj["analysis title"],
"PASS_MAX": json_obj["max passing value"],
"PASS_MIN": json_obj["min passing value"],
"Y_NAME": json_obj["y axis name"],
"X_NAME": json_obj["x axis name"]})
def _prepare_variation_analysis_data(json_file_path: str):
"""Preprocess data for variation analysis.
Args:
json_file_path: json file path (str)
Returns:
preprocessed data (pd.DataFrame)
"""
# update globals
_create_json_enum(file_path=json_file_path)
# grab data
data_df = pd.read_excel(io=_JsonKey.XCL_FILE_PATH.value, sheetname=_JsonKey.SHEET_NAME.value)
# clean variable data
data_df = data_df.dropna()
data_df = data_df.round(decimals={_JsonKey.Y_NAME.value: _JsonKey.PREC.value})
data_df = data_df.drop_duplicates()
data_df = data_df.sort_values(by=[_JsonKey.X_NAME.value, _JsonKey.Y_NAME.value])
return data_df
def _create_variation_analysis_UI(data) -> None:
"""Create the UI for variation analysis.
Args:
data: data container (pd.DataFrame)
Returns:
None
"""
figure = _VariationAnalysisFigure(data=data)
#~~~~ PUBLIC FUNCTIONS ~~~~#
def variation_analysis(json_file_path: str) -> None:
"""Perform and display a variation analysis.
Args:
json_file_path: json file path (str)
Returns:
None
"""
preprocessed_data = _prepare_variation_analysis_data(json_file_path=json_file_path)
_create_variation_analysis_UI(data=preprocessed_data)
#~~~~ MAIN ~~~~#
#~~~~ DEAD CODE ~~~~#
# def _filter_out_nonnumbers(data_set):
# """Filter out non-number data _set elements.
#
# Args:
# data_set: what to filter (list)
# Returns:
# filtered copy of argument (list)
# """
#
# print("debug: _filter_out_nonnumbers")
# return [x for x in data_set if _is_string_number(x)] | 42.816393 | 151 | 0.532621 |
acf45044b505981a94aeee68c8396e135c83a049 | 390 | py | Python | src/bilbyui/migrations/0014_label_protected.py | gravitationalwavedc/gwcloud_bilby | f5074fe60ff2a3cfa6a7e8d3e97c9573a6152563 | [
"MIT"
] | 1 | 2020-10-26T02:35:26.000Z | 2020-10-26T02:35:26.000Z | src/bilbyui/migrations/0014_label_protected.py | gravitationalwavedc/gwcloud_bilby | f5074fe60ff2a3cfa6a7e8d3e97c9573a6152563 | [
"MIT"
] | 31 | 2020-05-04T05:57:45.000Z | 2022-02-23T04:35:35.000Z | src/bilbyui/migrations/0014_label_protected.py | gravitationalwavedc/gwcloud_bilby | f5074fe60ff2a3cfa6a7e8d3e97c9573a6152563 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.19 on 2021-05-31 00:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bilbyui', '0013_auto_20210321_2322'),
]
operations = [
migrations.AddField(
model_name='label',
name='protected',
field=models.BooleanField(default=False),
),
]
| 20.526316 | 53 | 0.602564 |
acf450460723575149f81578228ab39c55a80fe1 | 80 | py | Python | microsetta_public_api/api/diversity/beta.py | wasade/microsetta-public-api | cdbaa18103bbb2e49927a35ad956e079ad445f53 | [
"BSD-3-Clause"
] | null | null | null | microsetta_public_api/api/diversity/beta.py | wasade/microsetta-public-api | cdbaa18103bbb2e49927a35ad956e079ad445f53 | [
"BSD-3-Clause"
] | null | null | null | microsetta_public_api/api/diversity/beta.py | wasade/microsetta-public-api | cdbaa18103bbb2e49927a35ad956e079ad445f53 | [
"BSD-3-Clause"
] | null | null | null | def pcoa_contains(named_sample_set, sample_id):
raise NotImplementedError()
| 26.666667 | 47 | 0.8125 |
acf450c9552eaff1059e832ea7edca54911b867d | 1,870 | py | Python | kNN/handwriting.py | yangmqglobe/Machine_Learning_in_Action_Practice | 6418e2d780b554b292710dc346c5aa248d82fd0e | [
"MIT"
] | null | null | null | kNN/handwriting.py | yangmqglobe/Machine_Learning_in_Action_Practice | 6418e2d780b554b292710dc346c5aa248d82fd0e | [
"MIT"
] | null | null | null | kNN/handwriting.py | yangmqglobe/Machine_Learning_in_Action_Practice | 6418e2d780b554b292710dc346c5aa248d82fd0e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
@author: yangmqglobe
@file: handwriting.py
@time: 2017/2/11
"""
from .kNN import knn_classify
import numpy as np
import glob
import re
def img2vector(path):
"""
将一张图片的矩阵读入并生成numpy数组
:param path: 图片文件路径
:return:图片数组
"""
vector = np.zeros((1, 1024))
with open(path) as f:
for x, line in enumerate(f):
line = line.rstrip()
for y, col in enumerate(line):
vector[0, 32*x+y] = int(col)
return vector
def get_data_set(path):
"""
把文件夹中的所有图片读入生成数据集矩阵
:param path: 文件路径
:return: 数据集矩阵和分类
"""
num_re = re.compile(r'(\d)_\d+\.txt')
paths = glob.glob('{}/*'.format(path))
mat = np.zeros((len(paths), 1024))
labels = []
for i, path in enumerate(paths):
mat[i, :] = img2vector(path)
labels.append(int(num_re.findall(path)[0]))
return mat, labels
def handwriting_class_test(train_path, test_path, k=3):
"""
以给定的目录作为训练和测试数据集,对分类进行测试
:param train_path: 训练数据集
:param test_path: 测试数据集
:param k: k值
"""
hw_mat, hw_labels = get_data_set(train_path)
paths = glob.glob('{}/*'.format(test_path))
num_re = re.compile(r'(\d)_\d+\.txt')
error_count = 0
test_count = len(paths)
for i, path in enumerate(paths):
test_mat = img2vector(path)
class_result = knn_classify(test_mat, hw_mat, hw_labels, k)
real_result = int(num_re.findall(path)[0])
if class_result != real_result:
error_count += 1
print('class {} as {} ×'.format(path, class_result))
else:
print('class {} as {} √'.format(path, class_result))
print('total error count: {}'.format(error_count))
print('total error rate: {:.3f}%'.format(error_count/test_count*100))
def classify_handwring(path, train_path, k=3):
# todo
pass
| 25.616438 | 73 | 0.6 |
acf450ed100ce7728d1327d2cbd1cd5a8ba12f54 | 446 | py | Python | lesson3.py | mckeown12/GettingStarted | b323a11d9041b197e1caf1a8c2237739183356e1 | [
"Apache-2.0"
] | null | null | null | lesson3.py | mckeown12/GettingStarted | b323a11d9041b197e1caf1a8c2237739183356e1 | [
"Apache-2.0"
] | null | null | null | lesson3.py | mckeown12/GettingStarted | b323a11d9041b197e1caf1a8c2237739183356e1 | [
"Apache-2.0"
] | null | null | null | from time import sleep
def print_thirteen_times_table_until(stop_at = 38):
n = 0
while n < stop_at:
print(f"13 x {n} = {13*n}")
n = n+1
sleep(0.5)
print(f"n = {n} which is no longer less than {stop_at}")
return n
print("Calling Function...")
print_thirteen_times_table_until()
# print_thirteen_times_until(5)
# print_thirteen_times_until(stop_at = 5)
# n = print_thirteen_times_until(5.7)
print("End") | 22.3 | 60 | 0.663677 |
acf45323efee5c17d43947625dad9216184f7c7b | 33,851 | py | Python | .history/neuroformer/model_perceiver_20220114165815.py | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 | [
"MIT"
] | null | null | null | .history/neuroformer/model_perceiver_20220114165815.py | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 | [
"MIT"
] | null | null | null | .history/neuroformer/model_perceiver_20220114165815.py | woanderer/neuroformer | df3462d55977b6c9adcb6753e7c474b8b76e8021 | [
"MIT"
] | null | null | null | # from code.transformer_vid.utils import convert_weights
# import rotary_embedding_torch
from torch.nn.modules.activation import GELU, ReLU
# from data.OneCombo3.trainer import TrainerConfig
import math
import numpy as np
import itertools
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.autograd import Variable
from torchvision.models.video import r3d_18
# from ResNet3D import r3d_18
from scipy.optimize import linear_sum_assignment
# from rotary_embedding_torch import apply_rotary_emb, RotaryEmbedding
from einops.layers.torch import Rearrange
logger = logging.getLogger(__name__)
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): # nn.Conv3d,
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.2
resid_pdrop = 0.2
attn_pdrop = 0.2
pos_pdrop = 0.2
temp_pdrop = 0.2
pos_emb = True
temp_emb = True
start_prune = 30
epoch = 0
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class neuralGPTConfig:
""" base GPT config, params common to all GPT versions """
n = 0.4
im_drop = 0.2
id_drop = n
embd_pdrop = n
resid_pdrop = n
attn_pdrop = n
pos_pdrop = n
temp_pdrop = n
pos_emb = True
temp_emb = True
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k, v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class VideoFeaturesExtractor(nn.Module):
"""
R3D: (3 x T x H x W)
H, W = 112
"""
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(*(list(r3d_18(pretrained=True).children())[:-2]))
convert_weights(self.backbone)
# # freeze backbone
# for k, v in self.backbone.named_parameters():
# v.requires_grad = False
def forward(self, x):
# B = Batch, T, C, Fm, H, W
features = self.backbone(x) # (B, C, T, H, W)
B, C, T, H, W = features.shape
features = features.permute(0, 2, 3, 4, 1)
features = features.view(B, -1, C)
return features
class VideoEncoder(nn.Module):
def __init__(self):
super().__init__()
self.to_patch_embedding = nn.Sequential(
Rearrange('b c t (h p1) (w p2) -> b (t h w) (p1 p2 c)', p1=16, p2=16)
)
def forward(self, x):
return self.to_patch_embedding(x)
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
self.register_buffer("mask", self.build_mask(config.block_size))
self.n_head = config.n_head
self.att = None
self.T = config.block_size
# self.rotary_embedding = RotarySpatioTemporalEmbedding(config)
def build_mask(self, block_size):
mask = torch.tril(torch.ones((block_size, block_size)),
).view(1, 1, block_size, block_size)
return mask
def forward(self, x, pad=None, dtx=None):
# B = Batch, T = Sequence, C = n_embed
B, T, C = x.size()
# calculate query, key, values for all head in batch and move head forward to the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# # apply rotary embeddings
# if dtx is not None:
# q, k = self.rotary_embedding(q, k, dtx)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
# if pad is not None:
# for idx, i in enumerate(pad):
# att[idx, :, :, self.T - i:] = float('-inf') # only able to see first padding token
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
self.att = att
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class PositionalEmbedding(nn.Module):
""" Implement the PE function. """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
# class RotarySpatioTemporalEmbedding(nn.Module):
# """ Rotary temporal embeddings - block_size = id_blk_sz """
# def __init__(self, config):
# super().__init__()
# self.frame_block_size = config.frame_block_size
# self.id_block_size = config.id_block_size
# self.emb = RotaryEmbedding(dim=32)
# def forward(self, q, k, t):
# b = t.shape[0]
# tf = self.frame_block_size
# queries = []
# keys = []
# for B in range(b):
# im_temp_emb = torch.tensor([-0.5] * (tf//2) + [0.5] * (tf//2))
# im_pos_emb = torch.arange(self.frame_block_size)
# im_emb = torch.stack([im_temp_emb, im_pos_emb], dim=0)
# id_temp_emb = self.temp_emb(t[B], cache_key=self.block_size)
# freqs = self.emb(torch.cat(im_emb, id_temp_emb))
# queries.append(apply_rotary_emb(freqs, q[B][None, ...]))
# keys.append(apply_rotary_emb(freqs, k[B][None, ...]))
# q, k = torch.cat(queries), torch.cat(keys)
# return q, k
class TemporalEmbedding(nn.Module):
""" encoding temporal information using fourrier signals """
def __init__(self, n_embd, p_drop, max_len=1500):
super().__init__()
self.dropout = nn.Dropout(p=p_drop)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_embd)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_embd, 2) *
-(math.log(10000.0) / n_embd))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class LearntTemporalEmbedding(nn.Module):
"""
Project B x T x 1 time sequence to
B x T x C
"""
def __init__(self, block_sz, n_embd, p_drop=0.2):
super().__init__()
self.temp_emb = nn.Sequential(
nn.Linear(1, n_embd // 2),
nn.GELU(),
nn.Linear(n_embd // 2, n_embd),
nn.Dropout(p_drop)
)
def forward(self, x):
return self.temp_emb(x.unsqueeze(-1))
class Decoder(nn.Module):
def __init__(self, config):
super().__init__()
# decoder_layer = nn.TransformerDecoderLayer(config.n_embd, config.n_head,
# activation='gelu', dropout=0.2, batch_first=True)
# self.decoder = nn.TransformerDecoder(decoder_layer, config.n_layer)
self.decoder = nn.Transformer(d_model=config.n_embd, nhead=config.n_head,
num_encoder_layers=3, num_decoder_layers=config.n_layer,
activation="gelu", dropout=0.4, batch_first=True)
self.register_buffer("tgt_mask", self.generate_square_subsequent_mask(config.id_block_size))
# self.register_buffer("tgt_pad_mask", self.generate_padding_mask(config.ids_block_size))
self.T = config.id_block_size
def generate_square_subsequent_mask(self, sz: int, pad=None):
r"""Generate a square mask for the sequence. The masked positions are filled with float('-inf').
Unmasked positions are filled with float(0.0).
"""
mask = (torch.triu(torch.ones(sz, sz), diagonal=0) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def generate_padding_mask(self, sz: int, pad=None):
r"""Build a (B x T) mask that resides on the GPU and can be
manipulated by build_padding_mask according to padded sequence
"""
mask = torch.zeros(1, sz, dtype=torch.bool)
return mask
def generate_sparse_mask(self, sz: int, pad=None):
r""" Build a square mask that employs
teacher forcing according to P
"""
rand_mat = torch.rand(1, sz)
k = round(0.75 * sz)
k_th_quant = torch.topk(rand_mat, k, largest = False)[0][:,-1:]
bool_tensor = rand_mat <= k_th_quant
mask = torch.where(bool_tensor, torch.tensor(1), torch.tensor(0)).repeat(sz, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask.cuda(self.tgt_mask.get_device()) if self.tgt_mask.is_cuda else mask
def build_padding_mask(self, tgt, pad):
# mask = self.tgt_pad_mask.repeat(tgt.shape[0], 1)
mask = torch.zeros(tgt.shape[0], self.T, dtype=torch.bool)
# print(mask.shape)
# print(pad.shape)
for B, P in enumerate(pad):
mask[B, self.T - P:] = True
return mask # .to(torch.cuda.current_device())
def forward(self, tgt, memory, pad):
# padding_mask = self.build_padding_mask(tgt, pad)
# tgt_mask = self.generate_sparse_mask(self.T) if self.training else self.tgt_mask
return self.decoder(src=memory, tgt=tgt, tgt_mask=self.tgt_mask,
tgt_key_padding_mask=None)
class ProjectNorm(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
self.ln = nn.LayerNorm(feat_size)
self.mlp = nn.Sequential(
nn.Linear(feat_size, math.floor(2 * feat_size), bias=False),
nn.GELU(),
nn.Linear(math.floor(2 * feat_size), target_size, bias=False),
)
def forward(self, x):
return self.mlp(self.ln(x))
class TimeProjection(nn.Module):
def __init__(self, seq_size, id_seq_size, feat_size, target_size):
super().__init__()
self.mlp_seq = nn.Sequential(
nn.Linear(seq_size, id_seq_size),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(id_seq_size, id_seq_size)
)
self.mlp_t = nn.Sequential(
nn.Linear(feat_size, feat_size // 2),
nn.ReLU(),
nn.Dropout(p=0.3),
nn.Linear(feat_size // 2, target_size)
)
def forward(self, x):
x = x.permute(0, 2, 1) # B, T, C -> B, C, T
x = self.mlp_seq(x) # B, C, T / 2
x = x.permute(0, 2, 1) # B, T / 2, C
return self.mlp_t(x) # B, T / 2, 1
class PSTHProjection(nn.Module):
"""Takes Last Output of Block -> (B, C)
Builds PSTH table
"""
def __init__(self, config):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd, bias=False),
nn.Dropout(p=0.2),
nn.GELU(),
nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
)
def forward(self, x):
return self.mlp(x)
# class PSTHProjection(nn.Module):
# def __init__(self, config):
# super().__init__()
# self.mlp_seq = nn.Sequential(
# nn.Linear(config.id_block_size, config.id_block_size // 2, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.id_block_size // 2, 1, bias=False)
# )
# self.mlp_t = nn.Sequential(
# nn.Linear(config.n_embd, config.n_embd * 4, bias=False),
# nn.GELU(),
# nn.Dropout(p=0.2),
# nn.Linear(config.n_embd * 4, config.id_vocab_size, bias=False)
# )
# def forward(self, x):
# x = x.transpose(-1, -2) # B, T, C -> B, C, T
# x = self.mlp_seq(x) # B, C, 1
# x = x.transpose(-2, -1) # B, 1, Vocab_id
# return self.mlp_t(x)
class TimeRNN(nn.Module):
def __init__(self, feat_size, target_size):
super().__init__()
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x, pad=None, dtx=None):
x = x + self.attn(self.ln1(x), pad)
x = x + self.mlp(self.ln2(x))
return x
class BlockSequential(nn.Sequential):
def forward(self, x, pad=None, dtx=None):
for module in self._modules.values():
x = module(x, pad, dtx)
return x
class DiceLossPSTH(nn.Module):
def __init__(self, size_average=True, smooth=1):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets, smooth=1, class_weights=None):
total_logits = F.layer_norm(torch.sum(logits, dim=-2), [logits.size()[-1]])
# probs = F.log_softmax(logits, dim=-1)
probs = F.softmax(total_logits, dim=-1)
# logits = F.gelu(logits)
# probs = logits / (logits.max(dim=-1).values.unsqueeze(-1))
# flatten label and prediction tensors
outputs = probs.contiguous().view(-1)
targets = targets.contiguous().view(-1)
labels = torch.zeros_like(outputs)
labels[targets] = 1 / len(targets)
# intersection = (outputs * labels).sum()
# dice = (2. * intersection + smooth) / (outputs.sum() + labels.sum() + smooth)
return self.cross_entropy(outputs[None, ...], labels[None, ...])
class SetLoss(nn.Module):
def __init__(self):
super().__init__()
def cross_entropy(self, input, target):
return torch.mean(-torch.sum(target * torch.log(input), 1))
def forward(self, logits, targets):
targets = targets.contiguous().view(-1)
loss = 0
for n_step, n_logits in enumerate(logits):
n_logits = F.softmax(n_logits, dim=-1)
n_target = targets[n_step:]
n_target_dist = torch.zeros_like(n_logits)
if len(n_target) != 0:
n_target_dist[n_target] = 1 / len(n_target)
loss += self.cross_entropy(n_logits[None,...], n_target_dist[None, ...])
return loss / len(logits)
class TruncatedLoss(nn.Module):
    """Truncated Lq (generalized cross-entropy) loss with a frozen
    per-sample weight table of size ``trainset_size``.

    NOTE: ``update_weight`` hard-codes ``torch.cuda.FloatTensor`` and so
    requires a CUDA device; ``forward`` itself is device-agnostic.
    """

    def __init__(self, q=0.8, k=0.2, trainset_size=50000):
        super(TruncatedLoss, self).__init__()
        self.q = q
        self.k = k
        # Non-trainable per-sample weights, updated out-of-band.
        self.weight = torch.nn.Parameter(
            data=torch.ones(trainset_size, 1), requires_grad=False
        )

    def forward(self, logits, targets, indexes):
        probs = F.softmax(logits, dim=-1)
        # Probability assigned to each target class: shape (B, T, 1).
        target_probs = torch.gather(probs, 2, targets.unsqueeze(2))
        sample_weight = self.weight[indexes]
        lq = ((1 - (target_probs ** self.q)) / self.q) * sample_weight
        lqk = ((1 - (self.k ** self.q)) / self.q) * sample_weight
        return torch.mean(lq - lqk)

    def update_weight(self, logits, targets, indexes):
        probs = F.softmax(logits, dim=-1)
        target_probs = torch.gather(probs, 2, targets.unsqueeze(2))
        lq = (1 - (target_probs ** self.q)) / self.q
        lqk = np.repeat(((1 - (self.k ** self.q)) / self.q), targets.size(0))
        lqk = torch.from_numpy(lqk).type(torch.cuda.FloatTensor)
        lqk = torch.unsqueeze(lqk, 1)
        # Keep only samples whose Lq loss is below the truncation threshold.
        condition = torch.gt(lqk, lq)
        self.weight[indexes] = condition.type(torch.cuda.FloatTensor)
# class PSTHLOSS(nn.Module):
# def __init__(self):
# super().__init__()
# def forward(self, logits, targets):
# total_logits = torch.sum(logits, dim=-2) # sum over sequence dimension
# probs = F.softmax(total_logits, dim=-1)
# outptu
class HungarianMatcher(nn.Module):
    """Optimal one-to-one assignment between prediction steps and target
    ids, solved with the Hungarian algorithm on ``1 - p(target)`` costs.
    """

    def __init__(self):
        super().__init__()

    @torch.no_grad()
    def forward(self, logits, targets):
        T, C = logits.size()
        probs = F.softmax(logits, dim=-1)
        # Cost of assigning each of the T steps to each target id.
        cost_id = (1 - probs[:, targets]).cpu().view(T, -1).unsqueeze(0)
        chunks = cost_id.split(len(targets), -1)
        assignments = [
            linear_sum_assignment(chunk[idx]) for idx, chunk in enumerate(chunks)
        ]
        return [
            (
                torch.as_tensor(rows, dtype=torch.int64),
                torch.as_tensor(cols, dtype=torch.int64),
            )
            for rows, cols in assignments
        ]
class KLDivLoss(nn.Module):
    """KL divergence between model log-probabilities and a target distribution.

    ``nn.KLDivLoss`` expects the *input* to be float log-probabilities and the
    *target* to be probabilities.
    """

    def __init__(self):
        super().__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.KLdiv = nn.KLDivLoss()

    def forward(self, logits, targets):
        log_probs = self.log_softmax(logits)
        # Bug fix: the original called ``log_probs.long()``, truncating the
        # float log-probabilities to integers before computing the divergence
        # (e.g. log(1/3) ~ -1.099 became -1), which destroyed the distribution.
        # KLDivLoss must receive the float log-probs unchanged.
        return self.KLdiv(log_probs, targets)
class PoissonCrossEntropyLoss(nn.Module):
    """Poisson negative log-likelihood over log-softmax outputs.

    ``nn.PoissonNLLLoss`` defaults to ``log_input=True``, so it is fed the
    log-probabilities directly.
    """

    def __init__(self):
        super().__init__()
        self.log_softmax = nn.LogSoftmax(dim=-1)
        self.nll_poisson = nn.PoissonNLLLoss()

    def forward(self, logits, targets):
        log_probs = self.log_softmax(logits)
        return self.nll_poisson(log_probs, targets)
class GPT(nn.Module):
    """ the full GPT language model, with a context size of block_size

    Joint model over video-frame features and neural unit-id tokens: frame
    embeddings and token embeddings are concatenated along the sequence axis
    and processed by a single transformer block stack (``GPTdecoder``).
    """

    def __init__(self, config):
        super().__init__()
        # Record the compute device; CUDA device index if available, else CPU.
        self.device = 'cpu'
        if torch.cuda.is_available():
            self.device = torch.cuda.current_device()
        self.config = config
        # input embedding stem
        self.n_embd = config.n_embd
        self.tok_emb = nn.Embedding(config.id_vocab_size, config.n_embd)
        self.pos_emb = PositionalEmbedding(config.n_embd, p_drop=0.2)
        # self.pos_emb_id = nn.Parameter(torch.zeros(1, config.id_block_size, config.n_embd))
        # Learnable positional embedding for the frame segment of the sequence.
        self.pos_emb_frames = nn.Parameter(torch.zeros(1, config.frame_block_size, config.n_embd))
        # self.temp_emb = TemporalEmbedding(config.n_embd, p_drop=0.2)
        # self.temp_emb = RotaryTemporalEmbedding(config.id_block_size)
        self.temp_emb = LearntTemporalEmbedding(config.id_block_size, config.n_embd)
        self.frame_temp_emb = LearntTemporalEmbedding(config.frame_block_size, config.n_embd)
        self.id_drop = nn.Dropout(config.id_drop)
        self.im_drop = nn.Dropout(config.im_drop)
        self.drop = nn.Dropout(config.embd_pdrop)

        # -- Visual Backbone -- #
        # self.visual_backbone = VideoFeaturesExtractor()
        self.video_encoder = VideoEncoder()
        # Fixed time stamps for the frame positions: 20 groups of
        # frame_block_size//20 slots, stepping by 0.05 per group.
        frame_temp_emb = torch.tensor(list(itertools.chain(*[[n * 0.05] * (config.frame_block_size//20) for n in range(20)]))).unsqueeze(0)
        self.register_buffer("frame_temp_emb_seq", frame_temp_emb)

        # -- Contrastive Loss -- ##
        # self.proj_id = ProjectNorm(config.n_embd, config.n_embd)
        # self.proj_vid = VidProjectNorm(config.n_embd, config.n_embd) # im_shape
        ## -- IM_Decoder -- ##
        # self.blocks_id = BlockSequential(*[Block(config) for _ in range(2)])
        # self.blocks_im = BlockSequential(*[Block(config) for _ in range(2)])
        # self.ln_f_id = nn.LayerNorm(config.n_embd)
        # self.ln_f_im = nn.LayerNorm(config.n_embd)
        ## -- Decoder -- ##
        # self.ln_f = nn.LayerNorm(config.n_embd)
        ## GPT
        self.blocks = BlockSequential(*[Block(config) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd)
        ## enc_dec
        # self.state_decoder = Decoder(config)
        # self.ln_f_state_dec = nn.LayerNorm(config.n_embd)
        # self.stimulus_decoder = Decoder(config)
        # self.ln_f_stimulus_dec = nn.LayerNorm(config.n_embd)
        # Output projection to the token vocabulary (no bias, GPT-style).
        self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        ## -- Time -- ##
        # self.proj_time = TimeProjection(config.block_size, config.id_block_size, config.n_embd, config.n_dt)
        # self.proj_time = ProjectNorm(config.n_embd, config.n_dt)
        # self.proj_time = ProjectNorm(config.n_embd, 1)
        ## -- PSTH -- ##
        # self.proj_psth = PSTHProjection(config)
        # Loss
        # self.dice_loss = DiceLossPSTH()
        # self.poisson_loss = PoissonCrossEntropyLoss()
        # self.hungarian_matcher = HungarianMatcher()
        # self.kldiv_loss = KLDivLoss()
        # self.truncated_loss = TruncatedLoss(trainset_size=config.data_size)
        # self.set_loss = SetLoss()
        # self.a = torch.tensor(0.5, requires_grad=True)
        self.block_size = config.block_size
        self.apply(self._init_weights)
        # Optional per-class weights for the id cross-entropy; only registered
        # when provided, so `self.class_weights` may not exist otherwise.
        if config.class_weights is not None:
            self.register_buffer("class_weights", config.class_weights)

        logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))

    def get_block_size(self):
        """Return the maximum context length the model was configured for."""
        return self.block_size

    def _init_weights(self, module):
        """GPT-style initialization: N(0, 0.02) weights, zero biases,
        LayerNorm reset to identity (weight 1, bias 0)."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def configure_optimizers(self, train_config):
        """
        Separates parameters into those who will experience weight decay and those that will not
        (Linear weights decay; biases, LayerNorm/Embedding weights and the
        positional/temporal embeddings do not). Returns AdamW when
        ``train_config.decay_weights`` is set, otherwise plain Adam.
        """
        if train_config.decay_weights:
            decay = set()
            no_decay = set()
            whitelist_weight_modules = (torch.nn.Linear, )
            blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
            for mn, m in self.named_modules():
                for pn, p in m.named_parameters():
                    fpn = '%s.%s' % (mn, pn) if mn else pn  # full param name
                    if pn.endswith('bias'):
                        # all biases will not be decayed
                        no_decay.add(fpn)
                    elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                        # weights of whitelist modules will be weight decayed
                        decay.add(fpn)
                    elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                        # weights of blacklist modules will NOT be weight decayed
                        no_decay.add(fpn)
                    else: no_decay.add(fpn)

            # special case the position embedding parameter in the root GPT module as not decayed
            black_list_mods = ['pos_emb', 'temp_emb']
            for mods in black_list_mods:
                for name, param in self.named_parameters():
                    if mods in name:
                        no_decay.add(name)  # also pos_emb

            # validate that we considered every parameter
            param_dict = {pn: p for pn, p in self.named_parameters()}
            # Anything placed in both sets stays in decay only.
            no_decay -= decay & no_decay
            inter_params = decay & no_decay
            union_params = decay | no_decay
            assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
            assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                % (str(param_dict.keys() - union_params), )

            # create the pytorch optimizer object
            optim_groups = [
                {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
                {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
            ]
            optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
        else:
            parameters = self.parameters()
            optimizer = torch.optim.Adam(parameters, lr=train_config.learning_rate)
        return optimizer

    def process_features(self, x):
        """Embed the raw batch dict into model space.

        Expects keys 'id', 'dt', 'frames' and 'pad' in ``x``; returns
        ``(features, pad)`` where ``features['id']`` and ``features['frames']``
        are the embedded token and frame sequences.
        """
        # batch, block_size, feature
        # p_idx = x['id_prev']
        idx = x['id']
        dtx = x['dt']
        # dtx_prev = x['dt_prev']
        frames = self.video_encoder(x['frames'])
        pad = x['pad']

        b, t = idx.size()
        # b_p, t_p = p_idx.size()
        bf, tf = frames.size()[0:2]

        # forward the GPT model
        '''
        positional and temporal embeddings implemented in multiple ways, learnt,
        fourrier decomposition and in the case of time, just passed as is.
        '''
        # # Embeddings
        # prev_id_position_embeddings = 0 # self.pos_emb(p_idx)
        # prev_id_temporal_embeddings = self.temp_emb(dtx_prev.float())
        # Positional embedding for ids is currently disabled (set to 0).
        id_position_embeddings = 0  # self.pos_emb(idx)
        im_position_embeddings = self.pos_emb_frames
        temporal_embeddings = self.temp_emb(dtx.float())

        # Extract ID features
        # prev_token_embeddings = self.id_drop(self.tok_emb(p_idx) + prev_id_temporal_embeddings + prev_id_position_embeddings)
        token_embeddings = self.tok_emb(idx)  # each index maps to a (learnable) vector
        token_embeddings = token_embeddings + temporal_embeddings + id_position_embeddings
        token_embeddings = self.id_drop(token_embeddings)

        # Extract image features and add time embeddings
        im_temporal_embeddings = self.frame_temp_emb(self.frame_temp_emb_seq)
        im_embeddings = frames  # self.tok_emb(frames)
        im_embeddings = im_embeddings + im_position_embeddings + im_temporal_embeddings
        im_embeddings = self.im_drop(im_embeddings)  # separate pos emb?

        # Tidy up
        features = dict()
        # features['id_prev'] = prev_token_embeddings
        features['id'] = token_embeddings
        features['frames'] = im_embeddings

        return features, pad

    def perceiver(self, features, pad):
        """Alternative two-stage decoder path.

        NOTE(review): relies on ``self.state_decoder`` / ``self.stimulus_decoder``
        which are commented out in ``__init__`` — calling this as-is would raise
        AttributeError; kept for experimentation.
        """
        x = self.state_decoder(tgt=features['id'], memory=features['id_prev'], pad=pad)
        x = self.ln_f_state_dec(x)
        x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
        x = self.ln_f_stimulus_dec(x)
        logits = self.head(x)
        return logits, x

    def enc_dec(self, features, pad):
        """Alternative encoder/decoder path.

        NOTE(review): depends on the commented-out ``stimulus_decoder`` in
        ``__init__``; not usable without re-enabling those modules.
        """
        x = self.stimulus_decoder(tgt=features['id'], memory=features['frames'], pad=pad)
        x = self.ln_f_stimulus_dec(x)
        logits = self.head(x)
        return logits, x

    def GPTdecoder(self, features, pad, dtx=None):
        """Default path: concatenate frame and id embeddings along the
        sequence axis and run them through the transformer block stack."""
        # image + neural features
        x = torch.cat((features['frames'], features['id']), dim=1)

        # Decoder
        x = self.blocks(x, pad, dtx)  # (B, T, C)
        x = self.ln_f(x)
        logits = self.head(x)

        # print(logits.shape) # (B, T, Vocab)
        # logits_psth = x[:, -1] # (B, C)

        return logits, x

    def forward(self, x, targets=None):
        """Run the model; if ``targets`` is given, also compute the per-batch
        id cross-entropy (masked by each sample's padding length).

        Returns ``(preds, features, loss)`` where ``loss`` is None without
        targets, else a dict with key 'id'.
        """
        idx = x['id']
        dtx = x['dt']
        frames = x['frames']
        pad = x['pad']

        b, t = idx.size()
        # b, t = x['id'].shape[0], x['id'].shape[1] + x['id_prev'].shape[1]
        bf, tf = frames.size()[0:2]
        # The configured frame block size overrides the encoder's output length.
        tf = self.config.frame_block_size
        # assert t + tf == self.config.block_size, f"{tf} {t}"
        # assert t <= self.block_size, "Cannot forward, model block size is exhausted"

        features, pad = self.process_features(x)
        # logits, x = self.perceiver(features, pad)
        # logits, x = self.enc_dec(features, pad)
        logits, x = self.GPTdecoder(features, pad)
        # time = self.proj_time(x) # (B, T_id, 1)
        # print(x[:, 0].shape)
        # psth = self.proj_psth(x) # (B, Vocab_id)

        # if targets, calculate loss
        # calculate loss on logits up to padding token for each batch
        loss = None
        loss_frames = 0
        loss_id = []
        loss_time = []
        loss_dice = []
        loss_psth = []
        loss_hungarian = []
        if targets is not None:
            # loss_psth = self.dice_loss(psth, targets['modes'][:, tf:])
            # P is this sample's pad length; only score the unpadded id span.
            for B, P in enumerate(pad):
                # im_logits = logits[B, :tf]
                # im_targets = targets['frames'][B, :tf]
                # loss_frames += F.cross_entropy(im_logits.view(-1, im_logits.size(-1)), im_targets.view(-1))
                # Id logits start after the tf frame positions.
                id_logits = logits[B, tf:tf + t - P]
                id_targets = targets['id'][B, :t - P]
                # NOTE(review): self.class_weights only exists when
                # config.class_weights was provided at construction — confirm.
                loss_id_ = F.cross_entropy(id_logits.view(-1, id_logits.size(-1)), id_targets.view(-1), weight=self.class_weights)
                # if self.config.epoch >= 15:
                #     self.truncated_loss.update_weight(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
                # loss_id_ = self.truncated_loss(id_logits[None, ...], id_targets[None, ...], id_indexes[None, ...])
                # time_preds = time[B, :t - P]
                # time_targets = targets['dt'][B, :t - P]
                # loss_time_ = F.cross_entropy(time_preds.view(-1, time_preds.size(-1)), time_targets.view(-1))
                # loss_time_ = F.mse_loss(time_preds.squeeze(-1), time_targets)
                # loss_id_ = self.poisson_loss(id_logits.view(-1, id_logits.size(-1)), F.one_hot(id_targets, self.config.vocab_size))
                # if len(id_targets) > 0:
                #     indices = self.hungarian_matcher(id_logits, id_targets)
                #     probs_matching, targets_matching = id_logits[indices[0][0]], id_targets[indices[0][1]]
                #     loss_hungarian_ = F.cross_entropy(probs_matching, targets_matching, weight=self.class_weights).to(self.device)
                #     loss_hungarian.append(loss_hungarian_)
                # # psth = self.proj_psth(x[B, -1]) # from the EOS position
                # loss_psth.append(torch.nan_to_num(self.set_loss(id_logits, id_targets)))
                # loss_psth_ = self.dice_loss(id_logits, id_targets)
                # loss_psth.append(torch.nan_to_num(loss_psth_))
                # loss_time.append(torch.nan_to_num(loss_time_))
                loss_id.append(torch.nan_to_num(loss_id_))

            loss = dict()
            # loss['frames'] = loss_frames / (b / 3)
            loss['id'] = sum(loss_id) / (b)  # sum(loss_id) / (b * 2) # / len(loss_id)
            # loss['time'] = sum(loss_time) / (b * 2)
            # loss['dice'] = sum(loss_dice) / len(loss_dice)
            # loss['dt'] = loss_time / (b * 50)
            # loss['hungarian'] = sum(loss_hungarian) / (b * 2)
            # loss['psth'] = sum(loss_psth) / (b * 2)

            # Drop loss terms that were never populated (still plain floats).
            for key in list(loss):
                if isinstance(loss[key], float):
                    del loss[key]

        preds = dict()
        preds['logits'] = logits  # [:, tf:] # only id logits
        # preds['dt'] = time

        return preds, features, loss
acf4535a6867dcb6f8d4c01c8e5430bc89cbebd3 | 1,120 | py | Python | model_training/reorg_google_spanish_peru.py | mmcauliffe/corpus-creation-scripts | 067dbf30a9a086d3987a101c6b2742cdc29b2156 | [
"CC0-1.0"
] | 1 | 2022-01-03T05:32:10.000Z | 2022-01-03T05:32:10.000Z | model_training/reorg_google_spanish_peru.py | mmcauliffe/corpus-creation-scripts | 067dbf30a9a086d3987a101c6b2742cdc29b2156 | [
"CC0-1.0"
] | null | null | null | model_training/reorg_google_spanish_peru.py | mmcauliffe/corpus-creation-scripts | 067dbf30a9a086d3987a101c6b2742cdc29b2156 | [
"CC0-1.0"
] | null | null | null | import os
import os

# Root of the downloaded Google crowdsourced Peruvian Spanish corpus.
corpus_root = r'D:\Data\speech\spanish_corpora\google_peru'

# speaker id -> gender, persisted at the end as speaker_info.tsv.
speaker_data = {}

for g in ['male', 'female']:
    gender_dir = os.path.join(corpus_root, f'es_pe_{g}')
    if not os.path.exists(gender_dir):
        continue
    # line_index.tsv maps each utterance id to its transcription.
    transcription_file = os.path.join(gender_dir, 'line_index.tsv')
    # Bug fix: the inner ``with open(...) as f`` below originally shadowed this
    # handle, so the transcript file was never closed; use distinct names.
    with open(transcription_file, 'r', encoding='utf8') as index_f:
        for line in index_f:
            line = line.strip()
            if not line:
                # Bug fix: a blank/trailing line would crash split(maxsplit=1).
                continue
            utt, text = line.split(maxsplit=1)
            # Utterance ids look like <speaker>_<number>; group by speaker.
            speaker = utt.rsplit('_', maxsplit=1)[0]
            speaker_dir = os.path.join(corpus_root, speaker)
            os.makedirs(speaker_dir, exist_ok=True)
            speaker_data[speaker] = g
            # Write the transcription alongside where the audio will live.
            with open(os.path.join(speaker_dir, utt + '.lab'), 'w', encoding='utf8') as lab_f:
                lab_f.write(text)
            wav_src = os.path.join(gender_dir, utt + '.wav')
            if os.path.exists(wav_src):
                os.rename(wav_src, os.path.join(speaker_dir, utt + '.wav'))

# Persist the speaker -> gender mapping for downstream corpus tooling.
with open(os.path.join(corpus_root, 'speaker_info.tsv'), 'w', encoding='utf8') as f:
    for k, v in speaker_data.items():
        f.write(f"{k}\t{v}\n")
acf4543ed87796f818fd36c85936cdc65e14a9a5 | 706 | py | Python | astroquery/magpis/__init__.py | wschoenell/astroquery | fe8a5e31035a1e9cdcf2603fb4da9e2fc5000d31 | [
"BSD-3-Clause"
] | 1 | 2015-05-10T00:58:21.000Z | 2015-05-10T00:58:21.000Z | astroquery/magpis/__init__.py | wschoenell/astroquery | fe8a5e31035a1e9cdcf2603fb4da9e2fc5000d31 | [
"BSD-3-Clause"
] | null | null | null | astroquery/magpis/__init__.py | wschoenell/astroquery | fe8a5e31035a1e9cdcf2603fb4da9e2fc5000d31 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
MAGPIS Image and Catalog Query Tool
-----------------------------------
.. topic:: Revision History
Refactored using common API as a part of Google Summer of Code 2013.
:Originally contributed by:
Adam Ginsburg (adam.g.ginsburg@gmail.com)
"""
from astropy.config import ConfigurationItem
MAGPIS_SERVER = ConfigurationItem('magpis_server', ["http://third.ucllnl.org/cgi-bin/gpscutout"],
'Name of the MAGPIS server.')
MAGPIS_TIMEOUT = ConfigurationItem('timeout', 60, 'time limit for connecting to MAGPIS server')
from .core import Magpis,MagpisClass
__all__ = ['Magpis','MagpisClass']
| 30.695652 | 97 | 0.677054 |
acf455dd20628030ebda06d537883af069c87428 | 121,261 | py | Python | consoleme/config/requests.py | robertzas/consoleme | 4027922635794de2e2c32bfeb2711c0619406829 | [
"Apache-2.0"
] | null | null | null | consoleme/config/requests.py | robertzas/consoleme | 4027922635794de2e2c32bfeb2711c0619406829 | [
"Apache-2.0"
] | null | null | null | consoleme/config/requests.py | robertzas/consoleme | 4027922635794de2e2c32bfeb2711c0619406829 | [
"Apache-2.0"
] | null | null | null | import asyncio
import re
import sys
import time
import uuid
from hashlib import sha256
from typing import Dict, List, Optional, Union
import sentry_sdk
import ujson as json
from asgiref.sync import sync_to_async
from botocore.exceptions import ClientError
from cloudaux.aws.iam import get_managed_policy_document
from cloudaux.aws.sts import boto3_cached_conn
from policy_sentry.util.actions import get_service_from_action
from policy_sentry.util.arns import parse_arn
from consoleme.config import config
from consoleme.exceptions.exceptions import (
InvalidRequestParameter,
NoMatchingRequest,
ResourceNotFound,
Unauthorized,
UnsupportedChangeType,
)
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
from consoleme.lib.auth import can_admin_policies
from consoleme.lib.aws import (
create_or_update_managed_policy,
fetch_resource_details,
generate_updated_resource_policy,
get_bucket_location_with_fallback,
get_region_from_arn,
get_resource_account,
get_resource_from_arn,
get_resource_policy,
get_service_from_arn,
sanitize_session_name,
)
from consoleme.lib.change_request import generate_policy_name
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import (
can_move_back_to_pending_v2,
can_update_cancel_requests_v2,
get_url_for_resource,
invalid_characters_in_policy,
send_communications_new_comment,
send_communications_policy_change_request_v2,
)
from consoleme.lib.templated_resources.requests import (
generate_honeybee_request_from_change_model_array,
)
from consoleme.lib.v2.aws_principals import get_role_details, get_user_details
from consoleme.models import (
Action,
ActionResult,
ApplyChangeModificationModel,
AssumeRolePolicyChangeModel,
CancelChangeModificationModel,
ChangeModel,
ChangeModelArray,
Command,
CommentModel,
CommentRequestModificationModel,
ExtendedAwsPrincipalModel,
ExtendedRequestModel,
GenericFileChangeModel,
InlinePolicyChangeModel,
ManagedPolicyChangeModel,
ManagedPolicyResourceChangeModel,
PermissionsBoundaryChangeModel,
PolicyModel,
PolicyRequestModificationRequestModel,
PolicyRequestModificationResponseModel,
RequestCreationModel,
RequestCreationResponse,
RequestStatus,
ResourceModel,
ResourcePolicyChangeModel,
ResourceTagChangeModel,
Status,
TagAction,
UpdateChangeModificationModel,
UserModel,
)
# Module-level singletons resolved once at import time: the shared structured
# logger plus the configured auth and AWS plugin implementations (instantiated
# immediately via the trailing call).
log = config.get_logger()
auth = get_plugin_by_name(config.get("plugins.auth", "default_auth"))()
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))()
async def generate_request_from_change_model_array(
    request_creation: RequestCreationModel, user: str
) -> ExtendedRequestModel:
    """
    Compiles an ChangeModelArray and returns a filled out ExtendedRequestModel based on the changes.

    All changes must target a single principal. Each change family is bucketed,
    validated against the live principal where applicable, and assembled into a
    pending ExtendedRequestModel.

    :param request_creation: RequestCreationModel containing the ChangeModelArray
    :param user: Str - requester's email address
    :return: ExtendedRequestModel
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "request": request_creation.dict(),
        "message": "Incoming request",
    }
    log.info(log_data)
    primary_principal = None
    change_models = request_creation.changes
    if len(change_models.changes) < 1:
        log_data["message"] = "At least 1 change is required to create a request."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    # Bucket incoming changes by type so each family can be validated on its own.
    inline_policy_changes = []
    managed_policy_changes = []
    resource_policy_changes = []
    assume_role_policy_changes = []
    resource_tag_changes = []
    permissions_boundary_changes = []
    managed_policy_resource_changes = []
    # NOTE(review): generic_file changes are collected but never added to
    # request_changes below — presumably only meaningful for the Honeybee
    # (templated) path; confirm.
    generic_file_changes = []
    role = None
    extended_request_uuid = str(uuid.uuid4())
    incremental_change_id = 0
    supported_resource_policies = config.get(
        "policies.supported_resource_types_for_policy_application", ["s3", "sqs", "sns"]
    )
    for change in change_models.changes:
        # All changes status must be not-applied at request creation
        change.status = Status.not_applied
        # Add ID for each change
        change.id = extended_request_uuid + str(incremental_change_id)
        incremental_change_id += 1
        # Enforce a maximum of one principal ARN per ChangeGeneratorModelArray (aka Policy Request)
        if not primary_principal:
            primary_principal = change.principal
        if primary_principal != change.principal:
            log_data[
                "message"
            ] = "We only support making changes to a single principal ARN per request."
            log.error(log_data)
            raise InvalidRequestParameter(log_data["message"])

        if change.change_type == "inline_policy":
            inline_policy_changes.append(
                InlinePolicyChangeModel.parse_obj(change.__dict__)
            )
        elif change.change_type == "managed_policy":
            managed_policy_changes.append(
                ManagedPolicyChangeModel.parse_obj(change.__dict__)
            )
        elif change.change_type == "managed_policy_resource":
            managed_policy_resource_changes.append(
                ManagedPolicyResourceChangeModel.parse_obj(change.__dict__)
            )
        elif change.change_type == "resource_policy":
            # Resource policy changes submitted directly are user-authored,
            # not auto-generated from another change.
            change.autogenerated = False
            change.source_change_id = None
            resource_arn_parsed = parse_arn(change.arn)
            resource_type = resource_arn_parsed["service"]
            # Only some resource types support applying policies from ConsoleMe.
            if resource_type in supported_resource_policies:
                change.supported = True
            else:
                change.supported = False
            resource_policy_changes.append(change)
        elif change.change_type == "assume_role_policy":
            assume_role_policy_changes.append(
                AssumeRolePolicyChangeModel.parse_obj(change.__dict__)
            )
        elif change.change_type == "resource_tag":
            resource_tag_changes.append(
                ResourceTagChangeModel.parse_obj(change.__dict__)
            )
        elif change.change_type == "permissions_boundary":
            permissions_boundary_changes.append(
                PermissionsBoundaryChangeModel.parse_obj(change.__dict__)
            )
        elif change.change_type == "generic_file":
            generic_file_changes.append(
                GenericFileChangeModel.parse_obj(change.__dict__)
            )
        else:
            raise UnsupportedChangeType(
                f"Invalid `change_type` for change: {change.__dict__}"
            )

    # Make sure the requester is only ever 64 chars with domain
    if len(user) > 64:
        split_items: list = user.split("@")
        user: str = (
            split_items[0][: (64 - (len(split_items[-1]) + 1))] + "@" + split_items[-1]
        )

    if primary_principal.principal_type == "AwsResource":
        # TODO: Separate this out into another function
        account_id = await get_resource_account(primary_principal.principal_arn)
        arn_parsed = parse_arn(primary_principal.principal_arn)
        arn_type = arn_parsed["service"]
        arn_name = (
            arn_parsed["resource_path"]
            if arn_parsed["resource_path"]
            else arn_parsed["resource"]
        )
        arn_region = arn_parsed["region"]
        try:
            arn_url = await get_url_for_resource(
                arn=primary_principal.principal_arn,
                resource_type=arn_type,
                account_id=account_id,
                region=arn_region,
                resource_name=arn_name,
            )
        except ResourceNotFound:
            # should never reach this case...
            arn_url = ""

        # Only one assume role policy change allowed per request
        if len(assume_role_policy_changes) > 1:
            log_data[
                "message"
            ] = "One one assume role policy change supported per request."
            log.error(log_data)
            raise InvalidRequestParameter(log_data["message"])

        if len(managed_policy_resource_changes) > 0:
            # for managed policy changes, principal arn must be a managed policy
            if arn_parsed["service"] != "iam" or arn_parsed["resource"] != "policy":
                log_data[
                    "message"
                ] = "Principal ARN type not supported for managed policy resource changes."
                log.error(log_data)
                raise InvalidRequestParameter(log_data["message"])

            if arn_parsed["account"] == "aws":
                log_data["message"] = "AWS Managed Policies aren't valid for changes."
                log.error(log_data)
                raise InvalidRequestParameter(log_data["message"])

            # A managed-policy principal cannot also carry role-level changes.
            if (
                len(inline_policy_changes) > 0
                or len(managed_policy_changes) > 0
                or len(assume_role_policy_changes) > 0
                or len(permissions_boundary_changes) > 0
            ):
                log_data[
                    "message"
                ] = "Principal ARN type not supported for inline/managed/assume role policy changes."
                log.error(log_data)
                raise InvalidRequestParameter(log_data["message"])

            if len(managed_policy_resource_changes) > 1:
                log_data[
                    "message"
                ] = "One one managed policy resource change supported per request."
                log.error(log_data)
                raise InvalidRequestParameter(log_data["message"])

            policy_name = arn_parsed["resource_path"].split("/")[-1]
            managed_policy_resource = None
            try:
                # Fetch the current policy document (if the policy exists) so
                # the change can be validated against it.
                managed_policy_resource = await sync_to_async(
                    get_managed_policy_document
                )(
                    policy_arn=primary_principal.principal_arn,
                    account_number=account_id,
                    assume_role=config.get("policies.role_name"),
                    region=config.region,
                    retry_max_attempts=2,
                )
            except ClientError as e:
                if e.response["Error"]["Code"] == "NoSuchEntity":
                    # Could be a new managed policy, hence not found
                    pass
                else:
                    log_data[
                        "message"
                    ] = "Exception raised while getting managed policy"
                    log.error(log_data, exc_info=True)
                    raise InvalidRequestParameter(log_data["message"] + ": " + str(e))

            for managed_policy_resource_change in managed_policy_resource_changes:
                await validate_managed_policy_resource_change(
                    managed_policy_resource_change,
                    policy_name,
                    user,
                    managed_policy_resource,
                )

        elif (
            len(inline_policy_changes) > 0
            or len(managed_policy_changes) > 0
            or len(assume_role_policy_changes) > 0
            or len(permissions_boundary_changes) > 0
        ):
            # for inline/managed/assume role policies, principal arn must be a role
            if arn_parsed["service"] != "iam" or arn_parsed["resource"] not in [
                "role",
                "user",
            ]:
                log_data[
                    "message"
                ] = "Resource not found, or ARN type not supported for inline/managed/assume role policy changes."
                log.error(log_data)
                raise InvalidRequestParameter(log_data["message"])

            principal_name = arn_parsed["resource_path"].split("/")[-1]
            principal_details = None
            if arn_parsed["resource"] == "role":
                principal_details = await get_role_details(
                    account_id, role_name=principal_name, extended=True
                )
            elif arn_parsed["resource"] == "user":
                principal_details = await get_user_details(
                    account_id, user_name=principal_name, extended=True
                )
            if not principal_details:
                log_data["message"] = "Principal not found"
                log.error(log_data)
                raise InvalidRequestParameter(log_data["message"])

            for inline_policy_change in inline_policy_changes:
                inline_policy_change.policy_name = await generate_policy_name(
                    inline_policy_change.policy_name,
                    user,
                    inline_policy_change.expiration_date,
                )
                await validate_inline_policy_change(
                    inline_policy_change, user, principal_details
                )
            for managed_policy_change in managed_policy_changes:
                await validate_managed_policy_change(
                    managed_policy_change, user, principal_details
                )
            for permissions_boundary_change in permissions_boundary_changes:
                await validate_permissions_boundary_change(
                    permissions_boundary_change, user, principal_details
                )
            for assume_role_policy_change in assume_role_policy_changes:
                if arn_parsed["resource"] == "user":
                    raise UnsupportedChangeType(
                        "Unable to modify an assume role policy associated with an IAM user"
                    )
                await validate_assume_role_policy_change(
                    assume_role_policy_change, user, principal_details
                )
            # NOTE(review): tag validation uses principal_details and is only
            # reached when the request also carries inline/managed/assume-role/
            # boundary changes — confirm tag-only requests are intended to skip
            # this validation.
            for resource_tag_change in resource_tag_changes:
                await validate_resource_tag_change(
                    resource_tag_change, user, principal_details
                )

        # TODO: validate resource policy logic when we are ready to apply that

        # If here, request is valid and can successfully be generated
        request_changes = ChangeModelArray(
            changes=inline_policy_changes
            + managed_policy_changes
            + resource_policy_changes
            + assume_role_policy_changes
            + resource_tag_changes
            + permissions_boundary_changes
            + managed_policy_resource_changes
        )
        extended_request = ExtendedRequestModel(
            admin_auto_approve=request_creation.admin_auto_approve,
            id=extended_request_uuid,
            principal=primary_principal,
            timestamp=int(time.time()),
            justification=request_creation.justification,
            requester_email=user,
            approvers=[],  # TODO: approvers logic (future feature)
            request_status=RequestStatus.pending,
            changes=request_changes,
            requester_info=UserModel(
                email=user,
                extended_info=await auth.get_user_info(user),
                details_url=config.config_plugin().get_employee_info_url(user),
                photo_url=config.config_plugin().get_employee_photo_url(user),
            ),
            comments=[],
            cross_account=False,
            arn_url=arn_url,
        )
        # Snapshot the principal's current policies and derive any cross-resource
        # policy changes before returning the request.
        extended_request = await populate_old_policies(extended_request, user, role)
        extended_request = await generate_resource_policies(extended_request, user)
        if len(managed_policy_resource_changes) > 0:
            await populate_old_managed_policies(extended_request, user)

    elif primary_principal.principal_type == "HoneybeeAwsResourceTemplate":
        # TODO: Generate extended request from HB template
        extended_request = await generate_honeybee_request_from_change_model_array(
            request_creation, user, extended_request_uuid
        )
    else:
        raise Exception("Unknown principal type")

    return extended_request
async def get_request_url(extended_request: ExtendedRequestModel) -> str:
    """Return the ConsoleMe URL where this request can be reviewed."""
    principal_type = extended_request.principal.principal_type
    if principal_type == "AwsResource":
        # Native AWS principals are served by the v2 policy-request page.
        return f"/policies/request/{extended_request.id}"
    if principal_type == "HoneybeeAwsResourceTemplate":
        # Templated (Honeybee) requests carry their own review URL.
        return extended_request.request_url
    raise Exception("Unsupported principal type")
async def is_request_eligible_for_auto_approval(
    extended_request: ExtendedRequestModel, user: str
) -> bool:
    """
    Checks whether a request is eligible for auto-approval probes or not. Currently, only requests with inline_policies
    are eligible for auto-approval probes. Auto-generated resource policy
    changes are ignored when making this determination.
    :param extended_request: ExtendedRequestModel
    :param user: username
    :return bool:
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "arn": extended_request.principal.principal_arn,
        "request": extended_request.dict(),
        "message": "Checking whether request is eligible for auto-approval probes",
    }
    log.info(log_data)
    eligible = False

    for change in extended_request.changes.changes:
        # Exclude auto-generated resource policies from eligibility check
        is_resource_policy = change.change_type in (
            "resource_policy",
            "sts_resource_policy",
        )
        if is_resource_policy and change.autogenerated:
            continue
        if change.change_type != "inline_policy":
            # Any other change type disqualifies the whole request.
            log_data[
                "message"
            ] = "Finished checking whether request is eligible for auto-approval probes"
            log_data["eligible_for_auto_approval"] = eligible
            log.info(log_data)
            return eligible

    # Every relevant change was an inline policy: eligible for probe checks.
    eligible = True
    log_data[
        "message"
    ] = "Finished checking whether request is eligible for auto-approval probes"
    log_data["eligible_for_auto_approval"] = eligible
    log.info(log_data)
    return eligible
async def generate_resource_policies(extended_request: ExtendedRequestModel, user: str):
    """
    Generates the resource policies and adds it to the extended request.
    Note: generating resource policy is only supported for when the principal ARN is a role right now.
    For each cross-account resource referenced by an inline policy change, an empty
    (stub) resource_policy or sts_resource_policy change is appended to the request.
    :param extended_request: ExtendedRequestModel
    :param user: username
    :return: the (possibly mutated) extended_request
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": extended_request.principal,
        "request": extended_request.dict(),
        "message": "Generating resource policies",
    }
    log.debug(log_data)
    # Resource types for which we can apply a resource policy on the target resource
    supported_resource_policies = config.get(
        "policies.supported_resource_types_for_policy_application", ["s3", "sqs", "sns"]
    )
    # Actions that indicate a cross-account IAM trust-policy (sts) relationship
    supported_trust_policy_permissions = config.get(
        "policies.supported_trust_policy_permissions",
        [
            "sts:AssumeRole",
            "sts:TagSession",
            "sts:AssumeRoleWithSAML",
            "sts:AssumeRoleWithWebIdentity",
        ],
    )
    if extended_request.principal.principal_type == "AwsResource":
        principal_arn = extended_request.principal.principal_arn
        role_account_id = await get_resource_account(principal_arn)
        arn_parsed = parse_arn(principal_arn)
        # Only IAM roles are supported as principals for resource policy generation
        if arn_parsed["service"] != "iam" or arn_parsed["resource"] != "role":
            log_data[
                "message"
            ] = "ARN type not supported for generating resource policy changes."
            log.debug(log_data)
            return extended_request
        # Empty policy stub; the requester/approver fills in statements later
        resource_policy = {"Version": "2012-10-17", "Statement": []}
        resource_policy_sha = sha256(
            json.dumps(resource_policy, escape_forward_slashes=False).encode()
        ).hexdigest()
        if not arn_parsed.get("resource_path") or not arn_parsed.get("service"):
            return extended_request
        # Model representing the requesting principal; attached to each generated change
        primary_principal_resource_model = ResourceModel(
            arn=principal_arn,
            name=arn_parsed["resource_path"].split("/")[-1],
            account_id=role_account_id,
            resource_type=arn_parsed["service"],
        )
        auto_generated_resource_policy_changes = []
        # Create resource policy stubs for current resources that are used
        for policy_change in extended_request.changes.changes:
            if policy_change.change_type == "inline_policy":
                policy_change.resources = await get_resources_from_policy_change(
                    policy_change
                )
                for resource in policy_change.resources:
                    resource_account_id = await get_resource_account(resource.arn)
                    if (
                        resource_account_id != role_account_id
                        and resource.resource_type != "iam"
                        and resource.resource_type in supported_resource_policies
                    ):
                        # Cross account
                        auto_generated_resource_policy_changes.append(
                            ResourcePolicyChangeModel(
                                arn=resource.arn,
                                policy=PolicyModel(
                                    policy_document=resource_policy,
                                    policy_sha256=resource_policy_sha,
                                ),
                                change_type="resource_policy",
                                principal=extended_request.principal,
                                status=Status.not_applied,
                                source_change_id=policy_change.id,
                                id=str(uuid.uuid4()),
                                resources=[primary_principal_resource_model],
                                autogenerated=True,
                            )
                        )
                    elif (
                        resource_account_id != role_account_id
                        and resource.resource_type == "iam"
                    ):
                        # Cross-account IAM resource: only generate an sts trust-policy
                        # stub if a statement actually grants a supported sts action.
                        resource_added = False
                        for statement in policy_change.policy.policy_document.get(
                            "Statement", []
                        ):
                            if resource.arn in statement.get("Resource"):
                                # check if action includes supported trust policy permissions
                                statement_actions = statement.get("Action", [])
                                statement_actions = (
                                    statement_actions
                                    if isinstance(statement_actions, list)
                                    else [statement_actions]
                                )
                                for action in statement_actions:
                                    if action in supported_trust_policy_permissions:
                                        # Cross account sts policy
                                        auto_generated_resource_policy_changes.append(
                                            ResourcePolicyChangeModel(
                                                arn=resource.arn,
                                                policy=PolicyModel(
                                                    policy_document=resource_policy,
                                                    policy_sha256=resource_policy_sha,
                                                ),
                                                change_type="sts_resource_policy",
                                                principal=extended_request.principal,
                                                status=Status.not_applied,
                                                source_change_id=policy_change.id,
                                                id=str(uuid.uuid4()),
                                                resources=[
                                                    primary_principal_resource_model
                                                ],
                                                autogenerated=True,
                                            )
                                        )
                                        resource_added = True
                                        break
                            # At most one sts stub per resource ARN
                            if resource_added:
                                break
        extended_request.changes.changes.extend(auto_generated_resource_policy_changes)
        # Mark the request as cross-account when any stub changes were generated
        if len(auto_generated_resource_policy_changes) > 0:
            extended_request.cross_account = True
        log_data["message"] = "Finished generating resource policies"
        log_data["request"] = extended_request.dict()
        log.debug(log_data)
    return extended_request
async def validate_inline_policy_change(
    change: InlinePolicyChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
    """
    Validate an inline policy change against a principal's current inline policies.

    :param change: InlinePolicyChangeModel to validate
    :param user: username of the requester
    :param role: principal the change targets
    :raises InvalidRequestParameter: if the change is malformed or inconsistent
        with the principal's current inline policies
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": change.principal.dict(),
        "policy_name": change.policy_name,
        "request": change.dict(),
        "message": "Validating inline policy change",
    }
    log.debug(log_data)
    if (
        await invalid_characters_in_policy(change.policy.policy_document)
        or await invalid_characters_in_policy(change.policy_name)
        or await invalid_characters_in_policy(change.policy.version)
    ):
        log_data["message"] = "Invalid characters were detected in the policy."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    # Can't detach a new policy
    if change.new and change.action == Action.detach:
        log_data["message"] = "Can't detach an inline policy that is new."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    seen_policy_name = False
    for existing_policy in role.inline_policies:
        # Check if a new policy is being created, ensure that we don't overwrite another policy with same name
        if change.new and change.policy_name == existing_policy.get("PolicyName"):
            log_data[
                "message"
            ] = f"Inline Policy with the name {change.policy_name} already exists."
            log.error(log_data)
            raise InvalidRequestParameter(log_data["message"])
        # Check if policy being updated is the same as existing policy.
        if (
            not change.new
            and change.policy.policy_document == existing_policy.get("PolicyDocument")
            and change.policy_name == existing_policy.get("PolicyName")
            and change.action == Action.attach
        ):
            log_data[
                "message"
            ] = f"No changes were found between the updated and existing policy for policy {change.policy_name}."
            log.error(log_data)
            raise InvalidRequestParameter(log_data["message"])
        if change.policy_name == existing_policy.get("PolicyName"):
            seen_policy_name = True
    # Trying to detach inline policy with name that isn't attached.
    # Bug fix: the message previously interpolated `seen_policy_name` (a boolean,
    # always False here), rendering "named 'False'"; use the actual policy name.
    if change.action == Action.detach and not seen_policy_name:
        log_data[
            "message"
        ] = f"An inline policy named '{change.policy_name}' is not attached, so we cannot remove it"
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    if change.action == Action.attach and not seen_policy_name and not change.new:
        log_data[
            "message"
        ] = f"Inline policy {change.policy_name} not seen but request claims change is not new"
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    # TODO: check sha in the request (future feature)
    # If here, then that means inline policy is validated
async def validate_permissions_boundary_change(
    change: PermissionsBoundaryChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
    """
    Validate a permissions boundary attach/detach change against the role's
    current permissions boundary.

    :param change: PermissionsBoundaryChangeModel to validate
    :param user: username of the requester
    :param role: principal the change targets
    :raises InvalidRequestParameter: if the change conflicts with current state
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": change.principal.dict(),
        "request": change.dict(),
        "message": "Validating permissions boundary change",
    }
    log.info(log_data)
    boundary_policy_name = change.arn.split("/")[-1]
    if await invalid_characters_in_policy(boundary_policy_name):
        log_data["message"] = "Invalid characters were detected in the policy name."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    if change.action == Action.attach:
        # Only a single boundary may exist at a time; reject attach if one is present.
        if role.permissions_boundary:
            log_data["message"] = (
                "A permissions boundary is already attached to this role. "
                "Only one permission boundary can be attached to a role."
            )
            log.error(log_data)
            raise InvalidRequestParameter(
                "A permissions boundary is already attached to this role. "
                "Only one permission boundary can be attached to a role."
            )
        return
    if change.action == Action.detach:
        # check to make sure permissions boundary is actually attached to the role
        if change.arn != role.permissions_boundary.get("PermissionsBoundaryArn"):
            log_data[
                "message"
            ] = "The Permissions Boundary you are trying to detach is not attached to this role."
            log.error(log_data)
            raise InvalidRequestParameter(
                f"{change.arn} is not attached to this role as a permissions boundary"
            )
        return
async def validate_managed_policy_change(
    change: ManagedPolicyChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
    """
    Validate a managed policy attach/detach change against the policies
    currently attached to the principal.

    :param change: ManagedPolicyChangeModel to validate
    :param user: username of the requester
    :param role: principal the change targets
    :raises InvalidRequestParameter: if the change conflicts with current state
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": change.principal.dict(),
        "request": change.dict(),
        "message": "Validating managed policy change",
    }
    log.info(log_data)
    managed_policy_name = change.arn.split("/")[-1]
    if await invalid_characters_in_policy(managed_policy_name):
        log_data["message"] = "Invalid characters were detected in the policy name."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    # Determine once whether this exact policy ARN is already attached.
    already_attached = any(
        attached.get("PolicyArn") == change.arn for attached in role.managed_policies
    )
    if change.action == Action.attach and already_attached:
        log_data[
            "message"
        ] = "Managed Policy with that ARN already attached to this role."
        log.error(log_data)
        raise InvalidRequestParameter(f"{change.arn} already attached to this role")
    if change.action == Action.detach and not already_attached:
        log_data[
            "message"
        ] = "The Managed Policy you are trying to detach is not attached to this role."
        log.error(log_data)
        raise InvalidRequestParameter(f"{change.arn} is not attached to this role")
    # TODO: check policy name is same what ARN claims
async def validate_managed_policy_resource_change(
    change: ManagedPolicyResourceChangeModel,
    policy_name: str,
    user: str,
    managed_policy_resource: Dict,
):
    """
    Validate a change to a managed policy resource itself (create or update).

    :param change: ManagedPolicyResourceChangeModel to validate
    :param policy_name: name of the managed policy
    :param user: username of the requester
    :param managed_policy_resource: the policy document currently in AWS (if any)
    :raises InvalidRequestParameter: if the change is malformed or inconsistent
        with what currently exists in AWS
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": change.principal.dict(),
        "request": change.dict(),
        "message": "Validating managed policy resource change",
    }
    log.info(log_data)
    if await invalid_characters_in_policy(
        policy_name
    ) or await invalid_characters_in_policy(change.policy.policy_document):
        log_data[
            "message"
        ] = "Invalid characters were detected in the policy name or document."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    if managed_policy_resource and change.new:
        # change is claiming to be a new policy, but it already exists in AWS
        log_data["message"] = "Managed policy with that ARN already exists"
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    if not managed_policy_resource and not change.new:
        # change is claiming to update policy, but it doesn't exist in AWS
        log_data["message"] = "Managed policy with that ARN doesn't exist"
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    # Updates must actually change something.
    if not change.new and change.policy.policy_document == managed_policy_resource:
        log_data[
            "message"
        ] = "No changes detected between current and proposed policy"
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
async def validate_resource_tag_change(
    change: ResourceTagChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
    """
    Validate a resource tag change.

    Currently a logging-only placeholder: all tag changes are accepted.

    :param change: ResourceTagChangeModel to validate
    :param user: username of the requester
    :param role: principal the change targets
    """
    log.debug(
        {
            "function": f"{__name__}.{sys._getframe().f_code.co_name}",
            "user": user,
            "principal": change.principal.dict(),
            "request": change.dict(),
            "role": role,
            "message": "Validating resource tag change",
        }
    )
    # TODO: Add validation here
    return
async def validate_assume_role_policy_change(
    change: AssumeRolePolicyChangeModel, user: str, role: ExtendedAwsPrincipalModel
):
    """
    Validate an assume role (trust) policy change against the role's current
    trust policy.

    :param change: AssumeRolePolicyChangeModel to validate
    :param user: username of the requester
    :param role: principal the change targets
    :raises InvalidRequestParameter: if the policy is malformed or a no-op
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": change.principal.dict(),
        "request": change.dict(),
        "message": "Validating assume role policy change",
    }
    log.debug(log_data)
    has_invalid_chars = await invalid_characters_in_policy(
        change.policy.policy_document
    ) or await invalid_characters_in_policy(change.policy.version)
    if has_invalid_chars:
        log_data["message"] = "Invalid characters were detected in the policy."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
    # Check if policy being updated is the same as existing policy.
    if change.policy.policy_document == role.assume_role_policy_document:
        log_data[
            "message"
        ] = "No changes were found between the updated and existing assume role policy."
        log.error(log_data)
        raise InvalidRequestParameter(log_data["message"])
async def apply_changes_to_role(
    extended_request: ExtendedRequestModel,
    response: Union[RequestCreationResponse, PolicyRequestModificationResponseModel],
    user: str,
    specific_change_id: Optional[str] = None,
) -> None:
    """
    Applies changes based on the changes array in the request, in a best effort manner to a role

    Caution: this method applies changes blindly... meaning it assumes before calling this method,
    you have validated the changes being made are authorized.

    :param extended_request: ExtendedRequestModel
    :param user: Str - requester's email address
    :param response: RequestCreationResponse; errors and per-change results are appended to it
    :param specific_change_id: if this function is being used to apply only one specific change
        if not provided, all non-autogenerated, supported changes are applied
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "request": extended_request.dict(),
        "message": "Applying request changes",
        "specific_change_id": specific_change_id,
    }
    log.info(log_data)
    arn_parsed = parse_arn(extended_request.principal.principal_arn)
    # Principal ARN must be a role for this function
    if arn_parsed["service"] != "iam" or arn_parsed["resource"] not in ["role", "user"]:
        log_data[
            "message"
        ] = "Resource not found, or ARN type not supported for inline/managed/assume role policy changes."
        log.error(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(status="error", message=log_data["message"])
        )
        return
    principal_name = arn_parsed["resource_path"].split("/")[-1]
    account_id = await get_resource_account(extended_request.principal.principal_arn)
    # IAM client assumed into the principal's account
    iam_client = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="client",
        account_number=account_id,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name=sanitize_session_name("principal-updater-" + user),
        retry_max_attempts=2,
        sts_client_kwargs=dict(
            region_name=config.region,
            endpoint_url=config.get(
                "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
            ).format(region=config.region),
        ),
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    for change in extended_request.changes.changes:
        if change.status == Status.applied:
            # This change has already been applied, this can happen in the future when we have a multi-change request
            # that an admin approves, and it applies 5 of the changes, but fails to apply 1 change due to an error.
            # Upon correcting the error, the admin can click approve again, and it will only apply the changes that
            # haven't already been applied
            log_data[
                "message"
            ] = "Change has already been applied, skipping applying the change"
            log_data["change"] = change.dict()
            log.debug(log_data)
            continue
        if specific_change_id and change.id != specific_change_id:
            continue
        if change.change_type == "inline_policy":
            if change.action == Action.attach:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.put_role_policy)(
                            RoleName=principal_name,
                            PolicyName=change.policy_name,
                            PolicyDocument=json.dumps(
                                change.policy.policy_document,
                                escape_forward_slashes=False,
                            ),
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.put_user_policy)(
                            UserName=principal_name,
                            PolicyName=change.policy_name,
                            PolicyDocument=json.dumps(
                                change.policy.policy_document,
                                escape_forward_slashes=False,
                            ),
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=(
                                f"Successfully applied inline policy {change.policy_name} to principal: "
                                f"{principal_name}"
                            ),
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data["message"] = "Exception occurred applying inline policy"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=(
                                f"Error occurred applying inline policy {change.policy_name} to principal: "
                                f"{principal_name}: " + str(e)
                            ),
                        )
                    )
            elif change.action == Action.detach:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.delete_role_policy)(
                            RoleName=principal_name, PolicyName=change.policy_name
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.delete_user_policy)(
                            UserName=principal_name, PolicyName=change.policy_name
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=(
                                f"Successfully deleted inline policy {change.policy_name} from principal: "
                                f"{principal_name}"
                            ),
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data["message"] = "Exception occurred deleting inline policy"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=(
                                # Fix: added missing colon for consistency with sibling messages
                                f"Error occurred deleting inline policy {change.policy_name} from principal: "
                                f"{principal_name}: " + str(e)
                            ),
                        )
                    )
        elif change.change_type == "permissions_boundary":
            if change.action == Action.attach:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.put_role_permissions_boundary)(
                            RoleName=principal_name, PermissionsBoundary=change.arn
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.put_user_permissions_boundary)(
                            UserName=principal_name, PermissionsBoundary=change.arn
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=(
                                f"Successfully attached permissions boundary {change.arn} to principal: "
                                f"{principal_name}"
                            ),
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data[
                        "message"
                    ] = "Exception occurred attaching permissions boundary"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=(
                                f"Error occurred attaching permissions boundary {change.arn} to principal: "
                                f"{principal_name}: " + str(e)
                            ),
                        )
                    )
            elif change.action == Action.detach:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(
                            iam_client.delete_role_permissions_boundary
                        )(RoleName=principal_name)
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(
                            iam_client.delete_user_permissions_boundary
                        )(UserName=principal_name)
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=(
                                f"Successfully detached permissions boundary {change.arn} from principal: "
                                f"{principal_name}"
                            ),
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data[
                        "message"
                    ] = "Exception occurred detaching permissions boundary"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=(
                                f"Error occurred detaching permissions boundary {change.arn} "
                                f"from principal: {principal_name}: " + str(e)
                            ),
                        )
                    )
        elif change.change_type == "managed_policy":
            if change.action == Action.attach:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.attach_role_policy)(
                            RoleName=principal_name, PolicyArn=change.arn
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.attach_user_policy)(
                            UserName=principal_name, PolicyArn=change.arn
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=(
                                f"Successfully attached managed policy {change.arn} to principal: {principal_name}"
                            ),
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data["message"] = "Exception occurred attaching managed policy"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=(
                                # Bug fix: this continuation string was missing the f prefix,
                                # so the literal text "{principal_name}" appeared in the message.
                                f"Error occurred attaching managed policy {change.arn} to principal: "
                                f"{principal_name}: " + str(e)
                            ),
                        )
                    )
            elif change.action == Action.detach:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.detach_role_policy)(
                            RoleName=principal_name, PolicyArn=change.arn
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.detach_user_policy)(
                            UserName=principal_name, PolicyArn=change.arn
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=(
                                f"Successfully detached managed policy {change.arn} from principal: {principal_name}"
                            ),
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data["message"] = "Exception occurred detaching managed policy"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=(
                                f"Error occurred detaching managed policy {change.arn} from principal: "
                                f"{principal_name}: " + str(e)
                            ),
                        )
                    )
        elif change.change_type == "assume_role_policy":
            # IAM users do not have a trust policy, so this change type is invalid for them
            if arn_parsed["resource"] == "user":
                raise UnsupportedChangeType(
                    "IAM users don't have assume role policies. Unable to process request."
                )
            try:
                await sync_to_async(iam_client.update_assume_role_policy)(
                    RoleName=principal_name,
                    PolicyDocument=json.dumps(
                        change.policy.policy_document, escape_forward_slashes=False
                    ),
                )
                response.action_results.append(
                    ActionResult(
                        status="success",
                        message=f"Successfully updated assume role policy for principal: {principal_name}",
                    )
                )
                change.status = Status.applied
            except Exception as e:
                log_data[
                    "message"
                ] = "Exception occurred updating assume role policy"
                log_data["error"] = str(e)
                log.error(log_data, exc_info=True)
                sentry_sdk.capture_exception()
                response.errors += 1
                response.action_results.append(
                    ActionResult(
                        status="error",
                        message=f"Error occurred updating assume role policy for principal: {principal_name}: "
                        + str(e),
                    )
                )
        elif change.change_type == "resource_tag":
            if change.tag_action in [TagAction.create, TagAction.update]:
                # Backfill key/value from the originals for rename/update requests
                if change.original_key and not change.key:
                    change.key = change.original_key
                if change.original_value and not change.value:
                    change.value = change.original_value
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.tag_role)(
                            RoleName=principal_name,
                            Tags=[{"Key": change.key, "Value": change.value}],
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.tag_user)(
                            UserName=principal_name,
                            Tags=[{"Key": change.key, "Value": change.value}],
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=f"Successfully created or updated tag for principal: {principal_name}",
                        )
                    )
                    # Tag rename: the new key was written above, now remove the old key
                    if change.original_key and change.original_key != change.key:
                        if arn_parsed["resource"] == "role":
                            await sync_to_async(iam_client.untag_role)(
                                RoleName=principal_name, TagKeys=[change.original_key]
                            )
                        elif arn_parsed["resource"] == "user":
                            await sync_to_async(iam_client.untag_user)(
                                UserName=principal_name, TagKeys=[change.original_key]
                            )
                        response.action_results.append(
                            ActionResult(
                                status="success",
                                message=f"Successfully renamed tag {change.original_key} to {change.key}.",
                            )
                        )
                    change.status = Status.applied
                except Exception as e:
                    log_data["message"] = "Exception occurred creating or updating tag"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=f"Error occurred updating tag for principal: {principal_name}: "
                            + str(e),
                        )
                    )
            if change.tag_action == TagAction.delete:
                try:
                    if arn_parsed["resource"] == "role":
                        await sync_to_async(iam_client.untag_role)(
                            RoleName=principal_name, TagKeys=[change.key]
                        )
                    elif arn_parsed["resource"] == "user":
                        await sync_to_async(iam_client.untag_user)(
                            UserName=principal_name, TagKeys=[change.key]
                        )
                    response.action_results.append(
                        ActionResult(
                            status="success",
                            message=f"Successfully deleted tag for principal: {principal_name}",
                        )
                    )
                    change.status = Status.applied
                except Exception as e:
                    log_data["message"] = "Exception occurred deleting tag"
                    log_data["error"] = str(e)
                    log.error(log_data, exc_info=True)
                    sentry_sdk.capture_exception()
                    response.errors += 1
                    response.action_results.append(
                        ActionResult(
                            status="error",
                            message=f"Error occurred deleting tag for principal: {principal_name}: "
                            + str(e),
                        )
                    )
        else:
            # unsupported type for auto-application
            if change.autogenerated and extended_request.admin_auto_approve:
                # If the change was auto-generated and an administrator auto-approved the choices, there's no need
                # to try to apply the auto-generated policies.
                pass
            else:
                response.action_results.append(
                    ActionResult(
                        status="error",
                        message=f"Error occurred applying: Change type {change.change_type} is not supported",
                    )
                )
                response.errors += 1
                log_data["message"] = "Unsupported type for auto-application detected"
                log_data["change"] = change.dict()
                log.error(log_data)
    log_data["message"] = "Finished applying request changes"
    log_data["request"] = extended_request.dict()
    log_data["response"] = response.dict()
    log.info(log_data)
async def populate_old_policies(
    extended_request: ExtendedRequestModel,
    user: str,
    principal: Optional[ExtendedAwsPrincipalModel] = None,
) -> ExtendedRequestModel:
    """
    Populates the old policies for each inline policy.
    Note: Currently only applicable when the principal ARN is a role and for old inline_policies, assume role policy
    :param extended_request: ExtendedRequestModel
    :param user: username
    :param principal: optional pre-fetched principal details; if omitted, they are
        fetched (with force_refresh) based on the request's principal ARN
    :return ExtendedRequestModel
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": extended_request.principal,
        "request": extended_request.dict(),
        "message": "Populating old policies",
    }
    log.debug(log_data)
    if extended_request.principal.principal_type == "AwsResource":
        principal_arn = extended_request.principal.principal_arn
        role_account_id = await get_resource_account(principal_arn)
        arn_parsed = parse_arn(principal_arn)
        # Only IAM roles/users are supported for old-policy population
        if arn_parsed["service"] != "iam" or arn_parsed["resource"] not in [
            "role",
            "user",
        ]:
            log_data[
                "message"
            ] = "ARN type not supported for populating old policy changes."
            log.debug(log_data)
            return extended_request
        principal_name = arn_parsed["resource_path"].split("/")[-1]
        # Fetch fresh principal details if the caller did not supply them
        if not principal:
            if arn_parsed["resource"] == "role":
                principal = await get_role_details(
                    role_account_id,
                    role_name=principal_name,
                    extended=True,
                    force_refresh=True,
                )
            elif arn_parsed["resource"] == "user":
                principal = await get_user_details(
                    role_account_id,
                    user_name=principal_name,
                    extended=True,
                    force_refresh=True,
                )
    # NOTE(review): this loop dereferences `principal`, which is only set above for
    # AwsResource principals — presumably callers pass `principal` for other types; verify.
    for change in extended_request.changes.changes:
        if change.status == Status.applied:
            # Skip changing any old policies that are saved for historical record (already applied)
            continue
        if change.change_type == "assume_role_policy":
            # Snapshot the current trust policy (document + sha256) as the "old" policy
            change.old_policy = PolicyModel(
                policy_sha256=sha256(
                    json.dumps(
                        principal.assume_role_policy_document,
                        escape_forward_slashes=False,
                    ).encode()
                ).hexdigest(),
                policy_document=principal.assume_role_policy_document,
            )
        elif change.change_type == "inline_policy" and not change.new:
            # Snapshot the matching existing inline policy as the "old" policy
            for existing_policy in principal.inline_policies:
                if change.policy_name == existing_policy.get("PolicyName"):
                    change.old_policy = PolicyModel(
                        policy_sha256=sha256(
                            json.dumps(
                                existing_policy.get("PolicyDocument"),
                                escape_forward_slashes=False,
                            ).encode()
                        ).hexdigest(),
                        policy_document=existing_policy.get("PolicyDocument"),
                    )
                    break
    log_data["message"] = "Done populating old policies"
    log_data["request"] = extended_request.dict()
    log.debug(log_data)
    return extended_request
async def populate_old_managed_policies(
    extended_request: ExtendedRequestModel,
    user: str,
) -> Dict:
    """
    Populates the old policies for a managed policy resource change.
    :param extended_request: ExtendedRequestModel
    :param user: username
    :return: dict with "changed" (bool) and, when any change was updated,
        "extended_request" (the mutated ExtendedRequestModel)
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "principal": extended_request.principal,
        "request": extended_request.dict(),
        "message": "Populating old managed policies",
    }
    log.debug(log_data)
    managed_policy_resource = None
    result = {"changed": False}
    if extended_request.principal.principal_type == "AwsResource":
        principal_arn = extended_request.principal.principal_arn
        arn_parsed = parse_arn(principal_arn)
        # Only IAM managed policy ARNs are supported here
        if arn_parsed["service"] != "iam" or arn_parsed["resource"] != "policy":
            log_data[
                "message"
            ] = "ARN type not supported for populating old managed policy changes."
            log.debug(log_data)
            return result
        try:
            # Fetch the current (default-version) policy document from AWS
            managed_policy_resource = await sync_to_async(get_managed_policy_document)(
                policy_arn=principal_arn,
                account_number=arn_parsed["account"],
                assume_role=config.get("policies.role_name"),
                region=config.region,
                retry_max_attempts=2,
            )
        except ClientError as e:
            if e.response["Error"]["Code"] == "NoSuchEntity":
                # Could be a new managed policy, hence not found, in this case there are no old policies
                return result
            raise
    else:
        # TODO: Add Honeybee Support for editing managed policies
        return result
    for change in extended_request.changes.changes:
        if (
            change.status == Status.applied
            or change.change_type != "managed_policy_resource"
        ):
            # Skip changing any old policies that are saved for historical record (already applied)
            continue
        if managed_policy_resource:
            # Compare by sha256 to detect whether the stored old policy is stale
            old_policy_sha256 = sha256(
                json.dumps(
                    managed_policy_resource, escape_forward_slashes=False
                ).encode()
            ).hexdigest()
            if (
                change.old_policy
                and old_policy_sha256 == change.old_policy.policy_sha256
            ):
                # Old policy hasn't changed since last refresh of page, no need to generate resource policy again
                continue
            result["changed"] = True
            change.old_policy = PolicyModel(
                policy_sha256=sha256(
                    json.dumps(
                        managed_policy_resource,
                        escape_forward_slashes=False,
                    ).encode()
                ).hexdigest(),
                policy_document=managed_policy_resource,
            )
    log_data["message"] = "Done populating old managed policies"
    log_data["request"] = extended_request.dict()
    log.debug(log_data)
    result["extended_request"] = extended_request
    return result
async def populate_cross_account_resource_policy_for_change(
    change, extended_request, log_data
):
    """
    Populate the "old" (currently deployed) resource policy and, for
    autogenerated changes, a freshly generated "new" policy for a single
    resource_policy / sts_resource_policy change.

    The change object is mutated in place (``supported``, ``old_policy`` and
    possibly ``policy``). Returns True when the stored old policy differs from
    what is currently deployed; otherwise a falsy value (False, or None on
    early exits) — callers treat any falsy value as "unchanged".

    :param change: a change from extended_request.changes (mutated in place)
    :param extended_request: ExtendedRequestModel the change belongs to
    :param log_data: shared logging context dict (mutated with messages)
    """
    resource_policies_changed = False
    # Resource types whose policies ConsoleMe is able to apply directly.
    supported_resource_policies = config.get(
        "policies.supported_resource_types_for_policy_application", ["s3", "sqs", "sns"]
    )
    sts_resource_policy_supported = config.get(
        "policies.sts_resource_policy_supported", True
    )
    # Only these actions may be auto-generated into an IAM trust policy.
    supported_trust_policy_permissions = config.get(
        "policies.supported_trust_policy_permissions",
        [
            "sts:AssumeRole",
            "sts:TagSession",
            "sts:AssumeRoleWithSAML",
            "sts:AssumeRoleWithWebIdentity",
        ],
    )
    all_accounts = await get_account_id_to_name_mapping(status=None)
    default_policy = {"Version": "2012-10-17", "Statement": []}
    if change.status == Status.applied:
        # Skip any changes that have already been applied so we don't overwrite any historical records
        return resource_policies_changed
    if (
        change.change_type == "resource_policy"
        or change.change_type == "sts_resource_policy"
    ):
        # resource policy change or sts assume role policy change
        resource_arn_parsed = parse_arn(change.arn)
        resource_type = resource_arn_parsed["service"]
        resource_name = resource_arn_parsed["resource"]
        resource_region = resource_arn_parsed["region"]
        resource_account = resource_arn_parsed["account"]
        if not resource_account:
            # Fall back to the resource->account index when the ARN omits the account.
            resource_account = await get_resource_account(change.arn)
        if resource_type in supported_resource_policies:
            change.supported = True
        elif (
            change.change_type == "sts_resource_policy"
            and sts_resource_policy_supported
        ):
            change.supported = True
        else:
            change.supported = False
        # If we don't have resource_account (due to resource not being in Config or 3rd Party account),
        # force the change to be not supported and default policy
        if not resource_account:
            change.supported = False
            old_policy = default_policy
            log_data["message"] = "Resource account couldn't be determined"
            log_data["resource_arn"] = change.arn
            log.warning(log_data)
        elif resource_account not in all_accounts.keys():
            # if we see the resource account, but it is not an account that we own
            change.supported = False
            old_policy = default_policy
            log_data[
                "message"
            ] = "Resource account doesn't belong to organization's accounts"
            log_data["resource_arn"] = change.arn
            log.warning(log_data)
        else:
            if change.change_type == "resource_policy":
                old_policy = await get_resource_policy(
                    account=resource_account,
                    resource_type=resource_type,
                    name=resource_name,
                    region=resource_region,
                )
            else:
                # sts_resource_policy: the "resource policy" is the role's trust policy.
                role_name = resource_arn_parsed["resource_path"].split("/")[-1]
                role = await get_role_details(
                    resource_account,
                    role_name=role_name,
                    extended=True,
                    force_refresh=True,
                )
                if not role:
                    log.error(
                        {
                            **log_data,
                            "message": (
                                "Unable to retrieve role. Won't attempt to make cross-account policy."
                            ),
                        }
                    )
                    # Bare return (None) — falsy, treated as "unchanged" by the caller.
                    return
                old_policy = role.assume_role_policy_document
        # Hash the deployed policy so we can cheaply detect drift vs. the stored copy.
        old_policy_sha256 = sha256(
            json.dumps(old_policy, escape_forward_slashes=False).encode()
        ).hexdigest()
        if change.old_policy and old_policy_sha256 == change.old_policy.policy_sha256:
            # Old policy hasn't changed since last refresh of page, no need to generate resource policy again
            return
        # Otherwise it has changed
        resource_policies_changed = True
        change.old_policy = PolicyModel(
            policy_sha256=old_policy_sha256, policy_document=old_policy
        )
        if not change.autogenerated:
            # Change is not autogenerated (user submitted or modified), don't auto-generate
            return resource_policies_changed
        # Have to grab the actions from the source inline change for resource policy changes
        actions = []
        resource_arns = []
        for source_change in extended_request.changes.changes:
            # Find the specific inline policy associated with this change
            if (
                source_change.change_type == "inline_policy"
                and source_change.id == change.source_change_id
            ):
                for statement in source_change.policy.policy_document.get(
                    "Statement", []
                ):
                    # Find the specific statement within the inline policy associated with this resource
                    if change.arn in statement.get("Resource"):
                        statement_actions = statement.get("Action", [])
                        # Normalize "Action" to a list (IAM allows a bare string).
                        statement_actions = (
                            statement_actions
                            if isinstance(statement_actions, list)
                            else [statement_actions]
                        )
                        for action in statement_actions:
                            if action.startswith(f"{resource_type}:") or (
                                resource_type == "iam" and action.startswith("sts")
                            ):
                                if change.change_type == "sts_resource_policy":
                                    # only supported actions allowed for sts resource policy
                                    if action in supported_trust_policy_permissions:
                                        actions.append(action)
                                else:
                                    actions.append(action)
                        for resource in statement.get("Resource"):
                            if change.arn in resource:
                                resource_arns.append(resource)
        new_policy = await generate_updated_resource_policy(
            existing=old_policy,
            principal_arn=extended_request.principal.principal_arn,
            resource_arns=list(set(resource_arns)),
            actions=actions,
            # since iam assume role policy documents can't include resources
            include_resources=change.change_type == "resource_policy",
        )
        new_policy_sha256 = sha256(
            json.dumps(new_policy, escape_forward_slashes=False).encode()
        ).hexdigest()
        change.policy = PolicyModel(
            policy_sha256=new_policy_sha256, policy_document=new_policy
        )
    return resource_policies_changed
async def populate_cross_account_resource_policies(
    extended_request: ExtendedRequestModel, user: str
) -> Dict:
    """
    Populates the cross-account resource policies for supported resources for each inline policy.

    Fans out one coroutine per change and awaits them all concurrently.

    :param extended_request: ExtendedRequestModel
    :param user: username
    :return: Dict:
        changed: whether the resource policies have changed or not
        extended_request: modified extended_request
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "arn": extended_request.principal.principal_arn,
        "request": extended_request.dict(),
        "message": "Populating cross-account resource policies",
    }
    log.debug(log_data)

    # One coroutine per change; gather runs them concurrently.
    pending_populations = [
        populate_cross_account_resource_policy_for_change(
            change, extended_request, log_data
        )
        for change in extended_request.changes.changes
    ]
    population_results = await asyncio.gather(*pending_populations)
    # Any truthy result means at least one old policy drifted.
    resource_policies_changed = bool(any(population_results))

    log_data["message"] = "Done populating cross account resource policies"
    log_data["request"] = extended_request.dict()
    log_data["resource_policies_changed"] = resource_policies_changed
    log.debug(log_data)
    return {"changed": resource_policies_changed, "extended_request": extended_request}
async def apply_managed_policy_resource_tag_change(
    extended_request: ExtendedRequestModel,
    change: ResourceTagChangeModel,
    response: PolicyRequestModificationResponseModel,
    user: str,
) -> PolicyRequestModificationResponseModel:
    """
    Applies resource tagging changes (create/update/rename/delete) for IAM
    managed policies.

    Caution: this method applies changes blindly... meaning it assumes before calling this method,
    you have validated the changes being made are authorized.

    :param change: ResourceTagChangeModel describing the tag operation
    :param extended_request: ExtendedRequestModel
    :param user: Str - requester's email address
    :param response: PolicyRequestModificationResponseModel (mutated and returned)
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "change": change.dict(),
        "message": "Applying resource policy change changes",
        "request": extended_request.dict(),
    }
    resource_arn_parsed = parse_arn(change.principal.principal_arn)
    resource_type = resource_arn_parsed["service"]
    resource_name = resource_arn_parsed["resource"]
    resource_account = resource_arn_parsed["account"]
    if not resource_account:
        resource_account = await get_resource_account(change.principal.principal_arn)
    if not resource_account:
        # If we don't have resource_account (due to resource not being in Config or 3rd Party account),
        # we can't apply this change
        log_data["message"] = "Resource account not found"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {change.principal.json()} as cannot determine resource account",
            )
        )
        return response
    # An account of "aws" in the ARN means an AWS-owned managed policy, which we cannot modify.
    if resource_type != "iam" or resource_name != "policy" or resource_account == "aws":
        # Not a managed policy, or a managed policy that is AWS owned
        log_data["message"] = "Resource change not supported"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {change.principal.json()} as it's not supported",
            )
        )
        return response
    # Assume into the policy's account with the configured ConsoleMe role.
    iam_client = await sync_to_async(boto3_cached_conn)(
        "iam",
        service_type="client",
        account_number=resource_account,
        region=config.region,
        assume_role=config.get("policies.role_name"),
        session_name=sanitize_session_name("tag-updater-" + user),
        retry_max_attempts=2,
        sts_client_kwargs=dict(
            region_name=config.region,
            endpoint_url=config.get(
                "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
            ).format(region=config.region),
        ),
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    principal_arn = change.principal.principal_arn
    if change.tag_action in [TagAction.create, TagAction.update]:
        # Fall back to the original key/value when the new ones are unset
        # (e.g. the user only renamed the key or only changed the value).
        if change.original_key and not change.key:
            change.key = change.original_key
        if change.original_value and not change.value:
            change.value = change.original_value
        try:
            await sync_to_async(iam_client.tag_policy)(
                PolicyArn=principal_arn,
                Tags=[{"Key": change.key, "Value": change.value}],
            )
            response.action_results.append(
                ActionResult(
                    status="success",
                    message=f"Successfully created or updated tag for managed policy: {principal_arn}",
                )
            )
            # A rename is implemented as tag-with-new-key then untag-old-key.
            if change.original_key and change.original_key != change.key:
                await sync_to_async(iam_client.untag_policy)(
                    PolicyArn=principal_arn, TagKeys=[change.original_key]
                )
                response.action_results.append(
                    ActionResult(
                        status="success",
                        message=f"Successfully renamed tag {change.original_key} to {change.key}.",
                    )
                )
            change.status = Status.applied
        except Exception as e:
            log_data["message"] = "Exception occurred creating or updating tag"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            response.errors += 1
            response.action_results.append(
                ActionResult(
                    status="error",
                    message=f"Error occurred updating tag for managed policy: {principal_arn}: "
                    + str(e),
                )
            )
    elif change.tag_action == TagAction.delete:
        try:
            await sync_to_async(iam_client.untag_policy)(
                PolicyArn=principal_arn, TagKeys=[change.key]
            )
            response.action_results.append(
                ActionResult(
                    status="success",
                    message=f"Successfully deleted tag for managed policy: {principal_arn}",
                )
            )
            change.status = Status.applied
        except Exception as e:
            log_data["message"] = "Exception occurred deleting tag"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            response.errors += 1
            response.action_results.append(
                ActionResult(
                    status="error",
                    message=f"Error occurred deleting tag for managed policy: {principal_arn}: "
                    + str(e),
                )
            )
    else:
        # Unknown TagAction value — record it as an error rather than silently ignoring.
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                # Fixed typo in user-facing message: "Unsupport" -> "Unsupported"
                message=f"Unsupported change requested for tag {change.tag_action}",
            )
        )
    return response
async def apply_non_iam_resource_tag_change(
    extended_request: ExtendedRequestModel,
    change: ResourceTagChangeModel,
    response: PolicyRequestModificationResponseModel,
    user: str,
) -> PolicyRequestModificationResponseModel:
    """
    Applies resource tagging changes (create/update/rename/delete) for
    supported non-IAM resources (s3, sqs, sns by default).

    Caution: this method applies changes blindly... meaning it assumes before calling this method,
    you have validated the changes being made are authorized.

    :param change: ResourceTagChangeModel describing the tag operation
    :param extended_request: ExtendedRequestModel
    :param user: Str - requester's email address
    :param response: PolicyRequestModificationResponseModel (mutated and returned)
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "change": change.dict(),
        "message": "Applying resource policy change changes",
        "request": extended_request.dict(),
    }
    resource_arn_parsed = parse_arn(change.principal.principal_arn)
    resource_type = resource_arn_parsed["service"]
    resource_name = resource_arn_parsed["resource"]
    resource_region = resource_arn_parsed["region"]
    resource_account = resource_arn_parsed["account"]
    if not resource_account:
        resource_account = await get_resource_account(change.principal.principal_arn)
    if resource_type == "s3" and not resource_region:
        # S3 ARNs carry no region; resolve it from the bucket location.
        resource_region = await get_bucket_location_with_fallback(
            resource_name, resource_account
        )
    if not resource_account:
        # If we don't have resource_account (due to resource not being in Config or 3rd Party account),
        # we can't apply this change
        log_data["message"] = "Resource account not found"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {change.principal.json()} as cannot determine resource account",
            )
        )
        return response
    supported_resource_types = config.get(
        "policies.supported_resource_types_for_policy_application", ["s3", "sqs", "sns"]
    )
    if resource_type not in supported_resource_types:
        log_data["message"] = "Resource change not supported"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {change.principal.json()} as it's not supported",
            )
        )
        return response
    try:
        # Assume into the resource's account with the configured ConsoleMe role.
        client = await sync_to_async(boto3_cached_conn)(
            resource_type,
            service_type="client",
            future_expiration_minutes=15,
            account_number=resource_account,
            assume_role=config.get("policies.role_name"),
            region=resource_region or config.region,
            session_name=sanitize_session_name("apply-resource-tag-" + user),
            arn_partition="aws",
            sts_client_kwargs=dict(
                region_name=config.region,
                endpoint_url=config.get(
                    "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
                ).format(region=config.region),
            ),
            client_kwargs=config.get("boto3.client_kwargs", {}),
            retry_max_attempts=2,
        )
        # Current state of the resource (tags, queue URL, ...) needed below.
        resource_details = await fetch_resource_details(
            resource_account,
            resource_type,
            resource_name,
            resource_region or config.region,
        )
        # Fall back to the original key/value when the new ones are unset
        # (e.g. the user only renamed the key or only changed the value).
        if change.original_key and not change.key:
            change.key = change.original_key
        if change.original_value and not change.value:
            change.value = change.original_value
        if resource_type == "s3":
            # S3 has no per-tag API; we must rewrite the whole TagSet.
            if change.tag_action in [TagAction.create, TagAction.update]:
                tag_key_preexists = False
                resulting_tagset = []
                for tag in resource_details["TagSet"]:
                    # If we renamed a tag key, let's "skip" the tag with the original name
                    if change.original_key and change.original_key != change.key:
                        if tag.get("Key") == change.original_key:
                            continue
                    if change.key == tag["Key"]:
                        tag_key_preexists = True
                        # If we changed the value of an existing tag, let's record that
                        resulting_tagset.append(
                            {"Key": change.key, "Value": change.value}
                        )
                    else:
                        # Leave original tag unmodified
                        resulting_tagset.append(tag)
                # Let's create the tag if it is a new one
                if not tag_key_preexists:
                    resulting_tagset.append({"Key": change.key, "Value": change.value})
                await sync_to_async(client.put_bucket_tagging)(
                    Bucket=resource_name,
                    Tagging={"TagSet": resulting_tagset},
                )
            elif change.tag_action == TagAction.delete:
                # Rebuild the TagSet without the deleted key.
                resulting_tagset = []
                for tag in resource_details["TagSet"]:
                    if tag.get("Key") != change.key:
                        resulting_tagset.append(tag)
                resource_details["TagSet"] = resulting_tagset
                await sync_to_async(client.put_bucket_tagging)(
                    Bucket=resource_name,
                    Tagging={"TagSet": resource_details["TagSet"]},
                )
        elif resource_type == "sns":
            if change.tag_action in [TagAction.create, TagAction.update]:
                await sync_to_async(client.tag_resource)(
                    ResourceArn=change.principal.principal_arn,
                    Tags=[{"Key": change.key, "Value": change.value}],
                )
                # Renaming a key
                if change.original_key and change.original_key != change.key:
                    await sync_to_async(client.untag_resource)(
                        ResourceArn=change.principal.principal_arn,
                        TagKeys=[change.original_key],
                    )
            elif change.tag_action == TagAction.delete:
                await sync_to_async(client.untag_resource)(
                    ResourceArn=change.principal.principal_arn,
                    TagKeys=[change.key],
                )
        elif resource_type == "sqs":
            # SQS tag APIs are keyed by queue URL, not ARN.
            if change.tag_action in [TagAction.create, TagAction.update]:
                await sync_to_async(client.tag_queue)(
                    QueueUrl=resource_details["QueueUrl"],
                    Tags={change.key: change.value},
                )
                # Renaming a key
                if change.original_key and change.original_key != change.key:
                    await sync_to_async(client.untag_queue)(
                        QueueUrl=resource_details["QueueUrl"],
                        TagKeys=[change.original_key],
                    )
            elif change.tag_action == TagAction.delete:
                await sync_to_async(client.untag_queue)(
                    QueueUrl=resource_details["QueueUrl"], TagKeys=[change.key]
                )
        response.action_results.append(
            ActionResult(
                status="success",
                message=f"Successfully updated resource policy for {change.principal.principal_arn}",
            )
        )
        change.status = Status.applied
    except Exception as e:
        log_data["message"] = "Exception changing resource tags"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        sentry_sdk.capture_exception()
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Error occurred changing resource tags for {change.principal.principal_arn}"
                + str(e),
            )
        )
    log_data["message"] = "Finished applying resource tagging change"
    log_data["response"] = response.dict()
    log_data["request"] = extended_request.dict()
    log_data["change"] = change.dict()
    log.debug(log_data)
    return response
async def apply_managed_policy_resource_change(
    extended_request: ExtendedRequestModel,
    change: ManagedPolicyResourceChangeModel,
    response: PolicyRequestModificationResponseModel,
    user: str,
) -> PolicyRequestModificationResponseModel:
    """
    Applies a resource policy change for an IAM managed policy, either by
    creating a brand-new managed policy or by updating an existing one.

    Caution: this method applies changes blindly... meaning it assumes before calling this method,
    you have validated the changes being made are authorized.

    :param change: ManagedPolicyResourceChangeModel describing the policy document
    :param extended_request: ExtendedRequestModel
    :param user: Str - requester's email address
    :param response: PolicyRequestModificationResponseModel (mutated and returned)
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "change": change.dict(),
        "message": "Applying managed policy resource change",
        "request": extended_request.dict(),
    }
    log.info(log_data)
    arn_parsed = parse_arn(extended_request.principal.principal_arn)
    resource_type = arn_parsed["service"]
    resource_name = arn_parsed["resource"]
    resource_account = arn_parsed["account"]
    # An account of "aws" in the ARN means an AWS-owned managed policy, which we cannot modify.
    if resource_type != "iam" or resource_name != "policy" or resource_account == "aws":
        log_data[
            "message"
        ] = "ARN type not supported for managed policy resource changes."
        log.error(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(status="error", message=log_data["message"])
        )
        return response
    if not resource_account:
        # If we don't have resource_account (due to resource not being in Config or 3rd Party account),
        # we can't apply this change
        log_data["message"] = "Resource account not found"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {extended_request.principal.principal_arn} as cannot determine resource account",
            )
        )
        return response
    # Connection details for assuming into the policy's account.
    conn_details = {
        "account_number": resource_account,
        "assume_role": config.get("policies.role_name"),
        "session_name": f"ConsoleMe_MP_{user}",
        "client_kwargs": config.get("boto3.client_kwargs", {}),
    }
    # Save current policy by populating "old" policies at the time of application for historical record
    populate_old_managed_policies_results = await populate_old_managed_policies(
        extended_request, user
    )
    if populate_old_managed_policies_results["changed"]:
        extended_request = populate_old_managed_policies_results["extended_request"]
    policy_name = arn_parsed["resource_path"].split("/")[-1]
    if change.new:
        description = f"Managed Policy created using ConsoleMe by {user}"
        # create new policy
        try:
            # Derive the policy path by stripping the name off the resource path.
            policy_path = "/" + arn_parsed["resource_path"].replace(policy_name, "")
            await create_or_update_managed_policy(
                new_policy=change.policy.policy_document,
                policy_name=policy_name,
                policy_arn=extended_request.principal.principal_arn,
                description=description,
                policy_path=policy_path,
                **conn_details,
            )
            response.action_results.append(
                ActionResult(
                    status="success",
                    message=f"Successfully created managed policy {extended_request.principal.principal_arn}",
                )
            )
            change.status = Status.applied
        except Exception as e:
            log_data["message"] = "Exception occurred creating managed policy"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            response.errors += 1
            response.action_results.append(
                ActionResult(
                    status="error",
                    message=f"Error occurred creating managed policy: {str(e)}",
                )
            )
    else:
        # Update the document of an existing managed policy (new version).
        try:
            await create_or_update_managed_policy(
                new_policy=change.policy.policy_document,
                policy_name=policy_name,
                policy_arn=extended_request.principal.principal_arn,
                description="",
                existing_policy=True,
                **conn_details,
            )
            response.action_results.append(
                ActionResult(
                    status="success",
                    message=f"Successfully updated managed policy {extended_request.principal.principal_arn}",
                )
            )
            change.status = Status.applied
        except Exception as e:
            log_data["message"] = "Exception occurred updating managed policy"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            sentry_sdk.capture_exception()
            response.errors += 1
            response.action_results.append(
                ActionResult(
                    status="error",
                    # Fixed garbled message ("creating updating policy") for the update path
                    message=f"Error occurred updating managed policy: {str(e)}",
                )
            )
    return response
async def apply_resource_policy_change(
    extended_request: ExtendedRequestModel,
    change: ResourcePolicyChangeModel,
    response: PolicyRequestModificationResponseModel,
    user: str,
) -> PolicyRequestModificationResponseModel:
    """
    Applies a resource policy change to the target resource: bucket policy
    for s3, topic policy for sns, queue policy for sqs, or assume-role
    (trust) policy for iam roles.

    Caution: this method applies changes blindly... meaning it assumes before calling this method,
    you have validated the changes being made are authorized.

    :param change: ResourcePolicyChangeModel
    :param extended_request: ExtendedRequestModel
    :param user: Str - requester's email address
    :param response: PolicyRequestModificationResponseModel (mutated and returned)
    """
    log_data: dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "change": change.dict(),
        "message": "Applying resource policy change changes",
        "request": extended_request.dict(),
    }
    log.info(log_data)
    resource_arn_parsed = parse_arn(change.arn)
    resource_type = resource_arn_parsed["service"]
    resource_name = resource_arn_parsed["resource"]
    resource_region = resource_arn_parsed["region"]
    resource_account = resource_arn_parsed["account"]
    if not resource_account:
        resource_account = await get_resource_account(change.arn)
    if resource_type == "s3" and not resource_region:
        # S3 ARNs carry no region; resolve it from the bucket location.
        resource_region = await get_bucket_location_with_fallback(
            resource_name, resource_account
        )
    if not resource_account:
        # If we don't have resource_account (due to resource not being in Config or 3rd Party account),
        # we can't apply this change
        log_data["message"] = "Resource account not found"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {change.arn} as cannot determine resource account",
            )
        )
        return response
    supported_resource_types = config.get(
        "policies.supported_resource_types_for_policy_application", ["s3", "sqs", "sns"]
    )
    sts_resource_policy_supported = config.get(
        "policies.sts_resource_policy_supported", True
    )
    # Refuse unsupported changes: flagged unsupported, unsupported resource
    # type, or sts trust-policy application disabled by configuration.
    if (
        not change.supported
        or (
            change.change_type == "resource_policy"
            and resource_type not in supported_resource_types
        )
        or (
            change.change_type == "sts_resource_policy"
            and not sts_resource_policy_supported
        )
    ):
        log_data["message"] = "Resource change not supported"
        log.warning(log_data)
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Cannot apply change to {change.arn} as it's not supported",
            )
        )
        return response
    try:
        # Assume into the resource's account with the configured ConsoleMe role.
        client = await sync_to_async(boto3_cached_conn)(
            resource_type,
            service_type="client",
            future_expiration_minutes=15,
            account_number=resource_account,
            assume_role=config.get("policies.role_name"),
            region=resource_region or config.region,
            session_name=sanitize_session_name("apply-resource-policy-" + user),
            arn_partition="aws",
            sts_client_kwargs=dict(
                region_name=config.region,
                endpoint_url=config.get(
                    "aws.sts_endpoint_url", "https://sts.{region}.amazonaws.com"
                ).format(region=config.region),
            ),
            client_kwargs=config.get("boto3.client_kwargs", {}),
            retry_max_attempts=2,
        )
        # Dispatch to the service-specific policy-application API.
        if resource_type == "s3":
            await sync_to_async(client.put_bucket_policy)(
                Bucket=resource_name,
                Policy=json.dumps(
                    change.policy.policy_document, escape_forward_slashes=False
                ),
            )
        elif resource_type == "sns":
            await sync_to_async(client.set_topic_attributes)(
                TopicArn=change.arn,
                AttributeName="Policy",
                AttributeValue=json.dumps(
                    change.policy.policy_document, escape_forward_slashes=False
                ),
            )
        elif resource_type == "sqs":
            # SQS attribute APIs are keyed by queue URL, not ARN.
            queue_url: dict = await sync_to_async(client.get_queue_url)(
                QueueName=resource_name
            )
            await sync_to_async(client.set_queue_attributes)(
                QueueUrl=queue_url.get("QueueUrl"),
                Attributes={
                    "Policy": json.dumps(
                        change.policy.policy_document, escape_forward_slashes=False
                    )
                },
            )
        elif resource_type == "iam":
            # sts_resource_policy: update the role's trust policy.
            role_name = resource_arn_parsed["resource_path"].split("/")[-1]
            await sync_to_async(client.update_assume_role_policy)(
                RoleName=role_name,
                PolicyDocument=json.dumps(
                    change.policy.policy_document, escape_forward_slashes=False
                ),
            )
            # force refresh the role for which we just changed the assume role policy doc
            await aws.fetch_iam_role(resource_account, change.arn, force_refresh=True)
        response.action_results.append(
            ActionResult(
                status="success",
                message=f"Successfully updated resource policy for {change.arn}",
            )
        )
        change.status = Status.applied
    except Exception as e:
        log_data["message"] = "Exception occurred updating resource policy"
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        sentry_sdk.capture_exception()
        response.errors += 1
        response.action_results.append(
            ActionResult(
                status="error",
                message=f"Error occurred updating resource policy for {change.arn}"
                + str(e),
            )
        )
    log_data["message"] = "Finished applying resource policy change"
    log_data["response"] = response.dict()
    log_data["request"] = extended_request.dict()
    log_data["change"] = change.dict()
    log.debug(log_data)
    return response
async def _add_error_to_response(
    log_data: Dict,
    response: PolicyRequestModificationResponseModel,
    message: str,
    error=None,
):
    """Log an error and record it on the response as a failed action result.

    Increments the response's error counter and returns the same response
    object so callers can return it directly.
    """
    log_data.update(message=message, error=error)
    log.error(log_data)
    response.errors += 1
    failed_result = ActionResult(status="error", message=log_data["message"])
    response.action_results.append(failed_result)
    return response
async def _update_dynamo_with_change(
    user: str,
    extended_request: ExtendedRequestModel,
    log_data: Dict,
    response: PolicyRequestModificationResponseModel,
    success_message: str,
    error_message: str,
    visible: bool = True,
):
    """Persist the extended request to DynamoDB and record the outcome.

    On success an (optionally hidden) success ActionResult is appended; on
    failure the exception is logged, reported to Sentry, and recorded as an
    error ActionResult. The mutated response is returned either way.
    """
    dynamo_handler = UserDynamoHandler(user)
    try:
        await dynamo_handler.write_policy_request_v2(extended_request)
    except Exception as e:
        log_data["message"] = error_message
        log_data["error"] = str(e)
        log.error(log_data, exc_info=True)
        sentry_sdk.capture_exception()
        response.errors += 1
        response.action_results.append(
            ActionResult(status="error", message=f"{error_message}: {e}")
        )
    else:
        response.action_results.append(
            ActionResult(status="success", message=success_message, visible=visible)
        )
    return response
async def _get_specific_change(changes: ChangeModelArray, change_id: str):
for change in changes.changes:
if change.id == change_id:
return change
return None
async def maybe_approve_reject_request(
    extended_request: ExtendedRequestModel,
    user: str,
    log_data: Dict,
    response: PolicyRequestModificationResponseModel,
) -> PolicyRequestModificationResponseModel:
    """
    Automatically transitions the overall request status from the aggregate
    state of its individual changes:

      * approved  - at least one change applied and nothing still pending
      * cancelled - nothing applied or pending, and at least one cancelled

    Unsupported resource-policy changes and pending autogenerated changes are
    not counted as "pending" since they cannot be applied directly. When the
    status changes, the request is persisted to DynamoDB, notifications are
    sent, and (for IAM principals) the role cache is force-refreshed.

    :param extended_request: ExtendedRequestModel (mutated in place)
    :param user: reviewer's email address, recorded on status change
    :param log_data: shared logging context dict
    :param response: PolicyRequestModificationResponseModel (mutated and returned)
    """
    any_changes_applied = False
    any_changes_pending = False
    any_changes_cancelled = False
    request_status_changed = False
    for change in extended_request.changes.changes:
        if change.status == Status.applied:
            any_changes_applied = True
        if change.status == Status.not_applied:
            # Don't consider "unsupported" resource policies as "pending", since they can't be applied.
            if (
                change.change_type == "resource_policy"
                or change.change_type == "sts_resource_policy"
            ) and change.supported is False:
                continue
            # Requests should still be marked as approved if they have pending autogenerated changes
            if change.autogenerated:
                continue
            any_changes_pending = True
        if change.status == Status.cancelled:
            any_changes_cancelled = True
    # Automatically mark request as "approved" if at least one of the changes in the request is approved, and
    # nothing else is pending
    if any_changes_applied and not any_changes_pending:
        extended_request.request_status = RequestStatus.approved
        request_status_changed = True
    # Automatically mark request as "cancelled" if all changes in the request are cancelled
    if not any_changes_applied and not any_changes_pending and any_changes_cancelled:
        extended_request.request_status = RequestStatus.cancelled
        request_status_changed = True
    if request_status_changed:
        extended_request.reviewer = user
        response = await _update_dynamo_with_change(
            user,
            extended_request,
            log_data,
            response,
            "Successfully updated request status",
            "Error updating request in dynamo",
            visible=False,
        )
        await send_communications_policy_change_request_v2(extended_request)
        account_id = await get_resource_account(
            extended_request.principal.principal_arn
        )
        # Bug fix: this prefix was previously a plain string literal
        # ("arn:{config.partition}:iam::"); the missing f-prefix meant the
        # interpolation never happened, so the startswith() check never
        # matched and the IAM role cache was never refreshed here.
        # NOTE(review): assumes `config.partition` is defined (config.region is
        # accessed the same way elsewhere in this file) — confirm against config.
        if extended_request.principal.principal_arn.startswith(
            f"arn:{config.partition}:iam::"
        ):
            await aws.fetch_iam_role(
                account_id, extended_request.principal.principal_arn, force_refresh=True
            )
    return response
async def parse_and_apply_policy_request_modification(
    extended_request: ExtendedRequestModel,
    policy_request_model: PolicyRequestModificationRequestModel,
    user: str,
    user_groups,
    last_updated,
    approval_probe_approved=False,
) -> PolicyRequestModificationResponseModel:
    """
    Parse and apply a single modification command against a policy request.

    Performs the authorization check appropriate to the command, then dispatches
    on ``policy_request_model.modification_model.command``: adding a comment,
    updating/applying/cancelling an individual change, or cancelling, rejecting,
    approving, or re-opening the request as a whole. Most paths persist the
    updated request to DynamoDB and send out notifications.

    :param extended_request: ExtendedRequestModel
    :param policy_request_model: PolicyRequestModificationRequestModel
    :param user: str - email address of the user making the modification
    :param user_groups: the user's groups (used for authorization checks)
    :param last_updated: last-updated timestamp of the request; passed through to
        ``can_move_back_to_pending_v2`` for the move-back-to-pending check
    :param approval_probe_approved: Whether this change was approved by an auto-approval probe. If not, user needs to be
        authorized to make the change.
    :raises Unauthorized: if the user may not perform the requested command
    :raises InvalidRequestParameter: if the command is not valid for the request's current status
    :raises NoMatchingRequest: if no compatible non-applied change matches the supplied change id
    :return PolicyRequestModificationResponseModel
    """
    log_data: Dict = {
        "function": f"{__name__}.{sys._getframe().f_code.co_name}",
        "user": user,
        "request": extended_request.dict(),
        "request_changes": policy_request_model.dict(),
        "message": "Parsing request modification changes",
    }
    log.debug(log_data)
    response = PolicyRequestModificationResponseModel(errors=0, action_results=[])
    request_changes = policy_request_model.modification_model
    # --- Authorization gates: each class of command has its own check; failures raise Unauthorized ---
    if request_changes.command in [Command.update_change, Command.cancel_request]:
        can_update_cancel = await can_update_cancel_requests_v2(
            extended_request.requester_email, user, user_groups
        )
        if not can_update_cancel:
            raise Unauthorized(
                "You are not authorized to update or cancel changes in this request"
            )
    if request_changes.command in [
        Command.apply_change,
        Command.approve_request,
        Command.reject_request,
    ]:
        can_manage_policy_request = can_admin_policies(user, user_groups)
        # Authorization required if the policy wasn't approved by an auto-approval probe.
        should_apply_because_auto_approved = (
            request_changes.command == Command.apply_change and approval_probe_approved
        )
        if not can_manage_policy_request and not should_apply_because_auto_approved:
            raise Unauthorized("You are not authorized to manage this request")
    if request_changes.command == Command.move_back_to_pending:
        can_move_back_to_pending = await can_move_back_to_pending_v2(
            extended_request, last_updated, user, user_groups
        )
        if not can_move_back_to_pending:
            raise Unauthorized("Cannot move this request back to pending")
    # If here, then the person is authorized to make the change they want
    # For cancelled / rejected requests, only moving back to pending, adding comments is permitted
    if extended_request.request_status in [
        RequestStatus.cancelled,
        RequestStatus.rejected,
    ] and request_changes.command not in [
        Command.add_comment,
        Command.move_back_to_pending,
    ]:
        raise InvalidRequestParameter(
            f"Cannot perform {request_changes.command.value} on "
            f"{extended_request.request_status.value} requests"
        )
    # Attach a comment and notify the opposite party (requester vs. reviewers).
    if request_changes.command == Command.add_comment:
        # TODO: max comment size? prevent spamming?
        comment_model = CommentRequestModificationModel.parse_obj(request_changes)
        user_comment = CommentModel(
            id=str(uuid.uuid4()),
            timestamp=int(time.time()),
            user_email=user,
            user=UserModel(
                email=user,
                extended_info=await auth.get_user_info(user),
                details_url=config.config_plugin().get_employee_info_url(user),
                photo_url=config.config_plugin().get_employee_photo_url(user),
            ),
            last_modified=int(time.time()),
            text=comment_model.comment_text,
        )
        extended_request.comments.append(user_comment)
        success_message = "Successfully added comment"
        error_message = "Error occurred adding comment"
        response = await _update_dynamo_with_change(
            user, extended_request, log_data, response, success_message, error_message
        )
        if user == extended_request.requester_email:
            # User who created the request adding a comment, notification should go to reviewers
            await send_communications_new_comment(extended_request, user)
        else:
            # A reviewer or someone else making the comment, notification should go to original requester
            await send_communications_new_comment(
                extended_request, user, to_addresses=[extended_request.requester_email]
            )
    # Replace the policy document of a single not-yet-applied change.
    elif request_changes.command == Command.update_change:
        update_change_model = UpdateChangeModificationModel.parse_obj(request_changes)
        specific_change = await _get_specific_change(
            extended_request.changes, update_change_model.change_id
        )
        # We only support updating inline policies, assume role policy documents and resource policies that haven't
        # applied already
        if (
            specific_change
            and specific_change.change_type
            in [
                "inline_policy",
                "resource_policy",
                "sts_resource_policy",
                "assume_role_policy",
                "managed_policy_resource",
            ]
            and specific_change.status == Status.not_applied
        ):
            specific_change.policy.policy_document = update_change_model.policy_document
            if (
                specific_change.change_type == "resource_policy"
                or specific_change.change_type == "sts_resource_policy"
            ):
                # Special case, if it's autogenerated and a user modifies it, update status to
                # not autogenerated, so we don't overwrite it on page refresh
                specific_change.autogenerated = False
            success_message = "Successfully updated policy document"
            error_message = "Error occurred updating policy document"
            specific_change.updated_by = user
            response = await _update_dynamo_with_change(
                user,
                extended_request,
                log_data,
                response,
                success_message,
                error_message,
            )
        else:
            raise NoMatchingRequest(
                "Unable to find a compatible non-applied change with "
                "that ID in this policy request"
            )
    # Apply a single not-yet-applied change to the actual cloud resource.
    elif request_changes.command == Command.apply_change:
        apply_change_model = ApplyChangeModificationModel.parse_obj(request_changes)
        specific_change = await _get_specific_change(
            extended_request.changes, apply_change_model.change_id
        )
        if specific_change and specific_change.status == Status.not_applied:
            # Update the policy doc locally for supported changes, if it needs to be updated
            if apply_change_model.policy_document and specific_change.change_type in [
                "inline_policy",
                "resource_policy",
                "sts_resource_policy",
                "assume_role_policy",
                "managed_policy_resource",
            ]:
                specific_change.policy.policy_document = (
                    apply_change_model.policy_document
                )
            # NOTE(review): only the first segment of this pattern is a raw string; the
            # '\d{12}' in the trailing segment survives only because Python leaves
            # unknown escapes intact (with a DeprecationWarning) — it should be raw too.
            managed_policy_arn_regex = re.compile(r"^arn:" + config.partition + ":iam::\d{12}:policy/.+")
            if (
                specific_change.change_type == "resource_policy"
                or specific_change.change_type == "sts_resource_policy"
            ):
                response = await apply_resource_policy_change(
                    extended_request, specific_change, response, user
                )
            elif (
                specific_change.change_type == "resource_tag"
                and not specific_change.principal.principal_arn.startswith(
                    f"arn:{config.partition}:iam::"
                )
            ):
                response = await apply_non_iam_resource_tag_change(
                    extended_request, specific_change, response, user
                )
            elif (
                specific_change.change_type == "resource_tag"
                and managed_policy_arn_regex.search(
                    specific_change.principal.principal_arn
                )
            ):
                response = await apply_managed_policy_resource_tag_change(
                    extended_request, specific_change, response, user
                )
            elif specific_change.change_type == "managed_policy_resource":
                response = await apply_managed_policy_resource_change(
                    extended_request, specific_change, response, user
                )
            else:
                # Save current policy by populating "old" policies at the time of application for historical record
                extended_request = await populate_old_policies(extended_request, user)
                await apply_changes_to_role(
                    extended_request, response, user, specific_change.id
                )
                account_id = await get_resource_account(
                    extended_request.principal.principal_arn
                )
                # Force-refresh the cached IAM role so it reflects the just-applied change.
                await aws.fetch_iam_role(
                    account_id,
                    extended_request.principal.principal_arn,
                    force_refresh=True,
                )
            if specific_change.status == Status.applied:
                # Change was successful, update in dynamo
                success_message = "Successfully updated change in dynamo"
                error_message = "Error updating change in dynamo"
                specific_change.updated_by = user
                response = await _update_dynamo_with_change(
                    user,
                    extended_request,
                    log_data,
                    response,
                    success_message,
                    error_message,
                    visible=False,
                )
        else:
            raise NoMatchingRequest(
                "Unable to find a compatible non-applied change with "
                "that ID in this policy request"
            )
    # Cancel a single not-yet-applied change.
    elif request_changes.command == Command.cancel_change:
        cancel_change_model = CancelChangeModificationModel.parse_obj(request_changes)
        specific_change = await _get_specific_change(
            extended_request.changes, cancel_change_model.change_id
        )
        if specific_change and specific_change.status == Status.not_applied:
            # Update the status
            specific_change.status = Status.cancelled
            specific_change.updated_by = user
            # Update in dynamo
            success_message = "Successfully updated change in dynamo"
            error_message = "Error updating change in dynamo"
            response = await _update_dynamo_with_change(
                user,
                extended_request,
                log_data,
                response,
                success_message,
                error_message,
                visible=False,
            )
        else:
            raise NoMatchingRequest(
                "Unable to find a compatible non-applied change with "
                "that ID in this policy request"
            )
    # Cancel the whole request (only while pending, and only if no change was applied).
    elif request_changes.command == Command.cancel_request:
        if extended_request.request_status != RequestStatus.pending:
            raise InvalidRequestParameter(
                "Request cannot be cancelled as it's status "
                f"is {extended_request.request_status.value}"
            )
        for change in extended_request.changes.changes:
            if change.status == Status.applied:
                response.errors += 1
                response.action_results.append(
                    ActionResult(
                        status="error",
                        message=(
                            "Request cannot be cancelled because at least one change has been applied already. "
                            "Please apply or cancel the other changes."
                        ),
                    )
                )
                response = await maybe_approve_reject_request(
                    extended_request, user, log_data, response
                )
                return response
        extended_request.request_status = RequestStatus.cancelled
        success_message = "Successfully cancelled request"
        error_message = "Error cancelling request"
        extended_request.reviewer = user
        response = await _update_dynamo_with_change(
            user, extended_request, log_data, response, success_message, error_message
        )
        await send_communications_policy_change_request_v2(extended_request)
    # Reject the whole request (only while pending, and only if no change was applied).
    elif request_changes.command == Command.reject_request:
        if extended_request.request_status != RequestStatus.pending:
            raise InvalidRequestParameter(
                f"Request cannot be rejected "
                f"as it's status is {extended_request.request_status.value}"
            )
        for change in extended_request.changes.changes:
            if change.status == Status.applied:
                response.errors += 1
                response.action_results.append(
                    ActionResult(
                        status="error",
                        message=(
                            "Request cannot be rejected because at least one change has been applied already. "
                            "Please apply or cancel the other changes."
                        ),
                    )
                )
                response = await maybe_approve_reject_request(
                    extended_request, user, log_data, response
                )
                return response
        extended_request.request_status = RequestStatus.rejected
        success_message = "Successfully rejected request"
        error_message = "Error rejected request"
        extended_request.reviewer = user
        response = await _update_dynamo_with_change(
            user, extended_request, log_data, response, success_message, error_message
        )
        await send_communications_policy_change_request_v2(extended_request)
    # Re-open a cancelled/rejected request (permitted above for those statuses).
    elif request_changes.command == Command.move_back_to_pending:
        extended_request.request_status = RequestStatus.pending
        success_message = "Successfully moved request back to pending"
        error_message = "Error moving request back to pending"
        response = await _update_dynamo_with_change(
            user, extended_request, log_data, response, success_message, error_message
        )
    # This marks a request as complete. This essentially means that all necessary actions have been taken with the
    # request, and doesn't apply any changes.
    elif request_changes.command == Command.approve_request:
        if extended_request.request_status != RequestStatus.pending:
            raise InvalidRequestParameter(
                "Request cannot be approved as it's "
                f"status is {extended_request.request_status.value}"
            )
        # Save current policy by populating "old" policies at the time of application for historical record
        extended_request = await populate_old_policies(extended_request, user)
        extended_request.request_status = RequestStatus.approved
        extended_request.reviewer = user
        success_message = "Successfully updated request status"
        error_message = "Error updating request in dynamo"
        response = await _update_dynamo_with_change(
            user,
            extended_request,
            log_data,
            response,
            success_message,
            error_message,
            visible=False,
        )
        await send_communications_policy_change_request_v2(extended_request)
        account_id = await get_resource_account(
            extended_request.principal.principal_arn
        )
        await aws.fetch_iam_role(
            account_id, extended_request.principal.principal_arn, force_refresh=True
        )
    # Re-evaluate the overall request status after whatever change was made above.
    response = await maybe_approve_reject_request(
        extended_request, user, log_data, response
    )
    log_data["message"] = "Done parsing/applying request modification changes"
    log_data["request"] = extended_request.dict()
    log_data["response"] = response.dict()
    log_data["error"] = None
    log.debug(log_data)
    return response
async def get_resources_from_policy_change(change: ChangeModel):
    """Return the resources affected by an inline-policy change.

    Despite the name suggesting a dict, this builds a **list** of
    ``ResourceModel`` objects, one per concrete (non-wildcard) resource ARN
    found in the change's policy document. Each entry carries the resource's
    arn, name, account_id, account_name, region, resource_type, and the
    actions from the statement that target that resource's service.

    Non-``inline_policy`` change types are ignored and yield an empty list.

    :param change: the change whose policy document should be inspected
    :raises Exception: if a statement contains an empty resource entry
    :return: list of ``ResourceModel`` objects (possibly empty)
    """
    accounts_d: dict = await get_account_id_to_name_mapping()
    resource_actions: List = []
    if change.change_type not in ["inline_policy"]:
        return []
    policy_document = change.policy.policy_document
    for statement in policy_document.get("Statement", []):
        resources = statement.get("Resource", [])
        # Normalize: "Resource" may be a single string or a list of strings.
        resources = resources if isinstance(resources, list) else [resources]
        for resource in resources:
            # We can't yet generate multiple cross-account resource policies
            # based on a partial wildcard in a resource name
            if "*" in resource:
                continue
            if not resource:
                raise Exception(
                    "One or more resources must be specified in the policy."
                )
            resource_name = get_resource_from_arn(resource)
            resource_action = {
                "arn": resource,
                "name": resource_name,
                "account_id": await get_resource_account(resource),
                "region": get_region_from_arn(resource),
                "resource_type": get_service_from_arn(resource),
            }
            resource_action["account_name"] = accounts_d.get(
                resource_action["account_id"]
            )
            resource_action["actions"] = get_actions_for_resource(resource, statement)
            resource_actions.append(ResourceModel.parse_obj(resource_action))
    return resource_actions
def get_actions_for_resource(resource_arn: str, statement: Dict) -> List[str]:
    """Return the actions in ``statement`` that apply to ``resource_arn``'s service.

    An action matches when its service prefix equals the resource's service, or
    when it is ``sts:AssumeRole`` against an IAM resource. A bare ``"*"`` action
    always matches and is appended as-is (once per occurrence, without
    deduplication, mirroring the dedup rule applied only to named actions).
    """
    resource_service = get_service_from_arn(resource_arn)
    # "Action" may be a single string or a list of strings; normalize to a list.
    raw_actions = statement.get("Action", [])
    if not isinstance(raw_actions, list):
        raw_actions = [raw_actions]
    matched: List[str] = []
    for candidate in raw_actions:
        if candidate == "*":
            matched.append(candidate)
            continue
        same_service = get_service_from_action(candidate) == resource_service
        assume_role_on_iam = (
            candidate.lower() == "sts:assumerole" and resource_service == "iam"
        )
        if (same_service or assume_role_on_iam) and candidate not in matched:
            matched.append(candidate)
    return matched
| 42.221797 | 130 | 0.583823 |
acf4562f82236f657eaf1234689ef8b4ef025d5c | 7,852 | py | Python | airflow/providers/google/cloud/transfers/azure_fileshare_to_gcs.py | augusto-herrmann/airflow | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2021-06-26T13:37:35.000Z | 2022-01-11T15:49:44.000Z | airflow/providers/google/cloud/transfers/azure_fileshare_to_gcs.py | augusto-herrmann/airflow | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 33 | 2021-07-25T10:29:30.000Z | 2022-03-30T04:39:06.000Z | airflow/providers/google/cloud/transfers/azure_fileshare_to_gcs.py | augusto-herrmann/airflow | 7ee4295dd3f7dba4fcd763286c7823bb1707fe99 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Optional, Sequence, Union
from airflow import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url, gcs_object_is_directory
from airflow.providers.microsoft.azure.hooks.fileshare import AzureFileShareHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureFileShareToGCSOperator(BaseOperator):
    """
    Synchronizes an Azure FileShare directory content (excluding subdirectories),
    possibly filtered by a prefix, with a Google Cloud Storage destination path.
    :param share_name: The Azure FileShare share where to find the objects. (templated)
    :type share_name: str
    :param directory_name: (Optional) Path to Azure FileShare directory which content is to be transferred.
        Defaults to root directory (templated)
    :type directory_name: str
    :param prefix: Prefix string which filters objects whose name begin with
        such prefix. (templated)
    :type prefix: str
    :param azure_fileshare_conn_id: The source WASB connection
    :type azure_fileshare_conn_id: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
    :type gcp_conn_id: str
    :param dest_gcs: The destination Google Cloud Storage bucket and prefix
        where you want to store the files. (templated)
    :type dest_gcs: str
    :param delegate_to: Google account to impersonate using domain-wide delegation of authority,
        if any. For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str
    :param replace: Whether you want to replace existing destination files
        or not.
    :type replace: bool
    :param gzip: Option to compress file for upload
    :type gzip: bool
    :param google_impersonation_chain: Optional Google service account to impersonate using
        short-term credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :type google_impersonation_chain: Optional[Union[str, Sequence[str]]]
    Note that ``share_name``, ``directory_name``, ``prefix``, ``delimiter`` and ``dest_gcs`` are
    templated, so you can use variables in them if you wish.
    """
    template_fields: Sequence[str] = (
        'share_name',
        'directory_name',
        'prefix',
        'dest_gcs',
    )
    def __init__(
        self,
        *,
        share_name: str,
        dest_gcs: str,
        directory_name: Optional[str] = None,
        prefix: str = '',
        azure_fileshare_conn_id: str = 'azure_fileshare_default',
        gcp_conn_id: str = 'google_cloud_default',
        delegate_to: Optional[str] = None,
        replace: bool = False,
        gzip: bool = False,
        google_impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.share_name = share_name
        self.directory_name = directory_name
        self.prefix = prefix
        self.azure_fileshare_conn_id = azure_fileshare_conn_id
        self.gcp_conn_id = gcp_conn_id
        self.dest_gcs = dest_gcs
        self.delegate_to = delegate_to
        self.replace = replace
        self.gzip = gzip
        self.google_impersonation_chain = google_impersonation_chain
    def _check_inputs(self) -> None:
        # dest_gcs must denote a "directory": empty (bucket root) or ending in '/'.
        if self.dest_gcs and not gcs_object_is_directory(self.dest_gcs):
            self.log.info(
                'Destination Google Cloud Storage path is not a valid '
                '"directory", define a path that ends with a slash "/" or '
                'leave it empty for the root of the bucket.'
            )
            raise AirflowException(
                'The destination Google Cloud Storage path must end with a slash "/" or be empty.'
            )
    def execute(self, context: 'Context'):
        """Copy files from the Azure FileShare directory to GCS and return the list of files copied."""
        self._check_inputs()
        azure_fileshare_hook = AzureFileShareHook(self.azure_fileshare_conn_id)
        files = azure_fileshare_hook.list_files(
            share_name=self.share_name, directory_name=self.directory_name
        )
        gcs_hook = GCSHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            impersonation_chain=self.google_impersonation_chain,
        )
        dest_gcs_bucket, dest_gcs_object_prefix = _parse_gcs_url(self.dest_gcs)
        if not self.replace:
            # if we are not replacing -> list all files in the GCS bucket
            # and only keep those files which are present in
            # Azure FileShare and not in Google Cloud Storage
            existing_files_prefixed = gcs_hook.list(dest_gcs_bucket, prefix=dest_gcs_object_prefix)
            existing_files = []
            # Remove the object prefix itself, an empty directory was found
            if dest_gcs_object_prefix in existing_files_prefixed:
                existing_files_prefixed.remove(dest_gcs_object_prefix)
            # Remove the object prefix from all object string paths
            for file in existing_files_prefixed:
                if file.startswith(dest_gcs_object_prefix):
                    existing_files.append(file[len(dest_gcs_object_prefix) :])
                else:
                    existing_files.append(file)
            files = list(set(files) - set(existing_files))
        if files:
            self.log.info('%s files are going to be synced.', len(files))
            if self.directory_name is None:
                raise RuntimeError("The directory_name must be set!.")
            for file in files:
                # Stage each file through a local temporary file before uploading.
                with NamedTemporaryFile() as temp_file:
                    azure_fileshare_hook.get_file_to_stream(
                        stream=temp_file,
                        share_name=self.share_name,
                        directory_name=self.directory_name,
                        file_name=file,
                    )
                    temp_file.flush()
                    # There will always be a '/' before file because it is
                    # enforced at instantiation time
                    dest_gcs_object = dest_gcs_object_prefix + file
                    gcs_hook.upload(dest_gcs_bucket, dest_gcs_object, temp_file.name, gzip=self.gzip)
            self.log.info("All done, uploaded %d files to Google Cloud Storage.", len(files))
        else:
            self.log.info('There are no new files to sync. Have a nice day!')
            self.log.info('In sync, no files needed to be uploaded to Google Cloud Storage')
        return files
| 43.622222 | 107 | 0.670148 |
acf4573b3385857875a6c2a4f7d627cf57c572be | 711 | py | Python | pyupbit/constants.py | snj830526/py_autoinvestment | 9e05ed0f50a5801959513fed31a891ff4fe0f45e | [
"BSD-2-Clause"
] | null | null | null | pyupbit/constants.py | snj830526/py_autoinvestment | 9e05ed0f50a5801959513fed31a891ff4fe0f45e | [
"BSD-2-Clause"
] | null | null | null | pyupbit/constants.py | snj830526/py_autoinvestment | 9e05ed0f50a5801959513fed31a891ff4fe0f45e | [
"BSD-2-Clause"
] | null | null | null | import json
# Load the JSON configuration once at import time. The context manager closes
# the file handle promptly instead of leaking it until garbage collection; the
# `file` name stays bound (now to a closed handle) for backward compatibility.
with open('config.json') as file:
    config = json.load(file)
# Slack channel name
def get_slack_channel():
    """Return the configured Slack channel name."""
    return config['slack_channel']
# access key
def get_access_key():
    """Return the configured API access key."""
    return config['access_key']
# secret_key
def get_secret_key():
    """Return the configured API secret key."""
    return config['secret_key']
# site_url
def get_site_url():
    """Return the configured site URL."""
    return config['site_url']
# slack token
def get_slack_token():
    """Return the configured Slack API token."""
    return config['slack_token']
# main.py path
def get_script_path():
    """Return the configured path to the main script."""
    return config['main_script_path']
# Amount to invest per order
def get_my_order_price():
    """Return the configured amount of money to invest per order."""
    return config['my_order_price']
# Whether automatic selling is enabled
def get_auto_sell():
    """Return the configured flag enabling the automatic sell feature."""
    return config['auto_sell']
# Stop-loss percentage
# NOTE(review): function name misspells "sell percent"; kept as-is because
# callers reference it by this name.
def get_force_cell_percecnt():
    """Return the configured stop-loss (forced sell) percentage."""
    return config['force_sell_percent']
acf458561c6c9c1d85cd5a55cd9293d8e88a7183 | 2,086 | py | Python | src/conjecture/rich.py | artisanofcode/python-conjecture | 5a7d57e407a4fb3e09a05d41ffda773136003289 | [
"MIT"
] | null | null | null | src/conjecture/rich.py | artisanofcode/python-conjecture | 5a7d57e407a4fb3e09a05d41ffda773136003289 | [
"MIT"
] | null | null | null | src/conjecture/rich.py | artisanofcode/python-conjecture | 5a7d57e407a4fb3e09a05d41ffda773136003289 | [
"MIT"
] | null | null | null | """rich comparison conjectures."""
from __future__ import annotations
import abc
import typing
import conjecture.base
# Type variable bound to the ``Comparable`` protocol below, so the protocol's
# comparison dunders relate ``self`` and ``other`` of the same concrete type.
CT = typing.TypeVar("CT", bound="Comparable")
class Comparable(typing.Protocol):
    """Structural protocol for values supporting rich ordering comparisons."""
    @abc.abstractmethod
    def __lt__(self: CT, other: CT) -> bool:
        """Check less than."""
    @abc.abstractmethod
    def __gt__(self: CT, other: CT) -> bool:
        """Check greater than."""
    @abc.abstractmethod
    def __le__(self: CT, other: CT) -> bool:
        """Check less than or equal to."""
    @abc.abstractmethod
    def __ge__(self: CT, other: CT) -> bool:
        """Check greater than or equal to."""
def greater_than(value: Comparable) -> conjecture.base.Conjecture:
    """
    Greater than.
    Propose that the tested value is strictly greater than the provided value
    >>> assert value == conjecture.greater_than(5)
    :return: a conjecture object
    """
    def _predicate(candidate: object) -> bool:
        return typing.cast(Comparable, candidate) > value
    return conjecture.base.Conjecture(_predicate)
def greater_than_or_equal_to(value: Comparable) -> conjecture.base.Conjecture:
    """
    Greater than or equal to.
    Propose that the value is greater than or equal to the provided value
    >>> assert value == conjecture.greater_than_or_equal_to(5)
    :return: a conjecture object
    """
    return conjecture.base.Conjecture(lambda x: typing.cast(Comparable, x) >= value)
def less_than(value: Comparable) -> conjecture.base.Conjecture:
    """
    Less than.
    Propose that the tested value is strictly less than the provided value
    >>> assert value == conjecture.less_than(5)
    :return: a conjecture object
    """
    def _predicate(candidate: object) -> bool:
        return typing.cast(Comparable, candidate) < value
    return conjecture.base.Conjecture(_predicate)
def less_than_or_equal_to(value: Comparable) -> conjecture.base.Conjecture:
    """
    Less than or equal to.
    Propose that the value is less than or equal to the provided value
    >>> assert value == conjecture.less_than_or_equal_to(5)
    :return: a conjecture object
    """
    return conjecture.base.Conjecture(lambda x: typing.cast(Comparable, x) <= value)
| 25.439024 | 84 | 0.677852 |
acf45882e2311aaf09c93331bf51da50213ce0a7 | 1,748 | py | Python | cms_client.py | chenjisheng/Vue-cms-server | 698ace0d39ac1ef403e677fec66126cfc3346117 | [
"MIT"
] | 1 | 2019-04-08T08:40:31.000Z | 2019-04-08T08:40:31.000Z | cms_client.py | chenjisheng/Vue-cms-server | 698ace0d39ac1ef403e677fec66126cfc3346117 | [
"MIT"
] | null | null | null | cms_client.py | chenjisheng/Vue-cms-server | 698ace0d39ac1ef403e677fec66126cfc3346117 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding:utf-8
# @Time : 2019/3/27 17:57
# @Author : chenjisheng
# @File : cms_client.py
# @Mail : mail_maomao@163.com
import requests
import time
BASE_URL = "http://127.0.0.1:8080/v1"
def Add_swipe():
    """Seed the CMS with four carousel (swipe) banner images via POST /v1/swipe.

    Prints the JSON body of the server's response. (Removed an unused local
    variable ``number`` that was never referenced.)
    """
    url = BASE_URL + "/swipe"
    data = [
        {"img_url": "https://dpic3.tiankong.com/g0/rw/QJ7109236255.jpg?x-oss-process=style/240h"},
        {"img_url": "https://dpic1.tiankong.com/8m/lj/QJ6212733281.jpg?x-oss-process=style/240h"},
        {"img_url": "https://dpic.tiankong.com/58/lu/QJ9109162040.jpg?x-oss-process=style/240h"},
        {"img_url": "https://dpic1.tiankong.com/uv/jb/QJ6104512293.jpg?x-oss-process=style/240h"}
    ]
    res = requests.post(url, json=data)
    print(res.json())
def Add_newsList():
    """Seed ten demo news articles via POST /v1/news, printing each response.

    All articles share one timestamp taken once before the loop.
    """
    url = BASE_URL + "/news"
    total = 10
    created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    for index in range(total):
        payload = {
            "title": str(index) * 3,
            "click": 0,
            "url": "https://dpic.tiankong.com/58/lu/QJ9109162040.jpg?x-oss-process=style/240h",
            "add_time": created_at,
            "content": {"content": str(index) * 10, "news_type": "media"},
        }
        res = requests.post(url, json=payload)
        print(res.json())
def Add_comments():
    """Post ten demo comments on each of news articles 1-9 via POST /v1/news/comments/<id>.

    Each comment gets its own timestamp, taken just before it is posted.
    """
    upper = 10
    for news_id in range(1, upper):
        url = BASE_URL + "/news/comments/" + str(news_id)
        for seq in range(10):
            stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            payload = {
                "comment": "public==" + str(seq),
                "add_time": stamp,
            }
            res = requests.post(url, json=payload)
            print(res.json())
if __name__ == "__main__":
    # Seed the CMS with demo data: banners first, then articles, then comments.
    # (Removed a redundant trailing `pass` statement.)
    Add_swipe()
    Add_newsList()
    Add_comments()
acf4597bb9c8493b4f54c935e413aac366aff3b9 | 5,980 | py | Python | aiida/orm/implementation/sqlalchemy/authinfos.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | null | null | null | aiida/orm/implementation/sqlalchemy/authinfos.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | 1 | 2021-07-14T07:59:44.000Z | 2021-08-01T10:31:09.000Z | aiida/orm/implementation/sqlalchemy/authinfos.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for the SqlAlchemy backend implementation of the `AuthInfo` ORM class."""
from aiida.backends.sqlalchemy import get_scoped_session
from aiida.backends.sqlalchemy.models.authinfo import DbAuthInfo
from aiida.common import exceptions
from aiida.common.lang import type_check
from ..authinfos import BackendAuthInfo, BackendAuthInfoCollection
from . import entities
from . import utils
class SqlaAuthInfo(entities.SqlaModelEntity[DbAuthInfo], BackendAuthInfo):
    """SqlAlchemy backend implementation for the `AuthInfo` ORM class."""
    MODEL_CLASS = DbAuthInfo
    def __init__(self, backend, computer, user):
        """Construct a new instance.
        :param backend: the backend this entity belongs to
        :param computer: a :class:`aiida.orm.implementation.computers.BackendComputer` instance
        :param user: a :class:`aiida.orm.implementation.users.BackendUser` instance
        :return: an :class:`aiida.orm.implementation.authinfos.BackendAuthInfo` instance
        """
        # Imported locally to avoid a circular import between the sibling modules.
        from . import computers
        from . import users
        super().__init__(backend)
        type_check(user, users.SqlaUser)
        type_check(computer, computers.SqlaComputer)
        self._dbmodel = utils.ModelWrapper(DbAuthInfo(dbcomputer=computer.dbmodel, aiidauser=user.dbmodel))
    @property
    def id(self): # pylint: disable=invalid-name
        # Database primary key of the wrapped DbAuthInfo row.
        return self._dbmodel.id
    @property
    def is_stored(self):
        """Return whether the entity is stored.
        :return: True if stored, False otherwise
        :rtype: bool
        """
        return self._dbmodel.is_saved()
    @property
    def enabled(self):
        """Return whether this instance is enabled.
        :return: boolean, True if enabled, False otherwise
        """
        return self._dbmodel.enabled
    @enabled.setter
    def enabled(self, enabled):
        """Set the enabled state
        :param enabled: boolean, True to enable the instance, False to disable it
        """
        self._dbmodel.enabled = enabled
    @property
    def computer(self):
        """Return the computer associated with this instance.
        :return: :class:`aiida.orm.implementation.computers.BackendComputer`
        """
        # NOTE(review): this uses `self.backend` while `user` below uses
        # `self._backend` — presumably equivalent accessors; confirm and unify.
        return self.backend.computers.from_dbmodel(self._dbmodel.dbcomputer)
    @property
    def user(self):
        """Return the user associated with this instance.
        :return: :class:`aiida.orm.implementation.users.BackendUser`
        """
        return self._backend.users.from_dbmodel(self._dbmodel.aiidauser)
    def get_auth_params(self):
        """Return the dictionary of authentication parameters
        :return: a dictionary with authentication parameters
        """
        return self._dbmodel.auth_params
    def set_auth_params(self, auth_params):
        """Set the dictionary of authentication parameters
        :param auth_params: a dictionary with authentication parameters
        """
        self._dbmodel.auth_params = auth_params
    def get_metadata(self):
        """Return the dictionary of metadata
        :return: a dictionary with metadata
        """
        return self._dbmodel._metadata # pylint: disable=protected-access
    def set_metadata(self, metadata):
        """Set the dictionary of metadata
        :param metadata: a dictionary with metadata
        """
        self._dbmodel._metadata = metadata # pylint: disable=protected-access
class SqlaAuthInfoCollection(BackendAuthInfoCollection):
    """The collection of SqlAlchemy backend `AuthInfo` entries."""
    ENTITY_CLASS = SqlaAuthInfo
    def delete(self, pk):
        """Delete an entry from the collection.
        :param pk: the pk of the entry to delete
        :raise aiida.common.exceptions.NotExistent: if no entry with the given pk exists
        """
        # pylint: disable=import-error,no-name-in-module
        from sqlalchemy.orm.exc import NoResultFound
        session = get_scoped_session()
        try:
            # BUGFIX: `Query.one()` returns a model instance, which has no `delete`
            # method of its own — the previous `.one().delete()` raised
            # AttributeError at runtime. Rows must be deleted through the session.
            row = session.query(DbAuthInfo).filter_by(id=pk).one()
            session.delete(row)
            session.commit()
        except NoResultFound:
            raise exceptions.NotExistent(f'AuthInfo<{pk}> does not exist')
    def get(self, computer, user):
        """Return an entry from the collection that is configured for the given computer and user
        :param computer: a :class:`aiida.orm.implementation.computers.BackendComputer` instance
        :param user: a :class:`aiida.orm.implementation.users.BackendUser` instance
        :return: :class:`aiida.orm.implementation.authinfos.BackendAuthInfo`
        :raise aiida.common.exceptions.NotExistent: if no entry exists for the computer/user pair
        :raise aiida.common.exceptions.MultipleObjectsError: if multiple entries exist for the computer/user pair
        """
        # pylint: disable=import-error,no-name-in-module
        from sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound
        session = get_scoped_session()
        try:
            authinfo = session.query(DbAuthInfo).filter_by(dbcomputer_id=computer.id, aiidauser_id=user.id).one()
        except NoResultFound:
            raise exceptions.NotExistent(f'User<{user.email}> has no configuration for Computer<{computer.label}>')
        except MultipleResultsFound:
            raise exceptions.MultipleObjectsError(
                f'User<{user.email}> has multiple configurations for Computer<{computer.label}>'
            )
        else:
            return self.from_dbmodel(authinfo)
| 37.375 | 115 | 0.647993 |
acf459d674d57378689de42b3fdebc8e4a5b5b1c | 5,018 | py | Python | base/core/dateutils.py | edisonlz/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | base/core/dateutils.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | null | null | null | base/core/dateutils.py | jeckun/fastor | 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | [
"Apache-2.0"
] | 9 | 2019-12-23T12:59:25.000Z | 2022-03-15T05:12:11.000Z | #encoding=utf-8
import datetime, time
import re
def get_year_start_end():
    """Return (first_day, last_day) of the current year as 'YYYY-MM-DD' strings."""
    import calendar
    now = time.localtime()
    first_day = '%d-01-01' % now.tm_year
    _, last_dom = calendar.monthrange(now.tm_year, 12)
    last_day = '%d-12-%02d' % (now.tm_year, last_dom)
    return first_day, last_day
def get_month_start_end():
    """Return (first_day, last_day) of the current month as 'YYYY-MM-DD' strings."""
    import calendar
    now = time.localtime()
    first_day = '%d-%02d-01' % (now.tm_year, now.tm_mon)
    # monthrange gives (weekday of the 1st, number of days in the month)
    _, last_dom = calendar.monthrange(now.tm_year, now.tm_mon)
    last_day = '%d-%02d-%02d' % (now.tm_year, now.tm_mon, last_dom)
    return first_day, last_day
def get_month_start_end_by_month(sdate):
    """Return (month_start, month_end, next_period_start) for *sdate*'s month.

    :param sdate: any object with ``year`` and ``month`` attributes (date/datetime)
    :return: tuple of datetimes -- the 1st at 00:00:00, the last day at
        23:59:59, and the end plus 120 seconds (just past the month boundary).
    """
    import calendar
    # Last day-of-month for sdate's year/month.  (The original also computed
    # an unused time.localtime() snapshot, removed here.)
    _, last_dom = calendar.monthrange(sdate.year, sdate.month)
    day_begin = '%d-%02d-01 00:00:00' % (sdate.year, sdate.month)
    day_end = '%d-%02d-%02d 23:59:59' % (sdate.year, sdate.month, last_dom)
    date_day_begin = datetime.datetime.strptime(day_begin, "%Y-%m-%d %H:%M:%S")
    date_day_end = datetime.datetime.strptime(day_end, "%Y-%m-%d %H:%M:%S")
    next_day_begin = date_day_end + datetime.timedelta(seconds=120)
    return date_day_begin, date_day_end, next_day_begin
def get_week_start_end(d=None):
    """Return (monday, sunday) of *d*'s week as 'YYYY-MM-DD HH:MM:SS' strings."""
    if d is None:
        d = datetime.datetime.now()
    monday = d - datetime.timedelta(days=d.weekday())
    sunday = monday + datetime.timedelta(days=6)
    return (monday.strftime("%Y-%m-%d") + " 00:00:00",
            sunday.strftime("%Y-%m-%d") + " 23:59:59")
def get_week_start_end_day(d=None):
    """Return (monday, sunday) of *d*'s week, formatted as 'MM月DD日'."""
    if d is None:
        d = datetime.datetime.now()
    monday = d - datetime.timedelta(days=d.weekday())
    sunday = monday + datetime.timedelta(days=6)
    return monday.strftime("%m月%d日"), sunday.strftime("%m月%d日")
def humanreadable_mseconds(mseconds):
    """Format a duration given in milliseconds as 'MM:SS', or 'HH:MM:SS' once
    it reaches an hour.

    Fix: the original used true division (`/`), so under Python 3 ``h`` was a
    small nonzero float for any duration, which made the hour branch always
    taken and fed floats to ``%02d``.  Integer ``divmod`` restores the intent.
    """
    seconds = int(mseconds) // 1000
    h, rem = divmod(seconds, 3600)
    m, s = divmod(rem, 60)
    if h:
        return u"%02d:%02d:%02d" % (h, m, s)
    return u"%02d:%02d" % (m, s)
def zero_date():
    """Return today's date at midnight (00:00:00) as a datetime."""
    today = datetime.date.today()
    return datetime.datetime(today.year, today.month, today.day)
def datetime_to_timestamp(d):
    """Convert a naive local datetime to an integer Unix timestamp."""
    local_struct = d.timetuple()
    return int(time.mktime(local_struct))
def timestamp_to_datetime(response):
    """Converts a unix timestamp to a Python datetime object.

    Falsy input (None, 0, '') and non-numeric strings yield None.
    """
    if not response:
        return None
    try:
        ts = int(response)
    except ValueError:
        return None
    return datetime.datetime.fromtimestamp(ts)
def days_ago(day=30):
    """Return the moment *day* days before now."""
    offset = datetime.timedelta(days=day)
    return datetime.datetime.now() - offset
def nature_days_ago(day=30):
    """Return midnight of the natural day *day* days before today."""
    return zero_date() - datetime.timedelta(day)
def after_days(day=30):
    """Return the moment *day* days after now."""
    offset = datetime.timedelta(days=day)
    return datetime.datetime.now() + offset
def after_from_days(dd, day=1):
    """Return *dd* shifted forward by *day* days."""
    return dd + datetime.timedelta(days=day)
def nature_after_days(day=30):
    """Return midnight of the natural day *day* days after today."""
    return zero_date() + datetime.timedelta(day)
def nature_after_days_end(day=30):
    """Return one minute before midnight of the natural day *day* days ahead."""
    return zero_date() + datetime.timedelta(day) - datetime.timedelta(seconds=60)
def seconds_to_zero():
    """Return the number of seconds remaining until next midnight."""
    d = nature_after_days(1)
    return int(datetime_to_timestamp(d) - int(time.time()))
def is_weekend(d=None):
    """Return True if *d* (default: today) falls on a weekend (Sat/Sun).

    Fixes two defects in the original:
    - the default argument was evaluated once at import time, so calling
      ``is_weekend()`` on a long-running process kept testing the start date;
    - ``d.weekday() in (0, 6)`` tested Monday and Sunday -- ``weekday()``
      numbers Monday as 0 and Sunday as 6, so the weekend is ``(5, 6)``.
    """
    if d is None:
        d = datetime.datetime.today()
    return d.weekday() in (5, 6)
def minutes_ago(seconds=300):
    """Return the moment *seconds* seconds before now (despite the name)."""
    offset = datetime.timedelta(seconds=seconds)
    return datetime.datetime.now() - offset
def after_minutes(seconds=300):
    """Return the moment *seconds* seconds after now (despite the name)."""
    offset = datetime.timedelta(seconds=seconds)
    return datetime.datetime.now() + offset
def int_day(d=None):
    """Return YYYYMD for *d* (default today) as an int; month/day unpadded."""
    if d is None:
        d = datetime.datetime.today()
    return int('{}{}{}'.format(d.year, d.month, d.day))
def int_days(d=None):
    """Return YYYYMMDD for *d* (default today) as an int; zero-padded."""
    if d is None:
        d = datetime.datetime.today()
    return int('{}{:02d}{:02d}'.format(d.year, d.month, d.day))
def int_month(d=None):
    """Return YYYYM for *d* (default today) as an int; month unpadded."""
    if d is None:
        d = datetime.datetime.today()
    return int('{}{}'.format(d.year, d.month))
def int_week(d=None):
    """Return YYYYMD (unpadded) of the Monday of *d*'s week, as an int."""
    if d is None:
        d = datetime.datetime.today()
    monday = d - datetime.timedelta(days=d.weekday())
    return int('{}{}{}'.format(monday.year, monday.month, monday.day))
def int_weeks(d=None):
    """Return YYYYMMDD (zero-padded) of the Monday of *d*'s week, as an int."""
    if d is None:
        d = datetime.datetime.today()
    monday = d - datetime.timedelta(days=d.weekday())
    return int('{}{:02d}{:02d}'.format(monday.year, monday.month, monday.day))
def int_last_weeks(d=None):
    """Return YYYYMMDD (zero-padded) of the Monday of *last* week as an int.

    NOTE(review): the "go back 7 days" shift only happens when *d* is None;
    an explicitly passed *d* yields the Monday of *d*'s own week (same as
    ``int_weeks``).  Confirm whether that asymmetry is intentional.
    """
    if d is None:
        d = datetime.datetime.today() - datetime.timedelta(7)
    monday = d.weekday()
    d = d - datetime.timedelta(monday)
    return int("%s%02d%02d" % (d.year, d.month, d.day))
def is_legal_date(d):
    """Return True if *d* looks like a YY(YY)-MM-DD date string.

    This is a format check only (e.g. it accepts day "00"); it does not
    validate the calendar.  Fixes from the original: the character classes
    ``[0|1|2]`` / ``[0|1]`` also matched a literal '|', the pattern was not a
    raw string, and ``!= None`` is replaced by the idiomatic ``is not None``.
    """
    timere = r"^(\d{2}|\d{4})-(0[1-9]|1[012])-([0-2][0-9]|3[01])$"
    return re.match(timere, d) is not None
def out_week_date(year, day):
    """Return the *day*-th day (1-based) of *year*, formatted 'YYYY-MM-DD'."""
    target = datetime.datetime(year, 1, 1) + datetime.timedelta(days=day - 1)
    return target.strftime("%Y-%m-%d")
| 25.09 | 116 | 0.657234 |
acf45a91114c6faa79699694e17da845ba8e1e02 | 725 | py | Python | numpynn/regularizers.py | tranlethaison/NumpyNeuralNet | 8a22784348b07e9414c70bdc3674d9a51dd81641 | [
"MIT"
] | null | null | null | numpynn/regularizers.py | tranlethaison/NumpyNeuralNet | 8a22784348b07e9414c70bdc3674d9a51dd81641 | [
"MIT"
] | null | null | null | numpynn/regularizers.py | tranlethaison/NumpyNeuralNet | 8a22784348b07e9414c70bdc3674d9a51dd81641 | [
"MIT"
] | null | null | null | import numpy as np
class L2:
def __init__(self, lmbda):
self.lmbda = lmbda
def __call__(self, weights):
if self.lmbda == 0:
return 0
return self.lmbda * np.sum(np.square(weights))
def shrink(self, lr, weights):
if self.lmbda == 0:
return weights
return weights * (1 - lr * self.lmbda * 2)
class L1:
def __init__(self, lmbda):
self.lmbda = lmbda
def __call__(self, weights):
if self.lmbda == 0:
return 0
return self.lmbda * np.sum(np.abs(weights))
def shrink(self, lr, weights):
if self.lmbda == 0:
return weights
return weights - lr * self.lmbda * np.sign(weights)
| 22.65625 | 59 | 0.555862 |
acf45ac303033fbb73d35e779392df46b3b8783e | 1,240 | py | Python | catalog/urls.py | sarrme/django_local_library | cb9fd3ed458d4c610fa6e8b5fe178d28554ba430 | [
"Apache-2.0"
] | null | null | null | catalog/urls.py | sarrme/django_local_library | cb9fd3ed458d4c610fa6e8b5fe178d28554ba430 | [
"Apache-2.0"
] | null | null | null | catalog/urls.py | sarrme/django_local_library | cb9fd3ed458d4c610fa6e8b5fe178d28554ba430 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('books/',views.BookListView.as_view(),name='books'),
path('book/<int:pk>', views.BookDetailView.as_view(),name='book-detail'),
path('authors/',views.AuthorListView.as_view(),name='authors'),
path('author/<int:pk>',views.AuthorDetailView.as_view(), name='author-detail'),
]
urlpatterns += [
path('mybooks/', views.LoanedBooksByUserListView.as_view(), name='my-borrowed'),
]
urlpatterns+=[
path('borrowed/',views.BorrowedBooksView.as_view(),name='all_borrowers'),
]
urlpatterns += [
path('book/<uuid:pk>/renew/', views.renew_book_librarian, name='renew-book-librarian'),
]
urlpatterns += [
path('author/create/', views.AuthorCreate.as_view(), name='author_create'),
path('author/<int:pk>/update/', views.AuthorUpdate.as_view(), name='author_update'),
path('author/<int:pk>/delete/', views.AuthorDelete.as_view(), name='author_delete'),
]
urlpatterns += [
path('book/create/', views.BookCreate.as_view(), name='book_create'),
path('book/<int:pk>/update/', views.BookUpdate.as_view(), name='book_update'),
path('book/<int:pk>/delete/', views.BookDelete.as_view(), name='book_delete'),
] | 38.75 | 91 | 0.693548 |
acf45ad9f7c4ac73d2343c698a8ea8dfb9895df7 | 11,483 | py | Python | trainsingle.py | shy1/language-model | 2e24519449544c3b68e57a3654345d8a8563eafd | [
"MIT"
] | null | null | null | trainsingle.py | shy1/language-model | 2e24519449544c3b68e57a3654345d8a8563eafd | [
"MIT"
] | null | null | null | trainsingle.py | shy1/language-model | 2e24519449544c3b68e57a3654345d8a8563eafd | [
"MIT"
] | null | null | null | ## Shallow Ensemble of Temporal Hypersphere Reservoirs
## - pre-trains a single reservoir for later inclusion in an ensemble
import numpy as np
import cupy as cp
import chargrams as cg
import re
import pickle
import time
from random import shuffle
from gensim.models.keyedvectors import KeyedVectors
import pydybm.arraymath as amath
import pydybm.arraymath.dycupy as dycupy
from pydybm.base.sgd32 import ADAM
# todo: grab bigram indexes directly from text file instead of loading w2vec library
# Load pretrained bigram embeddings only to recover the bigram vocabulary;
# the vectors themselves are not used below, just the index -> bigram order.
wv = KeyedVectors.load_word2vec_format('/home/user01/dev/wang2vec/embeddings-i3e4-ssg-neg15-s1024w6.txt', binary=False)
temp = wv.index2word
# Drop the first vocabulary entry, then restore spaces ('_' was the space
# placeholder in training) and build the bigram -> integer id lookup.
glist = np.array(temp[1:len(temp)])
glist = [re.sub(r'_', ' ', j) for j in glist]
gramindex = {gram:idx for idx, gram in enumerate(glist)}
def init(M, N, inweights):
    """Normalize the input weight matrices and build the one-hot target matrix.

    Each column of every matrix in *inweights* is shifted to zero mean and
    scaled to unit L2 norm, in place.  ``v`` is the MxM identity; its columns
    are used elsewhere as one-hot target vectors.

    NOTE(review): parameter ``N`` is unused here -- kept for signature
    compatibility with the call sites.
    """
    v = cp.identity(M, dtype=np.float32)
    for key in inweights:
        for m in range(M):
            inweights[key][:, m] = inweights[key][:, m] - inweights[key][:, m].mean()
            inweights[key][:, m] = inweights[key][:, m] / cp.linalg.norm(inweights[key][:, m])
    return inweights, v
def train_kcpa(inweights, v, variables, leak, bs, step, s, cpstates):
    """Run the reservoir over sequence *s* and collect its states.

    The leaky reservoir update mixes the current input column with a
    circularly shifted copy of the previous state, then renormalizes to the
    unit hypersphere.  Each post-warmup state is appended (row-wise) to
    *cpstates* for later kernel-PCA; the commented-out lines are the softmax
    readout/SGD path that is disabled in this collection mode.
    Note: relies on module-level ``layerscales``.
    """
    T = len(s)
    N = 1024
    M = 1024
    x1 = cp.zeros(N * layerscales["L1"], dtype=np.float32)
    # gradient = dict()
    # softerr1 = 0
    # err1 = 0
    skipfirst = 1
    t = step
    tm1 = (T - 1 - t - skipfirst)
    # Warm-up pass: advance the state without recording it.
    for k in range(skipfirst):
        current = s[t - step]
        x1 = (1.0 - leak) * x1 + leak * (inweights["U1"][:, current] + cp.roll(x1, 1))
        x1 = x1 / cp.linalg.norm(x1)
        # wx = cp.dot(variables["W1"], x1)
        # wx = wx - cp.max(wx)
        # p = cp.exp(wx)
        # p1 = p / cp.sum(p)
        t += 1
    for b1 in range(tm1):
        current = s[t - step]
        x1 = (1.0 - leak) * x1 + leak * (inweights["U1"][:, current] + cp.roll(x1, 1))
        x1 = x1 / cp.linalg.norm(x1)
        # wx = cp.dot(variables["W1"], x1)
        # wx = wx - cp.max(wx)
        # p = cp.exp(wx)
        # p1 = p / cp.sum(p)
        cpstates = cp.concatenate((cpstates, x1.reshape((1, N * layerscales["L1"]))))
        # target = s[t+1]
        # gradient["W1"] = cp.outer(v[:, target] - p1, x1)
        # SGD.update_state(gradient)
        # delta = SGD.get_delta()
        # SGD.update_with_L1_regularization(variables, delta, L1)
        t += 1
    return variables, cpstates
def train(inweights, v, variables, leak, bs, steps, testflag, s, count):
    """Run one pass over sequence *s*, training the softmax readout via SGD.

    Returns percentage prediction error and mean softmax loss for the layer,
    plus the (possibly updated) readout weights.  When ``testflag`` is
    nonzero, no gradient step is taken (evaluation only).

    NOTE(review): parameter ``steps`` is unused -- the loop reads the
    module-level ``step`` instead (``t = step`` / ``s[t - step]``); also uses
    module-level ``layerscales``, ``SGD`` and ``L1``.  Confirm this is
    intentional before refactoring.
    """
    T = len(s)
    N = 1024
    M = 1024
    x1 = cp.zeros(N * layerscales["L1"], dtype=np.float32)
    gradient = dict()
    softerr1 = 0
    err1 = 0
    skipfirst = 0
    t = step
    tm1 = (T - 1 - t - skipfirst)
    # Optional warm-up (disabled here since skipfirst == 0).
    for k in range(skipfirst):
        step1 = s[t - step]
        x1 = (1.0 - leak) * x1 + leak * (inweights["U1"][:, step1] + cp.roll(x1, 1))
        x1 = x1 / cp.linalg.norm(x1)
        t += 1
    for b1 in range(tm1):
        step1 = s[t - step]
        # Leaky reservoir update followed by projection to the unit sphere.
        x1 = (1.0 - leak) * x1 + leak * (inweights["U1"][:, step1] + cp.roll(x1, 1))
        x1 = x1 / cp.linalg.norm(x1)
        # Numerically stabilized softmax over the readout.
        wx = cp.dot(variables["W1"], x1)
        wx = wx - cp.max(wx)
        p = cp.exp(wx)
        p1 = p / cp.sum(p)
        pred1 = cp.argmax(p1)
        target = s[t+1]
        target_prob1 = p1[target]
        softerr1 += 1 - target_prob1
        err1 = err1 + (pred1 != target)
        if testflag == 0:
            # Cross-entropy gradient for a softmax readout: (y - p) x^T.
            gradient["W1"] = cp.outer(v[:, target] - p1, x1)
            SGD.update_state(gradient)
            delta = SGD.get_delta()
            SGD.update_with_L1_regularization(variables, delta, L1)
        t += 1
    softerrors = dict()
    prederrors = dict()
    softerrors["lay1"] = softerr1 / (tm1)
    prederrors["lay1"] = err1 * 100.0 / (tm1)
    return prederrors, softerrors, variables
# ---- experiment configuration and parameter allocation ----
amath.setup(dycupy)
chunkfile = '/home/user01/dev/language-model/chunks256.p'
train1280 = '/home/user01/dev/language-model/train1280.p'
test128 = '/home/user01/dev/language-model/test128.p'
chunklist = pickle.load(open(chunkfile, "rb"))
layerscales = dict()
variables = dict()
inweights = dict()
L2 = dict()
L1 = dict()
steps = dict()
trainchunks = []
testchunks = []
cp.random.seed(481639)
# n: character n-gram size; stride: tokenization step.
n=2
stride = 1
# leaks = [0.382, 0.5, 0.618]
leak = 0.382
N = 1024
M = 1024
layerscales["L1"] = 3
# layerscales["L2"] = 3
# layerscales["L3"] = 2
## use 1-2-4-7-12-20-33-54-88, the fibonacci numbers look better as ##
## distances between points, rather than as the points themselves ##
savedweights = 0
step = 0
batchsize = 1
trainsize = 64
testsize = 32
interval = 128
lrate = 0.002
SGD = ADAM(alpha=lrate)
# Readout W1: M outputs from an (N * scale)-dimensional reservoir state;
# input weights U1 are random and normalized column-wise by init().
variables["W1"] = cp.zeros((M, N * layerscales["L1"]), dtype=np.float32)
inweights["U1"] = cp.random.rand(N * layerscales["L1"], M, dtype=np.float32)
SGD = SGD.set_shape(variables)
for key in variables:
    L1[key] = 0
    L2[key] = 0
inweights, v = init(M, N, inweights)
layersize = str(inweights["U1"].shape[0])
print("L1: {}".format(layersize))
print("Learning rate:", lrate, "Batch size:", batchsize)
print("step: {}".format(step))
# trainfile = '/home/user01/dev/language-model/train1m.p'
# testfile = '/home/user01/dev/language-model/test1m.p'
# trainlist = pickle.load(open(trainfile, "rb"))
# testlist = pickle.load(open(testfile, "rb"))
#
# for chunk in trainlist:
# intchunk = cp.array(chunk, dtype=np.int16)
# trainchunks.append(intchunk)
#
# for chunk in testlist:
# intchunk = cp.array(chunk, dtype=np.int16)
# testchunks.append(intchunk)
for j in range(trainsize):
chunk = chunklist[j]
sgi = []
for idx in range(0, len(chunk) - (n - 1), stride):
try:
sgi.append(gramindex[chunk[idx:idx + n]])
except:
print(chunk[idx:idx + n])
intchunk = cp.asarray(sgi, dtype=np.int16)
trainchunks.append(intchunk)
for k in range(trainsize, trainsize + testsize):
chunk = chunklist[k]
sgi = []
for idx in range(0, len(chunk) - (n - 1), stride):
try:
sgi.append(gramindex[chunk[idx:idx + n]])
except:
print(chunk[idx:idx + n])
intchunk = cp.asarray(sgi, dtype=np.int16)
testchunks.append(intchunk)
trainsize = len(trainchunks)
testsize = len(testchunks)
print("train size:", trainsize, "test size:", testsize, "layersize:", layersize)
print(leak)
### get kernel PCA states
# cpstates = cp.empty((0, N * layerscales["L1"]), dtype=np.float32)
# npstates = np.empty((0, N * layerscales["L1"]), dtype=np.float32)
# totalerr1 = 0
# totalstates = 0
# testflag = 0
# count = 0
# totalstart = time.perf_counter()
#
# for chunk in trainchunks:
#     count += 1
#     startp = time.perf_counter()
#     variables, cpstates = train_kcpa(inweights, v, variables, leak, batchsize, step, chunk, cpstates)
#     npstates = np.concatenate((npstates, cp.asnumpy(cpstates)))
#     cpstates = cp.empty((0, N * layerscales["L1"]), dtype=np.float32)
#     totalstates += len(chunk) - 2
#     if count % interval == 0:
#         elapsedp = time.perf_counter() - startp
#         totalelapsed = time.perf_counter() - totalstart
#         tm, ts = divmod(totalelapsed, 60)
#         print("\n", count, elapsedp, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
#         print("total states:", totalstates, "npstates:", npstates.shape)
#
# statefile = '/home/user01/dev/language-model/states' + layersize + "-" + str(step) + ".p"
# pickle.dump(npstates, open(statefile, "wb"))
# print("total states:", totalstates, "npstates:", npstates.shape)
# elapsedp = time.perf_counter() - startp
# totalelapsed = time.perf_counter() - totalstart
# tm, ts = divmod(totalelapsed, 60)
# print("\n", count, elapsedp, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
# shuffle(trainchunks)
# print(inweights["U1"], variables["W1"] )
# outweights = '/home/user01/dev/language-model/outweights' + layersize + "-" + str(step) + ".p"
# inweights = '/home/user01/dev/language-model/inweights' + layersize + "-" + str(step) + ".p"
# saved_outweights = pickle.load(open(outweights, "rb"))
# saved_inweights = pickle.load(open(inweights, "rb"))
# print(saved_inweights["U1"].shape, type(saved_inweights["U1"]))
# print(saved_outweights["W1"].shape, type(saved_outweights["W1"]))
# inweights["U1"] = saved_inweights["U1"]
# variables["W1"] = saved_outweights["W1"]
# print(inweights["U1"], variables["W1"] )
######################################################################
# Build the learning-rate suffix used in checkpoint filenames.
lrs = str(lrate)
lrs = "-" + lrs[2:]
lrs = "-" + "500"
# Optionally resume from previously pickled input/output weights.
if savedweights == 1:
    # winfile = '/home/user01/dev/language-model/inweights' + layersize + "-" + str(step) + lrs + ".p"
    # woutfile = '/home/user01/dev/language-model/outweights' + layersize + "-" + str(step) + lrs + ".p"
    winfile = '/home/user01/dev/language-model/inweights8192-0-382' + ".p"
    woutfile = '/home/user01/dev/language-model/inweights8192-0-382' + ".p"
    print("U: {}\nW: {}".format(winfile, woutfile))
    saved_inweights = pickle.load(open(winfile, "rb"))
    saved_outweights = pickle.load(open(woutfile, "rb"))
    inweights["U1"] = saved_inweights["U1"]
    variables["W1"] = saved_outweights["W1"]
    print(saved_inweights["U1"].shape, saved_outweights["W1"].shape)
# shuffle(trainchunks)
# shuffle(testchunks)
# totalstart = time.perf_counter()
testflag = 0
count = 0
# ---- main training loop: 64 epochs over the training chunks ----
for i in range(64):
    epocherr1 = 0
    epochpred1 = 0
    totalerr1 = 0
    prederr1 = 0
    # istart = time.perf_counter()
    startp = time.perf_counter()
    for chunk in trainchunks:
        count += 1
        prederrs, softerrs, variables = train(inweights, v, variables, leak, batchsize, step, testflag, chunk, count)
        # prederr1 += prederrs["lay1"]
        # totalerr1 += softerrs["lay1"]
        epochpred1 += prederrs["lay1"]
        epocherr1 += softerrs["lay1"]
        # if count % interval == 0:
        #     elapsedp = time.perf_counter() - startp
        #     totalelapsed = time.perf_counter() - totalstart
        #     tm, ts = divmod(totalelapsed, 60)
        #     totalerr1 = totalerr1 * 100 / interval
        #     prederr1 = prederr1 / interval
        #     print("\n", i, count, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
        #     print("Error: ", prederr1)
        #     print("Loss: ", totalerr1)
        #
        #     startp = time.perf_counter()
        #     totalerr1 = 0
        #     prederr1 = 0
    # Per-epoch timing and averaged error/loss reporting.
    elapsedp = time.perf_counter() - startp
    tm, ts = divmod(elapsedp, 60)
    print("\n", i, count, "-- {0:.0f}m {1:.0f}s".format(tm, ts))
    epocherr1 = epocherr1 * 100 / trainsize
    epochpred1 = epochpred1 / trainsize
    print("Error: ", epochpred1)
    print("Loss: ", epocherr1)
    shuffle(trainchunks)
    # Periodic evaluation pass (testflag=1 disables gradient updates).
    # NOTE(review): with range(64), `i % 128 == 0` only at i == 0, which is
    # excluded by `i > 0` -- this test block never runs as configured.
    if i > 0 and i % 128 == 0:
        totalerr1 = 0
        print("\n-----------\n Testing...\n-----------")
        testflag = 1
        for chunk in testchunks:
            prederrs, softerrs, variables = train(inweights, v, variables, leak, batchsize, step, testflag, chunk, count)
            totalerr1 += softerrs["lay1"]
        totalerr1 = totalerr1 * 100 / testsize
        print("Test Error:", prederrs["lay1"])
        print("Test Loss:", totalerr1)
        shuffle(testchunks)
        testflag = 0
# Persist final weights for later ensemble use.
lrs = "-" + "5012301"
winfile = '/home/user01/dev/language-model/inweights' + layersize + "-" + str(step) + lrs + ".p"
woutfile = '/home/user01/dev/language-model/outweights' + layersize + "-" + str(step) + lrs + ".p"
pickle.dump(inweights, open(winfile, "wb"))
pickle.dump(variables, open(woutfile, "wb"))
acf45b5f0f665e8d67dc0d204bb8d5f426439af0 | 4,818 | py | Python | vumi/transports/safaricom/safaricom.py | seidu626/vumi | 62eae205a07029bc7ab382086715694548001876 | [
"BSD-3-Clause"
] | 199 | 2015-01-05T09:04:24.000Z | 2018-08-15T17:02:49.000Z | vumi/transports/safaricom/safaricom.py | seidu626/vumi | 62eae205a07029bc7ab382086715694548001876 | [
"BSD-3-Clause"
] | 187 | 2015-01-06T15:22:38.000Z | 2018-07-14T13:15:29.000Z | vumi/transports/safaricom/safaricom.py | seidu626/vumi | 62eae205a07029bc7ab382086715694548001876 | [
"BSD-3-Clause"
] | 86 | 2015-01-31T02:47:08.000Z | 2018-12-01T11:59:47.000Z | # -*- test-case-name: vumi.transports.safaricom.tests.test_safaricom -*-
import json
from twisted.internet.defer import inlineCallbacks
from vumi.transports.httprpc import HttpRpcTransport
from vumi.message import TransportUserMessage
from vumi.components.session import SessionManager
from vumi import log
class SafaricomTransport(HttpRpcTransport):
    """
    HTTP transport for USSD with Safaricom in Kenya.
    :param str web_path:
        The HTTP path to listen on.
    :param int web_port:
        The HTTP port
    :param str transport_name:
        The name this transport instance will use to create its queues
    :param dict redis:
        The configuration parameters for connecting to Redis.
    :param int ussd_session_timeout:
        The number of seconds after which a timeout is forced on a transport
        level.
    """
    transport_type = 'ussd'
    ENCODING = 'utf-8'
    # Query-string fields Safaricom must supply on every inbound request.
    EXPECTED_FIELDS = set(['ORIG', 'DEST', 'SESSION_ID', 'USSD_PARAMS'])
    def validate_config(self):
        """Read transport-specific settings from the config dict."""
        super(SafaricomTransport, self).validate_config()
        self.transport_type = self.config.get('transport_type', 'ussd')
        self.redis_config = self.config.get('redis_manager', {})
        # Keys in Redis are namespaced per transport instance.
        self.r_prefix = "vumi.transports.safaricom:%s" % self.transport_name
        self.r_session_timeout = int(self.config.get("ussd_session_timeout",
                                                     600))
    @inlineCallbacks
    def setup_transport(self):
        """Start the Redis-backed USSD session manager."""
        super(SafaricomTransport, self).setup_transport()
        self.session_manager = yield SessionManager.from_redis_config(
            self.redis_config, self.r_prefix, self.r_session_timeout)
    @inlineCallbacks
    def teardown_transport(self):
        """Stop the session manager before the base teardown."""
        yield self.session_manager.stop()
        yield super(SafaricomTransport, self).teardown_transport()
    @inlineCallbacks
    def handle_raw_inbound_message(self, message_id, request):
        """Validate an inbound HTTP request and publish it as a user message.

        New sessions reconstruct the dialled USSD code as the to_addr;
        resumed sessions diff USSD_PARAMS against the stored value to
        recover only the user's newest input.
        """
        values, errors = self.get_field_values(request, self.EXPECTED_FIELDS)
        if errors:
            log.err('Unhappy incoming message: %s' % (errors,))
            yield self.finish_request(message_id, json.dumps(errors), code=400)
            return
        self.emit(('SafaricomTransport sending from %s to %s '
                   'for %s message "%s" (%s still pending)') % (
                       values['ORIG'], values['DEST'], values['SESSION_ID'],
                       values['USSD_PARAMS'], len(self._requests),
                   ))
        session_id = values['SESSION_ID']
        from_addr = values['ORIG']
        dest = values['DEST']
        ussd_params = values['USSD_PARAMS']
        session = yield self.session_manager.load_session(session_id)
        if session:
            to_addr = session['to_addr']
            last_ussd_params = session['last_ussd_params']
            # USSD_PARAMS is cumulative; strip the previously seen prefix
            # (and its leading '*' separator) to get the latest user reply.
            new_params = ussd_params[len(last_ussd_params):]
            if new_params:
                if last_ussd_params:
                    content = new_params[1:]
                else:
                    content = new_params
            else:
                content = ''
            session['last_ussd_params'] = ussd_params
            yield self.session_manager.save_session(session_id, session)
            session_event = TransportUserMessage.SESSION_RESUME
        else:
            # First request of a session: rebuild the full dialled code.
            if ussd_params:
                to_addr = '*%s*%s#' % (dest, ussd_params)
            else:
                to_addr = '*%s#' % (dest,)
            yield self.session_manager.create_session(session_id,
                from_addr=from_addr, to_addr=to_addr,
                last_ussd_params=ussd_params)
            session_event = TransportUserMessage.SESSION_NEW
            content = ''
        yield self.publish_message(
            message_id=message_id,
            content=content,
            to_addr=to_addr,
            from_addr=from_addr,
            provider='safaricom',
            session_event=session_event,
            transport_type=self.transport_type,
            transport_metadata={
                'safaricom': {
                    'session_id': session_id,
                }
            }
        )
    def handle_outbound_message(self, message):
        """Answer the pending HTTP request with 'CON'/'END' + the reply text."""
        missing_fields = self.ensure_message_values(message,
            ['in_reply_to', 'content'])
        if missing_fields:
            return self.reject_message(message, missing_fields)
        # 'END' closes the USSD session; 'CON' keeps it open for more input.
        if message['session_event'] == TransportUserMessage.SESSION_CLOSE:
            command = 'END'
        else:
            command = 'CON'
        self.finish_request(message['in_reply_to'],
            ('%s %s' % (command, message['content'])).encode(self.ENCODING))
        return self.publish_ack(user_message_id=message['message_id'],
            sent_message_id=message['message_id'])
acf45bd24d1f9ca9734fb18182513904bafbd0fd | 1,483 | py | Python | @utils/parser/domains/ship.py | RogulinSV/stellaris-aoe | 120d9114059b5e744c4025966e1f4b50e1d76200 | [
"Unlicense"
] | null | null | null | @utils/parser/domains/ship.py | RogulinSV/stellaris-aoe | 120d9114059b5e744c4025966e1f4b50e1d76200 | [
"Unlicense"
] | null | null | null | @utils/parser/domains/ship.py | RogulinSV/stellaris-aoe | 120d9114059b5e744c4025966e1f4b50e1d76200 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from parsing import BlockToken
from .common import Collection
class Ship(object):
    """A ship definition parsed from a configuration block.

    Fixes from the original: the setters were annotated ``-> int`` although
    they return None, and the ``class_name`` setter typed its string value as
    ``int``; the misleading annotations are removed and the ``BlockToken``
    annotation is a forward reference (string), which is runtime-equivalent.
    """

    def __init__(self, name: str):
        self.__name = name
        self.__size = None        # size multiplier; None until parsed
        self.__class_name = None  # ship class identifier; None until parsed

    def __str__(self):
        return self.__name

    @property
    def name(self) -> str:
        """The ship's identifier."""
        return self.__name

    @property
    def size(self):
        """Size multiplier of the ship (None when not set)."""
        return self.__size

    @size.setter
    def size(self, value):
        self.__size = value

    @property
    def class_name(self):
        """Class identifier of the ship (None when not set)."""
        return self.__class_name

    @class_name.setter
    def class_name(self, value):
        self.__class_name = value

    @staticmethod
    def from_token(token: "BlockToken"):
        """Build a Ship from a parsed block token's name and properties."""
        ship = Ship(token.name)
        ship.size = token.properties.get('size_multiplier')
        ship.class_name = token.properties.get('class')
        return ship
class Ships(Collection):
    """Collection of :class:`Ship` instances keyed by (name, class_name)."""

    def __contains__(self, ship: Ship):
        """Return True if *ship* (or an equivalent name/class pair) is present.

        :raises ValueError: if *ship* is not a Ship instance
        """
        if not isinstance(ship, Ship):
            raise ValueError('Unexpected argument')
        if ship in self._items:
            return True
        for item in self._items:  # type: Ship
            # Bug fix: the original compared ship.class_name with itself,
            # making the class check a tautology; compare against item.
            if item.name == ship.name and item.class_name == ship.class_name:
                return True
        return False

    def add(self, ship: Ship):
        """Add *ship* to the collection.

        :raises ValueError: if *ship* is not a Ship instance
        """
        if not isinstance(ship, Ship):
            raise ValueError('Unexpected argument')
        self._items.add(ship)
acf45c892d165e846a8f0d634e0c097046a416f3 | 1,893 | py | Python | setup.py | boromir674/python-semantic-release | 7a8540322f1308399653d10657e24a7b28943767 | [
"MIT"
] | null | null | null | setup.py | boromir674/python-semantic-release | 7a8540322f1308399653d10657e24a7b28943767 | [
"MIT"
] | null | null | null | setup.py | boromir674/python-semantic-release | 7a8540322f1308399653d10657e24a7b28943767 | [
"MIT"
] | null | null | null | import re
from setuptools import find_packages, setup
import sys
def _read_long_description():
try:
with open("readme.rst") as fd:
return fd.read()
except Exception:
return None
with open("semantic_release/__init__.py", "r") as fd:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
).group(1)
try:
from semantic_release import setup_hook
setup_hook(sys.argv)
except ImportError:
pass
setup(
name="python-semantic-release",
version=version,
url="http://github.com/relekang/python-semantic-release",
author="Rolf Erik Lekang",
author_email="me@rolflekang.com",
description="Automatic semantic versioning for python projects",
long_description=_read_long_description(),
packages=find_packages(exclude=("tests",)),
license="MIT",
install_requires=[
"click>=7,<8",
"click_log>=0.3,<1",
"gitpython>=3.0.8,<4",
"invoke>=1.4.1,<2",
"semver>=2.8,<3",
"twine>=3,<4",
"requests>=2.21,<3",
"wheel",
"toml==0.10.0",
"python-gitlab>=1.10,<2",
],
extras_require={
"test": [
"coverage>=5,<6",
"pytest>=5,<6",
"pytest-xdist>=1,<2",
"pytest-mock>=2,<3",
"responses==0.5.0",
"mock==1.3.0",
],
"docs": ["Sphinx==1.3.6"],
"dev": ["mypy", "tox", "isort", "black"],
},
entry_points="""
[console_scripts]
semantic-release=semantic_release.cli:entry
""",
include_package_data=True,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 25.931507 | 74 | 0.550449 |
acf45d3fdce5c2289e59535e4c4141a7b9435ac5 | 5,351 | py | Python | test/test_model.py | 2press/sc2monitor | a4c193fd946e54a03be2181d839875ddb956e621 | [
"MIT"
] | 1 | 2018-07-30T11:39:32.000Z | 2018-07-30T11:39:32.000Z | test/test_model.py | 2press/sc2-monitor | a4c193fd946e54a03be2181d839875ddb956e621 | [
"MIT"
] | 22 | 2019-01-03T11:34:30.000Z | 2021-05-03T19:50:10.000Z | test/test_model.py | 2press/sc2-monitor | a4c193fd946e54a03be2181d839875ddb956e621 | [
"MIT"
] | 1 | 2019-01-14T21:35:05.000Z | 2019-01-14T21:35:05.000Z | """Test the sc2monitor model."""
import pytest
from sc2monitor.model import League, Race, Result, Server
def test_result_win():
assert Result.get('win') == Result.Win
assert Result.get('Win') == Result.Win
assert Result.get('WIN') == Result.Win
assert Result.get('W') == Result.Win
assert Result.get('w') == Result.Win
assert Result.get(Result.Win) == Result.Win
assert Result.get(1) == Result.Win
assert Result.get(2) == Result.Win
assert Result.Win.change() == 1
assert Result.Win.short() == 'W'
assert str(Result.Win) == 'Win'
def test_result_loss():
assert Result.get('Loss') == Result.Loss
assert Result.get('loss') == Result.Loss
assert Result.get('LOSS') == Result.Loss
assert Result.get('L') == Result.Loss
assert Result.get('l') == Result.Loss
assert Result.get(Result.Loss) == Result.Loss
assert Result.get(-1) == Result.Loss
assert Result.get(-2) == Result.Loss
assert Result.Loss.change() == -1
assert Result.Loss.short() == 'L'
assert str(Result.Loss) == 'Loss'
def test_result_tie():
assert Result.get('Tie') == Result.Tie
assert Result.get('tie') == Result.Tie
assert Result.get('TIE') == Result.Tie
assert Result.get('T') == Result.Tie
assert Result.get('t') == Result.Tie
assert Result.get(Result.Tie) == Result.Tie
assert Result.get(0) == Result.Tie
assert Result.Tie.change() == 0
assert Result.Tie.short() == 'D'
assert str(Result.Tie) == 'Tie'
def test_result_unknown():
assert Result.get('') == Result.Unknown
assert Result.get(Result.Unknown) == Result.Unknown
assert Result.get('unknown') == Result.Unknown
assert Result.get('u') == Result.Unknown
assert Result.get('U') == Result.Unknown
assert Result.Unknown.change() == 0
assert Result.Unknown.short() == 'U'
assert str(Result.Unknown) == 'Unknown'
assert Result.get('asdasda') == Result.Unknown
def test_race():
def assert_race(race: str, assert_race: Race):
race = race.lower()
race_short = race[0]
assert Race.get(race) == assert_race
assert Race.get(race.upper()) == assert_race
assert Race.get(race.capitalize()) == assert_race
assert Race.get(race_short) == assert_race
assert Race.get(race_short.upper()) == assert_race
assert race_short.upper() == assert_race.short()
assert race.capitalize() == str(assert_race)
assert Race.get(assert_race) == assert_race
assert_race('zerg', Race.Zerg)
assert_race('protoss', Race.Protoss)
assert_race('terran', Race.Terran)
assert_race('random', Race.Random)
assert Race.get('') == Race.Random
with pytest.raises(ValueError):
Race.get('Human')
def test_server():
assert str(Server.America) == 'America'
assert str(Server.Europe) == 'Europe'
assert str(Server.Korea) == 'Korea'
assert Server.America.short() == 'us'
assert Server.Europe.short() == 'eu'
assert Server.Korea.short() == 'kr'
assert Server.America.id() == 1
assert Server.Europe.id() == 2
assert Server.Korea.id() == 3
def test_league():
def assert_league(league: str, assert_league: League, ident: int):
league = league.lower()
league_short = league[0:2]
assert League.get(league) == assert_league
assert League.get(league.upper()) == assert_league
assert League.get(league.capitalize()) == assert_league
assert League.get(league_short) == assert_league
assert League.get(league_short.upper()) == assert_league
assert League.get(assert_league) == assert_league
assert League.get(assert_league.value) == assert_league
assert League.get(ident) == assert_league
assert assert_league.id() == ident
assert league.capitalize() == str(assert_league)
if assert_league != League.Grandmaster:
assert League.get(league[0]) == assert_league
assert League.get(league[0].upper()) == assert_league
else:
assert League.get('GM') == assert_league
assert League.get('gm') == assert_league
assert_league('unranked', League.Unranked, -1)
assert_league('bronze', League.Bronze, 0)
assert_league('silver', League.Silver, 1)
assert_league('gold', League.Gold, 2)
assert_league('platinum', League.Platinum, 3)
assert_league('diamond', League.Diamond, 4)
assert_league('master', League.Master, 5)
assert_league('grandmaster', League.Grandmaster, 6)
assert League.get('') == League.Unranked
with pytest.raises(ValueError):
League.get('Test')
with pytest.raises(ValueError):
League.get(-2)
with pytest.raises(ValueError):
League.get(7)
assert League.Master < League.Grandmaster
assert League.Master <= League.Grandmaster
assert League.Gold > League.Silver
assert League.Gold >= League.Silver
assert League.Diamond > League.Unranked
assert League.Platinum >= League.Platinum
with pytest.raises(TypeError):
assert League.Master > 5
with pytest.raises(TypeError):
League.Master < 'Diamond' == NotImplemented
with pytest.raises(TypeError):
League.Master >= 5 == NotImplemented
with pytest.raises(TypeError):
League.Master <= 'Diamond' == NotImplemented
| 36.155405 | 70 | 0.652401 |
acf45dbb2e0c0e36d1f600c86f100c7d7ae215a2 | 525 | py | Python | iOS/bus-schedule/genTestSchedule.py | leochoo/ios-sfcbustimer | e7e49a3c7a8270583ab2907ad1d8f9826341ef5b | [
"MIT"
] | 4 | 2019-03-25T08:26:45.000Z | 2019-04-27T02:56:37.000Z | iOS/bus-schedule/genTestSchedule.py | leochoo/ios-sfcbustimer | e7e49a3c7a8270583ab2907ad1d8f9826341ef5b | [
"MIT"
] | 38 | 2018-10-12T06:13:46.000Z | 2019-04-06T16:44:33.000Z | iOS/bus-schedule/genTestSchedule.py | leochoo/SFC-Bustimer | e7e49a3c7a8270583ab2907ad1d8f9826341ef5b | [
"MIT"
] | 1 | 2019-04-27T03:09:33.000Z | 2019-04-27T03:09:33.000Z | import json
# Generate a dummy SFC bus schedule: one weekday entry per minute of the day.
testData = {
    "sfcsho": {
        "weekday": [
            {"hour": hour, "min": minute, "type": None, "rotary": False}
            for hour in range(24)
            for minute in range(60)
        ],
        # testData["sfcsho"]["sat"] = {}
        # testData["sfcsho"]["sun"] = {}
    },
}
# Dump the generated schedule so the app can load it as a test fixture.
with open('testData.json', 'w') as outfile:
    json.dump(testData, outfile, indent=4)
acf45e3017748c9e661e85b4b78aa77dab83dc9f | 764 | py | Python | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test74.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | [
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 450 | 2015-09-05T09:12:51.000Z | 2018-08-30T01:45:36.000Z | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test74.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | [
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 1,274 | 2015-09-22T20:06:16.000Z | 2018-08-31T22:14:00.000Z | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test74.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | [
"Artistic-1.0-Perl",
"ISC",
"bzip2-1.0.5",
"TCL",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"PostgreSQL",
"BSD-3-Clause"
] | 278 | 2015-09-21T19:15:06.000Z | 2018-08-31T00:36:51.000Z | 'test __getattr[ibute]__ returning None'
class A:
    'no warning'
    def __getattr__(self, attr):
        # Returns a value on every path, so the checker should not warn.
        return attr
class B:
    'no warning'
    def __getattr__(self, attr):
        # Same as A but via __getattribute__: always returns a value.
        return attr
class C:
    'warning'
    def __getattr__(self, attr):
        # Implicitly returns None for every attribute - the warning case.
        pass
class D:
    'warning'
    def __getattribute__(self, attr):
        # Implicitly returns None - the __getattribute__ warning case.
        pass
class E:
    'warning'
    def __getattr__(self, attr):
        if attr == 'n':
            return attr
        if attr != 'j':
            raise AttributeError
        # Falls through for attr == 'j' and implicitly returns None - warning.
class F:
    'no warning'
    def __getattr__(self, attr):
        if attr == 'n':
            return attr
        # Every other attribute raises, so no path returns None.
        raise AttributeError
class G:
    'should not gen a warning'
    def __getattr__(self, name):
        # Delegates through attribute 'a'; note this re-enters __getattr__
        # (recursing) unless 'a' is set directly on the instance.
        return getattr(self, 'a')[name]
| 18.190476 | 40 | 0.570681 |
acf45fa21b1e21500ba8ab2f0af5585ea4b2b8b7 | 1,416 | py | Python | fullcyclepy/backend/temperature/tempsensor.py | dfoderick/fullcyclemining | b53a35b1b051db27d947f2768c96712ad01f2328 | [
"MIT"
] | 26 | 2018-05-01T15:25:09.000Z | 2021-12-16T20:48:19.000Z | fullcyclepy/backend/temperature/tempsensor.py | dfoderick/fullcyclemining | b53a35b1b051db27d947f2768c96712ad01f2328 | [
"MIT"
] | 13 | 2018-04-23T13:45:31.000Z | 2018-12-20T16:13:06.000Z | fullcyclepy/backend/temperature/tempsensor.py | dfoderick/fullcyclemining | b53a35b1b051db27d947f2768c96712ad01f2328 | [
"MIT"
] | 12 | 2018-05-01T20:34:05.000Z | 2021-12-16T20:48:20.000Z | '''this one pushes to mydevices'''
import time
import sys
import paho.mqtt.client as mqtt
import Adafruit_DHT
print('Waiting 30 seconds in case wireless needs to initialize...')
time.sleep(30) #Sleep to allow wireless to connect before starting MQTT
#TODO: Move to config
USERNAME = "mydevices_name"
PASSWORD = "mydevices_password"
CLIENTID = "mydevices_clientid"
MQTTC = mqtt.Client(client_id=CLIENTID)
MQTTC.username_pw_set(USERNAME, password=PASSWORD)
MQTTC.connect("mqtt.mydevices.com", port=1883, keepalive=60)
MQTTC.loop_start()
TOPIC_TEMP = "v1/" + USERNAME + "/things/" + CLIENTID + "/data/3"
TOPIC_HUMIDITY = "v1/" + USERNAME + "/things/" + CLIENTID + "/data/4"
while True:
try:
#pin 2 or 4 = power, pin 6 = gnd, pin 7 = gpio4
#https://www.raspberrypi.org/documentation/usage/gpio-plus-and-raspi2/README.md
HUMIDITY22, TEMP22 = Adafruit_DHT.read_retry(22, 4)
#22 is the sensor type, 4 is the GPIO pin number (not physical pin number)
if TEMP22 is not None:
TEMP22 = "temp,c=" + str(TEMP22)
MQTTC.publish(TOPIC_TEMP, payload=TEMP22, retain=True)
if HUMIDITY22 is not None:
HUMIDITY22 = "rel_hum,p=" + str(HUMIDITY22)
MQTTC.publish(TOPIC_HUMIDITY, payload=HUMIDITY22, retain=True)
time.sleep(5)
except (EOFError, SystemExit, KeyboardInterrupt):
MQTTC.disconnect()
sys.exit()
| 35.4 | 87 | 0.684322 |
acf460c35b95393697bc6eafe3352554feafe775 | 405 | py | Python | internet_forum/internet_forum/asgi.py | helf4ch/web-projects | c45e8e262f5c2914a0019533f3e7b075655038fa | [
"MIT"
] | null | null | null | internet_forum/internet_forum/asgi.py | helf4ch/web-projects | c45e8e262f5c2914a0019533f3e7b075655038fa | [
"MIT"
] | null | null | null | internet_forum/internet_forum/asgi.py | helf4ch/web-projects | c45e8e262f5c2914a0019533f3e7b075655038fa | [
"MIT"
] | null | null | null | """
ASGI config for internet_forum project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'internet_forum.settings')
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 |
acf4616793456002d296c505386e682764e4ff14 | 8,706 | py | Python | msticpy/common/secret_settings.py | GiuseppeLaurenza/msticpy | 37f96126b1e7ed06d3d140e340cdf86d6eee440b | [
"MIT"
] | null | null | null | msticpy/common/secret_settings.py | GiuseppeLaurenza/msticpy | 37f96126b1e7ed06d3d140e340cdf86d6eee440b | [
"MIT"
] | null | null | null | msticpy/common/secret_settings.py | GiuseppeLaurenza/msticpy | 37f96126b1e7ed06d3d140e340cdf86d6eee440b | [
"MIT"
] | 1 | 2022-02-06T18:56:15.000Z | 2022-02-06T18:56:15.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Settings provider for secrets."""
import re
from functools import partial
from typing import Any, Callable, Dict, Optional, Set, Tuple
import keyring
from keyring.errors import KeyringError, KeyringLocked
from .._version import VERSION
from . import pkg_config as config
from .exceptions import MsticpyKeyVaultConfigError
from .keyvault_client import BHKeyVaultClient
from .keyvault_settings import KeyVaultSettings
from .utility import export
__version__ = VERSION
__author__ = "Ian Hellen"
@export
class KeyringClient:
    """Keyring client wrapper.

    Thin wrapper around the OS keyring used to cache secrets under a single
    credential group (service name).
    """

    def __init__(self, name: str = "key-cache", debug: bool = False):
        """
        Initialize the keyring client.

        Parameters
        ----------
        name : str, optional
            Name of the credential group, by default "key-cache"
        debug : bool, optional
            Output debug info, by default False

        """
        self.debug = debug
        self.keyring = name
        # Names of secrets stored via set_secret during this session.
        self._secret_names: Set[str] = set()

    def __getitem__(self, key: str):
        """
        Return the secret stored under `key`.

        Raises
        ------
        KeyError
            If no secret is stored under `key`.

        """
        cred = self.get_secret(key)
        if cred:
            return cred
        # Include the key in the exception so callers can see what was missing
        # (previously raised a bare KeyError with no context).
        raise KeyError(key)

    def get_secret(self, secret_name: str) -> Any:
        """
        Retrieve a secret from the keyring.

        Parameters
        ----------
        secret_name : str
            Secret name.

        Returns
        -------
        Any
            Secret value, or None if not found or the keyring is unavailable.

        """
        secret = None
        if self.debug:
            print(f"Fetching {secret_name} from keyring")
        try:
            secret = keyring.get_password(self.keyring, secret_name)
        except (KeyringError, KeyringLocked):
            # A locked or broken keyring is treated as "no secret" rather than
            # an error - callers fall back to other secret sources.
            if self.debug:
                print(
                    "Keyring error retrieving credentials",
                    f"for {secret_name} from keyring {self.keyring}",
                )
        if not secret and self.debug:
            print("No credentials", f"for {secret_name} from keyring {self.keyring}")
        return secret

    def set_secret(self, secret_name: str, secret_value: Any):
        """
        Set a secret in the keyring group.

        Parameters
        ----------
        secret_name : str
            Name of the secret
        secret_value : Any
            Secret value

        """
        if self.debug:
            print(f"Saving {secret_name} to keyring {self.keyring}")
        self._secret_names.add(secret_name)
        keyring.set_password(self.keyring, secret_name, secret_value)
@export
class SecretsClient:
    """Secrets client - manages keyvault and keyring secrets."""

    def __init__(self, tenant_id: str = None, use_keyring: bool = True):
        """
        Initialize SecretsClient instance.

        Parameters
        ----------
        tenant_id : str, optional
            TenantID, by default None
        use_keyring : bool, optional
            If True use keyring to cache secrets, by default True

        Raises
        ------
        MsticpyKeyVaultConfigError
            Missing or invalid configuration settings.

        Notes
        -----
        Requires KeyVault settings to be defined in msticpyconfig.yaml

        """
        self._kv_settings = KeyVaultSettings()

        self.tenant_id = tenant_id or self._kv_settings.get("tenantid")
        if not self.tenant_id:
            raise MsticpyKeyVaultConfigError(
                "Could not get TenantId from function parameters or configuration.",
                "Please add this to the KeyVault section of msticpyconfig.yaml",
                title="missing tenant ID value.",
            )
        # Maps secret name -> vault name, and vault name -> client instance.
        self.kv_secret_vault: Dict[str, str] = {}
        self.kv_vaults: Dict[str, BHKeyVaultClient] = {}
        # NOTE(review): with the default use_keyring=True this expression is
        # always True - passing use_keyring=False still enables keyring when
        # the config sets UseKeyring. Confirm this precedence is intended.
        self._use_keyring = use_keyring or self._kv_settings.get("UseKeyring", False)
        if self._use_keyring:
            self._keyring_client = KeyringClient("Providers")

    def get_secret_accessor(self, setting_path: str) -> Callable[[], Any]:
        """
        Return accessor function for a secret.

        Parameters
        ----------
        setting_path : str
            The msticpy configuration path (dot-separated)

        Returns
        -------
        Callable[[None], Any]
            Accessor function for the secret value.

        """
        vault_name, secret_name = self._get_kv_vault_and_name(setting_path)
        if vault_name is None or secret_name is None:
            # Plain-text setting (no KeyVault entry): wrap the literal value.
            return lambda: secret_name if secret_name else ""
        return self._get_secret_func(secret_name, vault_name)

    def _add_key_vault(self, vault_name: str, secret_name: str):
        """Add the KeyVault instance responsible for storing `secret_name`."""
        vault = self.kv_vaults.get(vault_name)
        if not vault:
            vault = BHKeyVaultClient(self.tenant_id, vault_name=vault_name)
            self.kv_vaults[vault_name] = vault
        self.kv_secret_vault[secret_name] = vault_name

    @staticmethod
    def format_kv_name(setting_path):
        """Return normalized name for use as a KeyVault secret name."""
        # KeyVault secret names only allow alphanumerics and dashes.
        return re.sub("[^0-9a-zA-Z-]", "-", setting_path)

    def _get_kv_vault_and_name(
        self, setting_path: str
    ) -> Tuple[Optional[str], Optional[str]]:
        """Return the vault and secret name for a config path."""
        setting_item = config.get_config(setting_path)

        if not isinstance(setting_item, dict):
            # Plain value in config - no vault involved.
            return None, str(setting_item)
        if "KeyVault" in setting_item:
            kv_val = setting_item.get("KeyVault")
            def_vault_name = self._kv_settings.get("VaultName")
            if not kv_val or kv_val.casefold() == "default":
                # If no value, get the default VaultName from settings
                # and use the setting path as the secret name
                if not def_vault_name:
                    # NOTE(review): this branch raises plain ValueError while
                    # the equivalent case below raises
                    # MsticpyKeyVaultConfigError - confirm inconsistency.
                    raise ValueError("No VaultName defined in KeyVault settings.")
                secret_name = self.format_kv_name(setting_path)
                return def_vault_name, secret_name
            if "/" in kv_val:
                # '/' delimited string means VaultName/Secret
                vault_name, secret_name = kv_val.split("/")
                return vault_name, self.format_kv_name(secret_name)
            if not def_vault_name:
                raise MsticpyKeyVaultConfigError(
                    "Check that you have specified the right value for VaultName"
                    + " in your configuration",
                    f"No VaultName defined in KeyVault settings for {setting_path}.",
                    title="Key Vault vault name not found.",
                )
            # If there is a single string - take that as the secret name
            return def_vault_name, self.format_kv_name(kv_val)
        return None, None

    def _get_secret_func(self, secret_name: str, vault_name: str) -> Callable[[], Any]:
        """Return a func to access a secret."""
        if self._use_keyring and self._keyring_client.get_secret(secret_name):
            return self._create_secret_func(self._keyring_client, secret_name)

        # If the secret is not in keyring, get the vault holding this secret
        if not self.kv_secret_vault.get(secret_name):
            self._add_key_vault(secret_name=secret_name, vault_name=vault_name)

        vault = self.kv_vaults[vault_name]
        if self._use_keyring:
            # store the secret in keyring and return an accessor
            # to the keyring value.
            self._keyring_client.set_secret(secret_name, vault.get_secret(secret_name))
            return self._create_secret_func(self._keyring_client, secret_name)
        # if not using Keyring - return a KeyVault accessor
        return self._create_secret_func(vault, secret_name)

    @staticmethod
    def _create_secret_func(secret_store, secret_name):
        # Bind the store and name so the accessor can be called with no args.
        return partial(secret_store.get_secret, secret_name=secret_name)

    @staticmethod
    def read_secret(secret_object: Any) -> Any:
        """
        Return the secret value.

        Parameters
        ----------
        secret_object : Any
            If it is a func, call and return the return value
            of that func. Otherwise just return the object.

        Returns
        -------
        Any
            The secret value

        """
        if callable(secret_object):
            return secret_object()
        return secret_object
| 35.104839 | 87 | 0.602113 |
acf461dca7e9d53e09fe578a58a520e887ea8315 | 1,985 | py | Python | Hard/WordTransformer.py | roeiherz/CodingInterviews | 1737a86692aef7f0b1f1d7a481a1db563d9dcf6b | [
"MIT"
] | null | null | null | Hard/WordTransformer.py | roeiherz/CodingInterviews | 1737a86692aef7f0b1f1d7a481a1db563d9dcf6b | [
"MIT"
] | null | null | null | Hard/WordTransformer.py | roeiherz/CodingInterviews | 1737a86692aef7f0b1f1d7a481a1db563d9dcf6b | [
"MIT"
] | null | null | null | __author__ = 'roeiherz'
"""
Given two words of equal length that are in a dict, write a method to transform one word into another word by
changing only one letter at a time. The new word you get in each step must be in the dict.
Example: DAMP, LIKE : DAMP -> LAMP -> LIMP -> LIME -> LIKE
"""
def create_wild_card(words):
    """Map every one-letter wildcard pattern to the dictionary words it matches.

    For each word and each character position a pattern such as ``d*mp`` is
    built; its value is the list of dictionary words (ordered by the
    substituted letter, 'a'..'z') matching that pattern.

    Fixes: the inner candidate loop previously shadowed the outer position
    index ``i``; membership tests now use a set (O(1)) instead of scanning
    the word list for every candidate.

    Parameters
    ----------
    words : iterable of str
        Lowercase dictionary words.

    Returns
    -------
    dict
        Mapping of wildcard pattern -> list of matching words.
    """
    word_set = set(words)  # O(1) membership instead of O(n) list scans
    mapp = {}
    for word in words:
        for pos in range(len(word)):
            wild_card = word[:pos] + "*" + word[pos + 1:]
            candidates = []
            for code in range(ord('a'), ord('z') + 1):
                candidate = word[:pos] + chr(code) + word[pos + 1:]
                if candidate in word_set:
                    candidates.append(candidate)
            mapp[wild_card] = candidates
    return mapp
def word_transformer(curr_word, end_word, wild_cards, visited):
    """Depth-first search for a one-letter-at-a-time transformation path.

    Extends ``visited`` (mutated in place) with the chain of words from
    ``curr_word`` to ``end_word`` and returns it, or returns None when no
    path exists.

    Fixes: dead-end branches are now popped from ``visited`` on backtrack,
    so the returned list contains only the actual transformation chain -
    previously abandoned branches were left in the "path".

    Parameters
    ----------
    curr_word : str
        Current (lowercase) word in the search.
    end_word : str
        Target word.
    wild_cards : dict
        Wildcard pattern -> candidate words (see create_wild_card).
    visited : list
        Accumulator for the path; pass [] at the top-level call.
    """
    if curr_word == end_word:
        visited.append(curr_word)
        return visited
    if curr_word in visited:
        return None

    visited.append(curr_word)
    for i in range(len(curr_word)):
        wild_card = curr_word[:i] + "*" + curr_word[i + 1:]
        for new_word in wild_cards[wild_card]:
            if word_transformer(new_word, end_word, wild_cards, visited) is not None:
                return visited
    # Dead end: remove curr_word so it doesn't pollute the returned path.
    visited.pop()
    return None
if __name__ == '__main__':
    # Demo: DAMP -> LAMP -> LIMP -> LIME -> LIKE using the words below.
    start_word = 'DAMP'
    end_word = "LIKE"
    words = ["DAMP", "LAMP", "LIMP", "LIME", "LIKE"]
    # NOTE(review): only end_word is validated against the dict; start_word
    # is assumed present - confirm whether it should be checked as well.
    if end_word not in words:
        print("The end word is not in dict")
        exit()
    # Lower case
    start_word = start_word.lower()
    end_word = end_word.lower()
    words = [word.lower() for word in words]
    # Preprocess: map each one-letter wildcard pattern to matching words.
    wild_cards = create_wild_card(words)
    visited = []
    # Main
    res = word_transformer(start_word, end_word, wild_cards, visited)
    if res is None:
        print("None")
    else:
        # Upper case for display (visited was mutated in place by the search).
        visited = [visit.upper() for visit in visited]
        print(visited)
acf461f093fddac8af9f16794fb443c58acc893d | 4,718 | py | Python | test/test_projects_api.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | test/test_projects_api.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | test/test_projects_api.py | cons3rt/cons3rt-python-sdk | f0bcb295735ac55bbe47448fcbd95d2c7beb3ec0 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
CONS3RT Web API
A CONS3RT ReSTful API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: Fred@gigagantic-server.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import cons3rt
from cons3rt.api.projects_api import ProjectsApi # noqa: E501
from cons3rt.rest import ApiException
class TestProjectsApi(unittest.TestCase):
    """ProjectsApi unit test stubs"""

    # NOTE: OpenAPI-generator style stubs - every test body is a placeholder
    # (`pass`) awaiting a real implementation against a CONS3RT endpoint.

    def setUp(self):
        # Fresh API client per test.
        self.api = cons3rt.api.projects_api.ProjectsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_add_project_member(self):
        """Test case for add_project_member

        Assign project member  # noqa: E501
        """
        pass

    def test_add_role_to_project_member(self):
        """Test case for add_role_to_project_member

        Assign role to member  # noqa: E501
        """
        pass

    def test_add_submission_service_to_project(self):
        """Test case for add_submission_service_to_project

        Add submission service  # noqa: E501
        """
        pass

    def test_add_trusted_project1(self):
        """Test case for add_trusted_project1

        Assign trusted project to project  # noqa: E501
        """
        pass

    def test_create_project(self):
        """Test case for create_project

        Create a project  # noqa: E501
        """
        pass

    def test_delete_project(self):
        """Test case for delete_project

        Delete project  # noqa: E501
        """
        pass

    def test_get_host_configuration_metrics(self):
        """Test case for get_host_configuration_metrics

        Retrieve metrics  # noqa: E501
        """
        pass

    def test_get_project(self):
        """Test case for get_project

        Retrieve project  # noqa: E501
        """
        pass

    def test_get_project_virt_realms(self):
        """Test case for get_project_virt_realms

        List virtualization realms  # noqa: E501
        """
        pass

    def test_get_projects(self):
        """Test case for get_projects

        List joined projects  # noqa: E501
        """
        pass

    def test_get_projects_expanded(self):
        """Test case for get_projects_expanded

        List unjoined projects  # noqa: E501
        """
        pass

    def test_get_virtual_machine_count_metrics(self):
        """Test case for get_virtual_machine_count_metrics

        Retrieve virtual machine metrics  # noqa: E501
        """
        pass

    def test_list_members(self):
        """Test case for list_members

        List members  # noqa: E501
        """
        pass

    def test_list_submission_serivces_for_project(self):
        """Test case for list_submission_serivces_for_project

        List submission services  # noqa: E501
        """
        # NOTE(review): "serivces" typo mirrors the generated API method name;
        # keep in sync with the client if it is ever corrected.
        pass

    def test_remove_project_member(self):
        """Test case for remove_project_member

        Unassign member from project  # noqa: E501
        """
        pass

    def test_remove_role_from_project_member(self):
        """Test case for remove_role_from_project_member

        Unassign role from member  # noqa: E501
        """
        pass

    def test_remove_submission_service_from_project(self):
        """Test case for remove_submission_service_from_project

        Remove submission service  # noqa: E501
        """
        pass

    def test_remove_trusted_project1(self):
        """Test case for remove_trusted_project1

        Unassign trusted project from project  # noqa: E501
        """
        pass

    def test_request_project_invitation(self):
        """Test case for request_project_invitation

        Create invitation code  # noqa: E501
        """
        pass

    def test_set_project_default_power_schedule(self):
        """Test case for set_project_default_power_schedule

        Update default power schedule  # noqa: E501
        """
        pass

    def test_set_project_default_virtualization_realm(self):
        """Test case for set_project_default_virtualization_realm

        Update default virtualization realm  # noqa: E501
        """
        pass

    def test_set_project_itar_information(self):
        """Test case for set_project_itar_information

        Set asset export restriction  # noqa: E501
        """
        pass

    def test_update_project(self):
        """Test case for update_project

        Update project  # noqa: E501
        """
        pass

    def test_update_submission_service(self):
        """Test case for update_submission_service

        Update submission service  # noqa: E501
        """
        pass
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| 23.241379 | 71 | 0.6312 |
acf4620c884cf672fb21b0ef270bc9659b353a19 | 2,069 | py | Python | src/oci/core/models/drg_attachment_info.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/core/models/drg_attachment_info.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/core/models/drg_attachment_info.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DrgAttachmentInfo(object):
    """
    The `DrgAttachmentInfo` resource contains the `OCID`__ of the DRG attachment.

    __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
    """

    def __init__(self, **kwargs):
        """
        Initializes a new DrgAttachmentInfo object with values from keyword
        arguments.

        The following keyword arguments are supported (corresponding to the
        getters/setters of this class):

        :param id:
            The value to assign to the id property of this DrgAttachmentInfo.
        :type id: str

        """
        # Type/attribute maps consumed by the generic kwargs initializer
        # and the serialization helpers.
        self.swagger_types = {'id': 'str'}
        self.attribute_map = {'id': 'id'}
        self._id = None

    @property
    def id(self):
        """
        **[Required]** Gets the id of this DrgAttachmentInfo.
        The Oracle-assigned ID of the DRG attachment

        :return: The id of this DrgAttachmentInfo.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this DrgAttachmentInfo.
        The Oracle-assigned ID of the DRG attachment

        :param id: The id of this DrgAttachmentInfo.
        :type: str
        """
        self._id = id

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None never compares equal; otherwise compare attribute dicts.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| 28.342466 | 245 | 0.639923 |
acf4627fd172cd8d14913b71448a86c12def3b51 | 1,797 | py | Python | adminlte_base/contrib/sqla.py | kyzima-spb/adminlte-base | ebde36922b249144bec821f35716eb11466c0a83 | [
"MIT"
] | null | null | null | adminlte_base/contrib/sqla.py | kyzima-spb/adminlte-base | ebde36922b249144bec821f35716eb11466c0a83 | [
"MIT"
] | null | null | null | adminlte_base/contrib/sqla.py | kyzima-spb/adminlte-base | ebde36922b249144bec821f35716eb11466c0a83 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, String, Text, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declared_attr
from ..data_types import MenuItem
from ..mixins import MenuItemMixin as _MenuItemMixin, MenuMixin as _MenuMixin
__all__ = (
'MenuItemMixin', 'create_entity_menu_item',
)
class MenuItemMixin(_MenuItemMixin):
    """SQLAlchemy declarative mapping for a single menu entry."""

    __tablename__ = 'menu_item'

    id = Column(Integer, primary_key=True)

    # declared_attr defers column/relationship creation until the concrete
    # model class is assembled (see create_entity_menu_item below).
    @declared_attr
    def menu_id(cls):
        # Owning menu; every item must belong to a menu.
        return Column(ForeignKey('menu.id'), nullable=False)

    @declared_attr
    def parent_id(cls):
        # Optional parent item - NULL for top-level entries.
        return Column(ForeignKey('menu_item.id'))

    @declared_attr
    def parent(cls):
        # Self-referential relationship to the parent menu item.
        return relationship('MenuItem', remote_side=[cls.id])

    type = Column(String(20), default=MenuItem.TYPE_LINK, nullable=False)
    title = Column(String(500), nullable=False)
    url = Column(Text, default='', nullable=False)
    # endpoint plus serialized args/kwargs - presumably used to build the URL
    # dynamically as an alternative to a fixed `url`; verify against callers.
    endpoint = Column(String(255), default='', nullable=False)
    endpoint_args = Column(Text, default='', nullable=False)
    endpoint_kwargs = Column(Text, default='', nullable=False)
    icon = Column(String(50), default='', nullable=False)
    help = Column(String(500), default='', nullable=False)
    pos = Column(Integer, default=0, nullable=False)  # sort position
class MenuMixin(_MenuMixin):
    """SQLAlchemy declarative mapping for a navigation menu."""

    __tablename__ = 'menu'

    id = Column(Integer, primary_key=True)
    title = Column(String(500), nullable=False)
    # Unique, indexed identifier for the menu.
    program_name = Column(String(255), unique=True, index=True, nullable=False)

    @declared_attr
    def items(cls):
        # Menu items are eagerly loaded with the menu (joined-eager load).
        return relationship('MenuItem', backref='menu', lazy='joined')
def create_entity_menu_item(db):
    """Build the concrete ``MenuItem`` model bound to the given SQLAlchemy ``db``."""
    bases = (db.Model, MenuItemMixin)
    return type('MenuItem', bases, {})
def create_entity_menu(db):
    """Build the concrete ``Menu`` model bound to the given SQLAlchemy ``db``."""
    bases = (db.Model, MenuMixin)
    return type('Menu', bases, {})
| 29.95 | 79 | 0.706733 |
acf4634f4227eda14df2f729a5bf40427c399f15 | 12,766 | py | Python | lib/python2.7/site-packages/ryu/services/protocols/bgp/rtconf/common.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | 2 | 2019-10-03T09:08:08.000Z | 2021-02-02T07:15:21.000Z | lib/python2.7/site-packages/ryu/services/protocols/bgp/rtconf/common.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | null | null | null | lib/python2.7/site-packages/ryu/services/protocols/bgp/rtconf/common.py | nishaero/wifi-userseg-ryu | 1132f2c813b79eff755bdd1a9e73e7ad3980af7c | [
"Apache-2.0"
] | 2 | 2018-07-17T14:10:14.000Z | 2019-10-03T09:08:15.000Z | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Runtime configuration that applies to all bgp sessions, i.e. global settings.
"""
import logging
import numbers
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
from ryu.services.protocols.bgp.utils.validation import is_valid_asn
from ryu.services.protocols.bgp import rtconf
from ryu.services.protocols.bgp.rtconf.base import BaseConf
from ryu.services.protocols.bgp.rtconf.base import BaseConfListener
from ryu.services.protocols.bgp.rtconf.base import compute_optional_conf
from ryu.services.protocols.bgp.rtconf.base import ConfigTypeError
from ryu.services.protocols.bgp.rtconf.base import ConfigValueError
from ryu.services.protocols.bgp.rtconf.base import MissingRequiredConf
from ryu.services.protocols.bgp.rtconf.base import validate
LOG = logging.getLogger('bgpspeaker.rtconf.common')


# Global configuration settings.
# These string constants are the configuration keys for each setting.
LOCAL_AS = 'local_as'
ROUTER_ID = 'router_id'
LABEL_RANGE = 'label_range'
LABEL_RANGE_MAX = 'max'
LABEL_RANGE_MIN = 'min'

# Configuration that can be set at global level as well as per context
# (session/vrf) level
# Nested configuration override global or higher level configuration as they
# are more granular.
# TODO(apgw-dev) Nested configuration overriding higher level configuration is
# currently low priority

# Similar to Cisco command 'bgp refresh stalepath-time'. To cause the router to
# remove stale routes from the BGP table even if the router does not receive a
# Route-Refresh EOR message The bgp refresh stalepath-time command is not
# needed under normal circumstances.
# TODO(PH): Support this feature (currently low priority)
REFRESH_STALEPATH_TIME = 'refresh_stalepath_time'

# Similar to Cisco command 'bgp refresh max-eor-time'. The bgp refresh max-eor-
# time command is not needed under normal circumstances. You might configure
# the bgp refresh max-eor-time command in the event of continuous route
# flapping, when the router is unable to generate a Route- Refresh EOR message,
# in which case a Route-Refresh EOR is generated after the timer expires.
# TODO(PH): Support this feature (currently low priority)
REFRESH_MAX_EOR_TIME = 'refresh_max_eor_time'

BGP_CONN_RETRY_TIME = 'bgp_conn_retry_time'
BGP_SERVER_PORT = 'bgp_server_port'
TCP_CONN_TIMEOUT = 'tcp_conn_timeout'
MAX_PATH_EXT_RTFILTER_ALL = 'maximum_paths_external_rtfilter_all'

# Valid default values of some settings.
DEFAULT_LABEL_RANGE = (100, 100000)
DEFAULT_REFRESH_STALEPATH_TIME = 0
DEFAULT_REFRESH_MAX_EOR_TIME = 0
DEFAULT_BGP_SERVER_PORT = 179
DEFAULT_TCP_CONN_TIMEOUT = 30
DEFAULT_BGP_CONN_RETRY_TIME = 30
# NOTE(review): DEFAULT_MED has no matching setting key in this module -
# presumably consumed elsewhere; confirm before removing.
DEFAULT_MED = 0
DEFAULT_MAX_PATH_EXT_RTFILTER_ALL = True
@validate(name=LOCAL_AS)
def validate_local_as(asn):
    """Validate the mandatory local AS number via `is_valid_asn`."""
    if asn is None:
        raise MissingRequiredConf(conf_name=LOCAL_AS)

    if not is_valid_asn(asn):
        raise ConfigValueError(desc='Invalid local_as configuration value: %s'
                               % asn)
    return asn
@validate(name=ROUTER_ID)
def validate_router_id(router_id):
    """Validate the BGP router-id: required, a string, and dotted IPv4."""
    if not router_id:
        raise MissingRequiredConf(conf_name=ROUTER_ID)
    if not isinstance(router_id, str):
        raise ConfigTypeError(conf_name=ROUTER_ID)
    if is_valid_ipv4(router_id):
        return router_id
    raise ConfigValueError(desc='Invalid router id %s' % router_id)
@validate(name=REFRESH_STALEPATH_TIME)
def validate_refresh_stalepath_time(rst):
    """Validate refresh stalepath time: must be a non-negative integer."""
    if isinstance(rst, numbers.Integral):
        if rst >= 0:
            return rst
        raise ConfigValueError(desc='Invalid refresh stalepath time %s' % rst)
    raise ConfigTypeError(desc=('Configuration value for %s has to be '
                                'integral type' % REFRESH_STALEPATH_TIME))
@validate(name=REFRESH_MAX_EOR_TIME)
def validate_refresh_max_eor_time(rmet):
    """Validate refresh max-EOR time: must be a non-negative integer."""
    if not isinstance(rmet, numbers.Integral):
        raise ConfigTypeError(desc=('Configuration value for %s has to be of '
                                    'integral type ' % REFRESH_MAX_EOR_TIME))
    if rmet < 0:
        # Fixed copy/paste error: the message previously said
        # "refresh stalepath time" although this validates max-EOR time.
        raise ConfigValueError(desc='Invalid refresh max. EOR time %s' % rmet)
    return rmet
@validate(name=LABEL_RANGE)
def validate_label_range(label_range):
    """Validate an MPLS label range as an integral (min, max) pair."""
    min_label, max_label = label_range
    valid = (
        bool(min_label)
        and bool(max_label)
        and isinstance(min_label, numbers.Integral)
        and isinstance(max_label, numbers.Integral)
        and min_label >= 17
        and min_label < max_label
    )
    if not valid:
        raise ConfigValueError(desc=('Invalid label_range configuration value:'
                                     ' (%s).' % label_range))

    return label_range
@validate(name=BGP_SERVER_PORT)
def validate_bgp_server_port(server_port):
    """Validate the BGP listen port: an integer in 0..65535."""
    if not isinstance(server_port, numbers.Integral):
        raise ConfigTypeError(desc=('Invalid bgp sever port configuration '
                                    'value %s' % server_port))
    if not 0 <= server_port <= 65535:
        raise ConfigValueError(desc='Invalid server port %s' % server_port)

    return server_port
@validate(name=TCP_CONN_TIMEOUT)
def validate_tcp_conn_timeout(tcp_conn_timeout):
    """Validate TCP connection timeout: an integer of at least 10 seconds."""
    # TODO(apgw-dev) made-up some valid values for this settings, check if we
    # have a standard value in any routers
    if not isinstance(tcp_conn_timeout, numbers.Integral):
        raise ConfigTypeError(desc=('Invalid tcp connection timeout '
                                    'configuration value %s' %
                                    tcp_conn_timeout))

    if tcp_conn_timeout < 10:
        raise ConfigValueError(desc=('Invalid tcp connection timeout'
                                     ' configuration value %s' %
                                     tcp_conn_timeout))

    return tcp_conn_timeout
@validate(name=BGP_CONN_RETRY_TIME)
def validate_bgp_conn_retry_time(bgp_conn_retry_time):
    """Validate the BGP connection retry time setting.

    Accepts integral values of at least 10 (seconds) and returns the
    value unchanged.
    """
    if not isinstance(bgp_conn_retry_time, numbers.Integral):
        raise ConfigTypeError(
            desc='Invalid bgp conn. retry time configuration value %s'
                 % bgp_conn_retry_time)
    if bgp_conn_retry_time >= 10:
        return bgp_conn_retry_time
    raise ConfigValueError(
        desc='Invalid bgp connection retry time configuration value %s'
             % bgp_conn_retry_time)
@validate(name=MAX_PATH_EXT_RTFILTER_ALL)
def validate_max_path_ext_rtfilter_all(max_path_ext_rtfilter_all):
    """Validate the max_path_ext_rtfilter_all flag.

    Only True/False are accepted; the value is returned unchanged.
    """
    # Membership test (not ``isinstance(..., bool)``) preserves the
    # original semantics: 0 and 1 compare equal to False/True and pass.
    if max_path_ext_rtfilter_all in (True, False):
        return max_path_ext_rtfilter_all
    raise ConfigTypeError(desc=('Invalid max_path_ext_rtfilter_all'
                                ' configuration value %s'
                                % max_path_ext_rtfilter_all))
class CommonConf(BaseConf):
    """Encapsulates configurations applicable to all peer sessions.

    Currently if any of these configurations change, it is assumed that
    current active peer session will be brought down and restarted.
    """
    # Event fired (via _notify_listeners) whenever any setting changes.
    CONF_CHANGED_EVT = 1

    VALID_EVT = frozenset([CONF_CHANGED_EVT])

    REQUIRED_SETTINGS = frozenset([ROUTER_ID, LOCAL_AS])

    OPTIONAL_SETTINGS = frozenset([REFRESH_STALEPATH_TIME,
                                   REFRESH_MAX_EOR_TIME,
                                   LABEL_RANGE, BGP_SERVER_PORT,
                                   TCP_CONN_TIMEOUT,
                                   BGP_CONN_RETRY_TIME,
                                   MAX_PATH_EXT_RTFILTER_ALL])

    def __init__(self, **kwargs):
        super(CommonConf, self).__init__(**kwargs)

    def _init_opt_settings(self, **kwargs):
        """Populate every optional setting from *kwargs* or its default."""
        super(CommonConf, self)._init_opt_settings(**kwargs)
        self._settings[LABEL_RANGE] = compute_optional_conf(
            LABEL_RANGE, DEFAULT_LABEL_RANGE, **kwargs)
        self._settings[REFRESH_STALEPATH_TIME] = compute_optional_conf(
            REFRESH_STALEPATH_TIME, DEFAULT_REFRESH_STALEPATH_TIME, **kwargs)
        self._settings[REFRESH_MAX_EOR_TIME] = compute_optional_conf(
            REFRESH_MAX_EOR_TIME, DEFAULT_REFRESH_MAX_EOR_TIME, **kwargs)
        self._settings[BGP_SERVER_PORT] = compute_optional_conf(
            BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT, **kwargs)
        self._settings[TCP_CONN_TIMEOUT] = compute_optional_conf(
            TCP_CONN_TIMEOUT, DEFAULT_TCP_CONN_TIMEOUT, **kwargs)
        self._settings[BGP_CONN_RETRY_TIME] = compute_optional_conf(
            BGP_CONN_RETRY_TIME, DEFAULT_BGP_CONN_RETRY_TIME, **kwargs)
        self._settings[MAX_PATH_EXT_RTFILTER_ALL] = compute_optional_conf(
            MAX_PATH_EXT_RTFILTER_ALL, DEFAULT_MAX_PATH_EXT_RTFILTER_ALL,
            **kwargs)

    # =========================================================================
    # Required attributes
    # =========================================================================

    @property
    def local_as(self):
        """Local autonomous system number (required setting)."""
        return self._settings[LOCAL_AS]

    @property
    def router_id(self):
        """BGP router id, an IPv4 address string (required setting)."""
        return self._settings[ROUTER_ID]

    # =========================================================================
    # Optional attributes with valid defaults.
    # =========================================================================

    @property
    def bgp_conn_retry_time(self):
        return self._settings[BGP_CONN_RETRY_TIME]

    @property
    def tcp_conn_timeout(self):
        return self._settings[TCP_CONN_TIMEOUT]

    @property
    def refresh_stalepath_time(self):
        return self._settings[REFRESH_STALEPATH_TIME]

    @property
    def refresh_max_eor_time(self):
        return self._settings[REFRESH_MAX_EOR_TIME]

    @property
    def label_range(self):
        return self._settings[LABEL_RANGE]

    @property
    def bgp_server_port(self):
        return self._settings[BGP_SERVER_PORT]

    @property
    def max_path_ext_rtfilter_all(self):
        return self._settings[MAX_PATH_EXT_RTFILTER_ALL]

    # Idiom fix below: these classmethods previously named their first
    # argument ``self``; the conventional (and pylint-clean) name is ``cls``.
    @classmethod
    def get_opt_settings(cls):
        self_confs = super(CommonConf, cls).get_opt_settings()
        self_confs.update(CommonConf.OPTIONAL_SETTINGS)
        return self_confs

    @classmethod
    def get_req_settings(cls):
        self_confs = super(CommonConf, cls).get_req_settings()
        self_confs.update(CommonConf.REQUIRED_SETTINGS)
        return self_confs

    @classmethod
    def get_valid_evts(cls):
        self_valid_evts = super(CommonConf, cls).get_valid_evts()
        self_valid_evts.update(CommonConf.VALID_EVT)
        return self_valid_evts

    def update(self, **kwargs):
        """Updates global configuration settings with given values.

        First checks if given configuration values differ from current values.
        If any of the configuration values changed, generates a change event.
        Currently we generate change event for any configuration change.
        Note: This method is idempotent.
        """
        # Update inherited configurations
        super(CommonConf, self).update(**kwargs)
        conf_changed = False

        # Validate given configurations and check if any value changed.
        # (Simplification: ``conf_value`` is already the value bound to
        # ``conf_name``; the old code re-fetched it with ``kwargs.get``.)
        for conf_name, conf_value in kwargs.items():
            rtconf.base.get_validator(conf_name)(conf_value)
            if self._settings.get(conf_name, None) != conf_value:
                conf_changed = True

        # If any configuration changed, we update configuration value and
        # notify listeners
        if conf_changed:
            for conf_name, conf_value in kwargs.items():
                # Since all new values are already validated, we can use them
                self._settings[conf_name] = conf_value
            self._notify_listeners(CommonConf.CONF_CHANGED_EVT, self)
class CommonConfListener(BaseConfListener):
    """Base listener for various changes to common configurations."""

    def __init__(self, global_conf):
        # Register for change events so subclasses receive a callback
        # whenever the global configuration is updated.
        super(CommonConfListener, self).__init__(global_conf)
        global_conf.add_listener(CommonConf.CONF_CHANGED_EVT,
                                 self.on_update_common_conf)

    def on_update_common_conf(self, evt):
        # Subclasses must override this to react to CONF_CHANGED_EVT.
        raise NotImplementedError('This method should be overridden.')
| 37.881306 | 79 | 0.681733 |
acf464a3ae959dc6faba0315196d38f62a69604c | 2,392 | py | Python | TestRound/pizza/tests/test_pizza.py | manupm87/hashcode-palinkoders-2018 | 098267a5d652f2dfba757ae22383fd59683ba589 | [
"MIT"
] | 4 | 2018-02-22T10:10:23.000Z | 2018-02-22T10:17:46.000Z | TestRound/pizza/tests/test_pizza.py | manupm87/hashcode-palinkoders-2018 | 098267a5d652f2dfba757ae22383fd59683ba589 | [
"MIT"
] | null | null | null | TestRound/pizza/tests/test_pizza.py | manupm87/hashcode-palinkoders-2018 | 098267a5d652f2dfba757ae22383fd59683ba589 | [
"MIT"
] | null | null | null | from definitions import *
import pizza.pizzamodule as p
from util import util
R, C, L, H, pizza = util.parse(INPUT_DATA_DIR + "example.in")
constraints = {"R": R, "C": C, "L": L, "H": H}
def test_possible_frames_of_size_6():
_constraints = {"R": 6, "C": 6, "L": L, "H": H}
sol = p.get_fitting_frames_of_size(size=6, constraints=_constraints)
assert len(sol) is 4
assert sol.__contains__({'r': 1, 'c': 6})
def test_possible_frames_of_size_4():
_constraints = {"R": 4, "C": 4, "L": L, "H": H}
sol = p.get_fitting_frames_of_size(size=4, constraints=_constraints)
assert len(sol) is 3
assert sol.__contains__({'r': 2, 'c': 2})
assert sol.__contains__({'r': 4, 'c': 1})
def test_first_slice_of_size_6():
cut_slice = p.get_ingredients_for_slice_at_pos(pos={"r": 0, "c": 0}, frame={'c': 2, 'r': 3}, pizza=pizza,
constraints=constraints)
assert cut_slice.__eq__(['TT', 'TM', 'TT'])
def test_last_slice_of_size_6():
cut_slice = p.get_ingredients_for_slice_at_pos(pos={"r": 0, "c": 3}, frame={'c': 2, 'r': 3}, pizza=pizza,
constraints=constraints)
assert cut_slice.__eq__(['TT', 'MT', 'TT'])
def test_slice_of_size_6_out_of_pizza_bounds():
cut_slice = p.get_ingredients_for_slice_at_pos(pos={"r": 0, "c": 4}, frame={'c': 2, 'r': 3}, pizza=pizza,
constraints=constraints)
assert not cut_slice
def test_not_enough_ingredients_on_slice_full_of_tomato():
cur_slice = ['TT', 'TT']
assert not p.is_ingredients_valid(cur_slice, constraints=constraints)
def test_not_enough_ingredients_on_slice_full_of_mushroom():
cur_slice = ['MM', 'MM']
assert not p.is_ingredients_valid(cur_slice, constraints=constraints)
def test_enough_ingredients_on_slice_mainly_tomato():
cur_slice = ['TT', 'MT']
assert p.is_ingredients_valid(cur_slice, constraints=constraints)
def test_enough_ingredients_on_slice_mainly_mushroom():
cur_slice = ['MM', 'TM']
assert p.is_ingredients_valid(cur_slice, constraints=constraints)
def test_slice_with_enough_ingredients_but_overlapping():
_constraints = {"R": 4, "C": 4, "L": 2, "H": H}
cur_slice = ['MTT', '*MT', '*MT']
assert not p.is_ingredients_valid(slice_ingredients=cur_slice, constraints=_constraints)
| 35.701493 | 109 | 0.652592 |
acf4650adb8704f80bf9798f495ecd3d0a3e1eca | 16,312 | py | Python | PYTHON_AUTOMATION/00_yuxi.py | sly1314sly/selenium_basic | 53bc2bf4d8a81bcd71f7fe5910cbc34ecfc6869a | [
"Apache-2.0"
] | 1 | 2019-08-03T04:24:13.000Z | 2019-08-03T04:24:13.000Z | PYTHON_AUTOMATION/00_yuxi.py | sly1314sly/selenium_basic | 53bc2bf4d8a81bcd71f7fe5910cbc34ecfc6869a | [
"Apache-2.0"
] | null | null | null | PYTHON_AUTOMATION/00_yuxi.py | sly1314sly/selenium_basic | 53bc2bf4d8a81bcd71f7fe5910cbc34ecfc6869a | [
"Apache-2.0"
] | null | null | null | # 一、入门
# from selenium import webdriver #引入selenium中webdriver这样一个库
# driver = webdriver.Chrome(executable_path='./chromedriver') #创建一个chrome浏览器实例,加上执行的路径 。/代表当前路径(把chromedriver放到环境变量,就不用每次加命令)
# driver.get('http://www.baidu.com') #该处为具体网址,用chrome浏览器打开百度
#二、web自动化selenium通过ID,name定位元素
# driver.find_element_by_id('kw').send_keys("python学习") #输入框输入输入关键字,通过id定位元素
# driver.find_element_by_name('wd').send_keys("python学习") #输入框输入输入关键字 ,通过name定位元素,确保唯一
#页面中一般一个元素的id是唯一的,name是否唯一,在开发者工具页面中,点击command+f,打开查找name,是否唯一
#三、web自动化selenium通过css,xpath定位元素 线下学习css和xpath
#开发者工具,选中右键-copy-copy selector 然后按住command+f 粘贴 ,
# 如百度输入框,查找出来的是#kw ,#代表id 元素为kw,即元素为id为kw的元素
'''CSS可以通过元素的id、class、标签(input)这三个常规属性直接定位到,而这三种编写方式,在HTML中编写style的时候,可以进行标识如:
#su
.class
input
比如:百度的登录页面
id === #su
class === .bg s_btn
标签 === input[type="submit"] 或者 input[value="百度一下"]
'''
# driver.find_element_by_css_selector('#kw').send_keys("python学习") #css定位
# driver.find_element_by_css_selector('a[name="tj_briicon"]').click() #css定位,上网可以查一下
#xpath定位
# //*[@id="kw"] 双斜杠表示从此页面任何一个位置开始,*去匹配,方框表示里面的属性,=是里面的值
# driver.find_element_by_xpath('//*[@id="kw"]').send_keys("python学习")
#四、web自动化selenium如何自动上传文件
# driver.find_element_by_css_selector('span[class="soutu-btn"]').click() 点击
# driver.find_element_by_css_selector('input[type="file"]').send_keys('/Users/songluyao/Desktop/selenium_basic/aaa.jpg') #提供路径
#五、web自动化selenium获取网页标题-文本-添加判断
# from selenium import webdriver
# import time #引入时间,添加等待时间
# driver = webdriver.Chrome(executable_path='./chromedriver')
# driver.get('http://www.baidu.com')
# print(driver.title)
# assert "百度" in driver.title #断言,判断百度是不是在这个里面 不在就报错,在就不会打印任何东西
# driver.find_element_by_id('kw').send_keys("自动化测试") #百度搜索自动化测试
# driver.find_element_by_id('su').click #点击搜索按钮
# time.sleep(2) #等待2秒
# rusult = driver.find_element_by_id('content_left').text #搜索结果,调用text属性,可以拿到文本值
# print(rusult) #打印出来
# assert "自动化测试" in rusult #断言,有期望值
#六、web自动化selenium-Keys简介以及如何使用unittest
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys #该键类提供键在键盘像enter,F1,ALT等
# driver = webdriver.Chrome()
# driver.get('http://www.baidu.com')
# driver.find_element_by_id('kw').send_keys("自动化测试")
# driver.find_element_by_id('kw').send_keys(Keys.ENTER) #搜索自动化测试,按enter键搜索
'''
4)键盘事件
key_down(value, element=None) ——按下某个键盘上的键
key_up(value, element=None) ——松开某个键
send_keys(Keys.BACK_SPACE) 删除键(backspace)
send_keys( Keys. SPACE) 空格键(space)
send_keys( Keys.TAB) 制表键(Tab)
send_keys( Keys. ESCAPE) 回退键(esc)
send_keys( Keys. ENTER) 回车键(enter)
send_keys(Keys.CONTROL,’a’) 全选(ctrl+A)
send_keys(Keys.CONTROL,’c’) 复制(ctrl+C)
send_keys(Keys.CONTROL,’x’) 剪切(ctrl+X)
send_keys(Keys.CONTROL,’v’) 粘贴(ctrl+v)
send_keys(keys.F1) 键盘F1
……
send_keys(keys.F12) 键盘F12
使用的时候需导入:from selenium.webdriver.common.keys import Keys
'''
#七、使用Selenium编写单元测试
# import unittest #引入这个单元测试
# from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
# class BaiduSearch(unittest.TestCase):
# def setUp(self):
# self.driver = webdriver.Chrome()
# def test_baidu_search(self):
# driver = self.driver
# driver.get('http://www.baidu.com')
# driver.find_element_by_id('kw').send_keys("自动化测试")
# driver.find_element_by_id('kw').send_keys(Keys.ENTER)
# def tearDown(self):
# self.driver.close()
# if __name__ == "__main__":
# unittest.main()
#八、web自动化selenium其它4种查找元素的方法
# from selenium import webdriver
# driver = webdriver.Chrome()
# driver.get('http://www.baidu.com')
# driver.find_element_by_class_name('s_ipt').send_keys("自动化测试") # 如果class属性是唯一的就可以用
# driver.find_element_by_link_text('新闻').click() #保持外面文字是唯一的, 打开百度新闻,超链接外面的文本
# driver.find_element_by_partial_link_text('新').click() #去匹配,找到所有超链接里面带“新”字的
# driver.find_elements_by_tag_name #这个很少用
# 注意:定位对象(locate elements)之后我们需要对该已定位对象进行操作。通常所有的操作与页面交互都将通过 WebElement 接口;常见的操作元素方法如下:
# clear 清除元素的内容
# send_keys 模拟按键输入
# click 点击元素
# submit 提交表单
# 九、web自动化selenium-写一个注册登陆来总结一下这几天的学习
# import unittest #引入这个单元测试
# from selenium import webdriver
# """
# 测试账号:
# 用户名:user0
# 密码:123456
# """
# class Conde(unittest.TestCase): #固定写法
# def setUp(self):
# self.Url = 'http://39.107.96.138:3000/'
# self.driver = webdriver.Chrome()
# self.driver.get(self.Url) #打开网址
# def test_register(self): #测试用例必须写一个test,
# driver = self.driver
# driver.find_element_by_css_selector('a[href="/signup"]').click()
# driver.find_element_by_id('loginname').send_keys("zhangsanfeng")
# driver.find_element_by_id('pass').send_keys("123456")
# driver.find_element_by_id('re_pass').send_keys("123456")
# driver.find_element_by_id('email').send_keys("123@163.com")
# driver.find_element_by_css_selector('input[type="submit"]').click()
# def test_login(self):
# pass
# def tearDown(self):
# self.driver.save_screenshot('./jietu.png') #截个图,此处会出现无论运行多少个case,都会保持为命名这个的截图,思考如何分别保留
# self.driver.quit() #退出
# if __name__ == "__main__":
# unittest.main()
"""
if __name__ == '__main__'的意思是:当.py文件被直接运行时,if __name__ == '__main__'之下的代码块将被运行;当.py文件以模块形式被导入时,if __name__ == '__main__'之下的代码块不被运行。
'''
'''
import unittest
#定义测试类Test,父类为unittest.TestCase
class Test(unittest.TestCase):
# docstring for Test
#重写父类setUp方法
def setUp(self):
print("setUp")
#定义测试用例,以“test_”开头命名的方法
def test_test1(self):
print("test_test1")
self.assertEqual('1','1',msg = '1=1')
def test_test2(self):
print('test_test2')
self.assertEqual('1','2',msg = '1!=2')
@unittest.skip('暂时跳过test_test3的测试')
def test_test3(self):
print('test_test2')
#重写父类tearDown方法
def tearDown(self):
print("tearDown")
if __name__=='__main__':
#unittest.main()方法会搜索该模块下所有以test开头的测试用例方法
unittest.main()
"""
# ##############################################################
# testCase执行顺序
# 1.方法顺序
# def setUp(self): 在测试方法前执行
# def tearDown(self): 在测试方法后执行
# ---------------------
# class TestMethod(unittest.TestCase):
# #每次方法之前执行
# def setUp(self):
# print('每次方法之前执行')
# #每次方法之后执行
# def tearDown(self):
# print('每次方法之后执行')
# def test_01(self):
# print('测试1')
# def test_02(self):
# print('测试2')
# if __name__ == '__main__':
# unittest.main()
# ---------------------
# 执行结果:
# 每次方法之前执行
# 测试1
# 每次方法之后执行
# 每次方法之前执行
# 测试2
# 每次方法之后执行
# 2.类顺序
# @classmethod
# def setUpClass(cls):
# 在类之前执行
# @classmethod
# def tearDownClass(cls):
# 在类之后执行
# ---------------------
# class TestMethod(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# print('类执行之前的方法')
# @classmethod
# def tearDownClass(cls):
# print('类执行之后的方法')
# #每次方法之前执行
# def setUp(self):
# print('每次方法之前执行')
# #每次方法之后执行
# def tearDown(self):
# print('每次方法之后执行')
# def test_01(self):
# print('测试1')
# def test_02(self):
# print('测试2')
# if __name__ == '__main__':
# unittest.main()
# ---------------------
# 执行结果:
# 类执行之前的方法 每次方法之前执行
# 测试1
# 每次方法之后执行
# 每次方法之前执行
# 测试2
# 每次方法之后执行
# 类执行之后的方法
# ##############################################################
# 十、selenium实战,使用action进行发帖操作
####################################################################################
# 十一、Excel文件处理—xlrd库简介
# 安装xlrd : pip install xlrd
# github文档:https://github.com/python-excel/xlrd
# import xlrd
# book = xlrd.open_workbook('data.xls') #打开这个文件
# print("The number of worksheets is {0}".format(book.nsheets)) #打印excel中sheet表个数
# print("Worksheet name(s): {0}".format(book.sheet_names())) # 所有的sheet表的名字
# sh = book.sheet_by_index(0) #第一个sheet
# print("{0} {1} {2}".format(sh.name, sh.nrows, sh.ncols)) #sheet的名字,多少行,多少列
# print("Cell D1 is {0}".format(sh.cell_value(rowx=0, colx=1))) #第1行,第二列的值
# for rx in range(sh.nrows): #用一个循环变了所有的值
# print(sh.row(rx))
####################################################################################
# python 对 excel基本的操作如下:
# # -*- coding: utf-8 -*-
# import xlrd
# import xlwt
# from datetime import date,datetime
# def read_excel():
# # 打开文件
# workbook = xlrd.open_workbook(r'F:\demo.xlsx')
# # 获取所有sheet
# print workbook.sheet_names() # [u'sheet1', u'sheet2']
# sheet2_name = workbook.sheet_names()[1]
# # 根据sheet索引或者名称获取sheet内容
# sheet2 = workbook.sheet_by_index(1) # sheet索引从0开始
# sheet2 = workbook.sheet_by_name('sheet2')
# # sheet的名称,行数,列数
# print sheet2.name,sheet2.nrows,sheet2.ncols
# # 获取整行和整列的值(数组)
# rows = sheet2.row_values(3) # 获取第四行内容
# cols = sheet2.col_values(2) # 获取第三列内容
# print rows
# print cols
# # 获取单元格内容
# print sheet2.cell(1,0).value.encode('utf-8')
# print sheet2.cell_value(1,0).encode('utf-8')
# print sheet2.row(1)[0].value.encode('utf-8')
# # 获取单元格内容的数据类型
# print sheet2.cell(1,0).ctype
# if __name__ == '__main__':
# read_excel()
####################################################################################
# 十二、查找多个元素
#页面所有手机价格找到打印出来
# from selenium import webdriver
# import time
# driver = webdriver.Chrome()
# driver.get('https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&wq=%E6%89%8B%E6%9C%BA&pvid=91a04aab85a446d58abbb0a39947ed5f')
# eles = driver.find_elements_by_css_selector('li.gl-item .p-price')
# for index in range(len(eles)):
# print(eles[index].text)
####################################################################################
#十三、excel文件处理——写入到文件xlwt
# 安装xlwt : pip install xlwt
# github文档:https://github.com/python-excel/xlwt
#简单使用
# import xlwt #引入库
# from datetime import datetime #引入时间
# style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on', num_format_str='#,##0.00') #字体、颜色等样式的一些设置,字符串的一些格式
# style1 = xlwt.easyxf(num_format_str='D-MMM-YY') #日期的格式
# wb = xlwt.Workbook() #创建一个excel
# ws = wb.add_sheet('A Test Sheet') # 创建一个sheet表
# #往sheet里面填写信息
# ws.write(0, 0, 1234.56, style0) #第一行第一列,填入值1234.56,使用style0样式
# ws.write(1, 0, datetime.now(), style1)
# ws.write(2, 0, 1)
# ws.write(2, 1, 1)
# ws.write(2, 2, xlwt.Formula("A3+B3"))
# wb.save('example.xls') #文件保存
####################################################################################
# 练习:找到手机价格,保存到excel文件
# from selenium import webdriver
# import xlwt
# from datetime import datetime
# driver = webdriver.Chrome()
# driver.get('https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&wq=%E6%89%8B%E6%9C%BA&pvid=91a04aab85a446d58abbb0a39947ed5f')
# price_eles = driver.find_elements_by_css_selector('li.gl-item .p-price')
# desc_eles = driver.find_elements_by_css_selector('div.p-name.p-name-type-2')
# count = len(price_eles)
# wb = xlwt.Workbook() #创建一个excel
# ws = wb.add_sheet('jd手机价格') # 创建一个sheet表
# ws.write(0,0,'手机')
# ws.write(0,1,'价格')
# for index in range(count):
# ws.write(index+1,0,desc_eles[index].text)
# ws.write(index+1,1,price_eles[index].text)
# wb.save('phone.xls')
####################################################################################
#十四、使用人工智能识别图片验证码
#https://ai.baidu.com/docs#/OCR-API/top
# from selenium import webdriver
# import requests,base64
# import time
# driver = webdriver.Chrome()
# driver.get('http://dev.console.jobsaas.com')
# time.sleep(2)
# driver.find_element_by_xpath('//*[@id="app"]/div/div/div[2]/div[2]/div/div[2]/form/div[2]/div[1]/div/div[1]/input').send_keys("15256558113")
# driver.find_element_by_css_selector('input[type="password"]').send_keys("558113")
# image_ele = driver.find_element_by_css_selector('div.loginVerImg.fr > img') #找到图片验证码位置
# image_ele.screenshot('./image.png') #把图片截图保存下来
# #获取百度APK的token。client_id 为官网获取的AK,client_secret 为官网获取的SK
# host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=gZcF5SoXusyYZbmdXb6x8YFq&client_secret=q5ylwgyYulmxd4boMa1qLDkAMDIAy8Eu'
# res= requests.get(host) #用requeest发用get请求
# r =res.json()
# print(r)
# access_token = r['access_token'] #获取json里面的token
# print(access_token)
# # access_token = '#####调用鉴权接口获取的token#####'
# url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general?access_token='+access_token
# # 二进制方式打开图文件
# f = open(r'./image.png', 'rb')
# # 参数image:图像base64编码
# img = base64.b64encode(f.read())
# params = {"image": img}
# imageres = requests.post(url, data=params) #用这个方法上传上去,上传url和数据
# image_json = imageres.json() #返回结果
# print(imageres.json())
# image_num = image_json['words_result'][0]['words']
# driver.find_element_by_xpath('//*[@id="app"]/div/div/div[2]/div[2]/div/div[2]/form/div[2]/div[3]/div/div[1]/input').send_keys(image_num)
####################################################################################
#十五、selenium模拟快捷键操作
# from selenium import webdriver
# from selenium.webdriver.common.action_chains import ActionChains #引入actionchains
# from selenium.webdriver.common.keys import Keys ########################引入特殊键的库
# driver=webdriver.Chrome()
# driver.get('http://39.107.96.138:3000/signin') # 账号密码user1 123456
# driver.find_element_by_xpath('//*[@id="name"]').send_keys('user1')
# driver.find_element_by_xpath('//*[@id="pass"]').send_keys('123456')
# driver.find_element_by_css_selector('input[type="submit"]').click()
# driver.find_element_by_xpath('//*[@id="create_topic_btn"]').click() #打开发布话题
# driver.find_element_by_xpath('//*[@id="tab-value"]').click() #点击选择框
# driver.find_element_by_xpath('//*[@id="tab-value"]/option[2]').click() #下拉框选择
# driver.find_element_by_xpath('//*[@id="title"]').send_keys('helloword1') #输入主题
# content_area = driver.find_element_by_xpath('//*[@class="CodeMirror-scroll"]') #鼠标移动到文本编辑器
# content_area.click() #点击后才能输入内容
# actions = ActionChains(driver)
# actions.move_to_element(content_area)
# actions.send_keys('hff而且')
# ###################在文本输入框里模拟快捷键Ctrl+b的操作
# actions.key_down(Keys.CONTROL)
# actions.send_keys('b')
# actions.key_up(Keys.CONTROL)
# actions.perform()
####################################################################################
#十六、selenium爬虫-微博搜索页面操作
# from selenium import webdriver
# driver=webdriver.Chrome()
# driver.get('https://s.weibo.com/')
# driver.find_element_by_css_selector('div[class="search-input"]>input[type="text"]').send_keys('web自动化') #输入图片验证码
# driver.find_element_by_css_selector('.s-btn-b').click()
####################################################################################
#十七、微博搜索结果写入文件
####################################################################################
#十八、selenium执行JavaScript命令
# from selenium import webdriver
# driver=webdriver.Chrome()
# js = 'document.querySelector("#local_news > div.column-title-home > div").scrollIntoView()' #js只能用css查找,鼠标滚动到此处
# driver.get('http://news.baidu.com/')
# driver.execute_script(js)
####################################################################################
#十九、python 定时任务
##https://github.com/dbader/schedule
##pip install schedule
# import schedule
# import time
# def job():
# print("I'm working...")
# schedule.every(1).minutes.do(job) #每1分钟执行一次
# # schedule.every().hour.do(job) #每隔一小时执行一次任务
# # schedule.every().day.at("10:30").do(job) #每天的10:30执行一次任务
# # schedule.every(5).to(10).minutes.do(job) #每隔5到10天执行一次任务
# # schedule.every().monday.do(job) #每周一的这个时候执行一次任务
# # schedule.every().wednesday.at("13:15").do(job) #每周三13:15执行一次任务
# # schedule.every().minute.at(":17").do(job)
# while True:
# schedule.run_pending() #运行所有可以运行的任务
# time.sleep(1)
####################################################################################
#二十、selenum切换iframe
# from selenium import webdriver
# driver=webdriver.Chrome()
# driver.get('https://login.anjuke.com/login/form')
# iframeEle = driver.find_element_by_id('iframeLoginIfm')
# driver.switch_to.frame(iframeEle)
# driver.find_element_by_id('phoneIpt').send_keys('15256558113') | 27.141431 | 163 | 0.628556 |
acf4659b7ad464867a65ad329e99aedab8b8c84e | 113,135 | py | Python | src/sage/categories/category_with_axiom.py | Blues1998/sage | b5c9cf037cbce672101725f269470135b9b2c5c4 | [
"BSL-1.0"
] | null | null | null | src/sage/categories/category_with_axiom.py | Blues1998/sage | b5c9cf037cbce672101725f269470135b9b2c5c4 | [
"BSL-1.0"
] | null | null | null | src/sage/categories/category_with_axiom.py | Blues1998/sage | b5c9cf037cbce672101725f269470135b9b2c5c4 | [
"BSL-1.0"
] | null | null | null | r"""
Axioms
This documentation covers how to implement axioms and proceeds with an
overview of the implementation of the axiom infrastructure. It assumes
that the reader is familiar with the :ref:`category primer
<sage.categories.primer>`, and in particular its :ref:`section about
axioms <category-primer-axioms>`.
Implementing axioms
===================
Simple case involving a single predefined axiom
-----------------------------------------------
Suppose that one wants to provide code (and documentation, tests, ...)
for the objects of some existing category ``Cs()`` that satisfy some
predefined axiom ``A``.
The first step is to open the hood and check whether there already
exists a class implementing the category ``Cs().A()``. For example,
taking ``Cs=Semigroups`` and the ``Finite`` axiom, there already
exists a class for the category of finite semigroups::
sage: Semigroups().Finite()
Category of finite semigroups
sage: type(Semigroups().Finite())
<class 'sage.categories.finite_semigroups.FiniteSemigroups_with_category'>
In this case, we say that the category of semigroups *implements* the
axiom ``Finite``, and code about finite semigroups should go in the
class :class:`FiniteSemigroups` (or, as usual, in its nested classes
``ParentMethods``, ``ElementMethods``, and so on).
On the other hand, there is no class for the category of infinite
semigroups::
sage: Semigroups().Infinite()
Category of infinite semigroups
sage: type(Semigroups().Infinite())
<class 'sage.categories.category.JoinCategory_with_category'>
This category is indeed just constructed as the intersection of the
categories of semigroups and of infinite sets respectively::
sage: Semigroups().Infinite().super_categories()
[Category of semigroups, Category of infinite sets]
In this case, one needs to create a new class to implement the axiom
``Infinite`` for this category. This boils down to adding a nested
class ``Semigroups.Infinite`` inheriting from :class:`CategoryWithAxiom`.
In the following example, we implement a category ``Cs``, with a
subcategory for the objects satisfying the ``Finite`` axiom defined in
the super category ``Sets`` (we will see later on how to *define* new
axioms)::
sage: from sage.categories.category_with_axiom import CategoryWithAxiom
sage: class Cs(Category):
....: def super_categories(self):
....: return [Sets()]
....: class Finite(CategoryWithAxiom):
....: class ParentMethods:
....: def foo(self):
....: print("I am a method on finite C's")
::
sage: Cs().Finite()
Category of finite cs
sage: Cs().Finite().super_categories()
[Category of finite sets, Category of cs]
sage: Cs().Finite().all_super_categories()
[Category of finite cs, Category of finite sets,
Category of cs, Category of sets, ...]
sage: Cs().Finite().axioms()
frozenset({'Finite'})
Now a parent declared in the category ``Cs().Finite()`` inherits from
all the methods of finite sets and of finite `C`'s, as desired::
sage: P = Parent(category=Cs().Finite())
sage: P.is_finite() # Provided by Sets.Finite.ParentMethods
True
sage: P.foo() # Provided by Cs.Finite.ParentMethods
I am a method on finite C's
.. _category-with-axiom-design:
.. NOTE::
- This follows the same idiom as for
:ref:`sage.categories.covariant_functorial_construction`.
- From an object oriented point of view, any subcategory ``Cs()``
of :class:`Sets` inherits a ``Finite`` method. Usually ``Cs``
could complement this method by overriding it with a method
``Cs.Finite`` which would make a super call to ``Sets.Finite``
and then do extra stuff.
In the above example, ``Cs`` also wants to complement
``Sets.Finite``, though not by doing more stuff, but by
providing it with an additional mixin class containing the code
for finite ``Cs``. To keep the analogy, this mixin class is to
be put in ``Cs.Finite``.
- By defining the axiom ``Finite``, :class:`Sets` fixes the
semantic of ``Cs.Finite()`` for all its subcategories ``Cs``:
namely "the category of ``Cs`` which are finite as sets". Hence,
for example, ``Modules.Free.Finite`` cannot be used to model the
category of free modules of finite rank, even though their
traditional name "finite free modules" might suggest it.
- It may come as a surprise that we can actually use the same name
``Finite`` for the mixin class and for the method defining the
axiom; indeed, by default a class does not have a binding
behavior and would completely override the method. See the
section :ref:`axioms-defining-a-new-axiom` for details and the
rationale behind it.
An alternative would have been to give another name to the mixin
class, like ``FiniteCategory``. However this would have resulted
in more namespace pollution, whereas using ``Finite`` is already
clear, explicit, and easier to remember.
- Under the hood, the category ``Cs().Finite()`` is aware that it
has been constructed from the category ``Cs()`` by adding the
axiom ``Finite``::
sage: Cs().Finite().base_category()
Category of cs
sage: Cs().Finite()._axiom
'Finite'
Over time, the nested class ``Cs.Finite`` may become large and too
cumbersome to keep as a nested subclass of ``Cs``. Or the category with
axiom may have a name of its own in the literature, like *semigroups*
rather than *associative magmas*, or *fields* rather than *commutative
division rings*. In this case, the category with axiom can be put
elsewhere, typically in a separate file, with just a link from
``Cs``::
sage: class Cs(Category):
....: def super_categories(self):
....: return [Sets()]
sage: class FiniteCs(CategoryWithAxiom):
....: class ParentMethods:
....: def foo(self):
....: print("I am a method on finite C's")
sage: Cs.Finite = FiniteCs
sage: Cs().Finite()
Category of finite cs
For a real example, see the code of the class :class:`FiniteGroups` and the
link to it in :class:`Groups`. Note that the link is implemented using
:class:`~sage.misc.lazy_import.LazyImport`; this is highly recommended: it
makes sure that :class:`FiniteGroups` is imported after :class:`Groups` it
depends upon, and makes it explicit that the class :class:`Groups` can be
imported and is fully functional without importing :class:`FiniteGroups`.
.. NOTE::
Some categories with axioms are created upon Sage's startup. In such a
case, one needs to pass the ``at_startup=True`` option to
:class:`~sage.misc.lazy_import.LazyImport`, in order to quiet the warning
about that lazy import being resolved upon startup. See for example
``Sets.Finite``.
This is undoubtedly a code smell. Nevertheless, it is preferable
to stick to lazy imports, first to resolve the import order
properly, and more importantly as a reminder that the category
would be best not constructed upon Sage's startup. This is to spur
developers to reduce the number of parents (and therefore
categories) that are constructed upon startup. Each
``at_startup=True`` that will be removed will be a measure of
progress in this direction.
.. NOTE::
In principle, due to a limitation of
:class:`~sage.misc.lazy_import.LazyImport` with nested classes (see
:trac:`15648`), one should pass the option ``as_name`` to
:class:`~sage.misc.lazy_import.LazyImport`::
Finite = LazyImport('sage.categories.finite_groups', 'FiniteGroups', as_name='Finite')
in order to prevent ``Groups.Finite`` to keep on reimporting
``FiniteGroups``.
Given that passing this option introduces some redundancy and is
error prone, the axiom infrastructure includes a little workaround
which makes the ``as_name`` unnecessary in this case.
Making the category with axiom directly callable
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If desired, a category with axiom can be constructed directly through
its class rather than through its base category::
sage: Semigroups()
Category of semigroups
sage: Semigroups() is Magmas().Associative()
True
sage: FiniteGroups()
Category of finite groups
sage: FiniteGroups() is Groups().Finite()
True
For this notation to work, the class :class:`Semigroups` needs to be
aware of the base category class (here, :class:`Magmas`) and of the
axiom (here, ``Associative``)::
sage: Semigroups._base_category_class_and_axiom
(<class 'sage.categories.magmas.Magmas'>, 'Associative')
sage: Fields._base_category_class_and_axiom
(<class 'sage.categories.division_rings.DivisionRings'>, 'Commutative')
sage: FiniteGroups._base_category_class_and_axiom
(<class 'sage.categories.groups.Groups'>, 'Finite')
sage: FiniteDimensionalAlgebrasWithBasis._base_category_class_and_axiom
(<class 'sage.categories.algebras_with_basis.AlgebrasWithBasis'>, 'FiniteDimensional')
In our example, the attribute ``_base_category_class_and_axiom`` was
set upon calling ``Cs().Finite()``, which makes the notation seemingly
work::
sage: FiniteCs()
Category of finite cs
sage: FiniteCs._base_category_class_and_axiom
(<class '__main__.Cs'>, 'Finite')
sage: FiniteCs._base_category_class_and_axiom_origin
'set by __classget__'
But calling ``FiniteCs()`` right after defining the class would have
failed (try it!). In general, one needs to set the attribute explicitly::
sage: class FiniteCs(CategoryWithAxiom):
....: _base_category_class_and_axiom = (Cs, 'Finite')
....: class ParentMethods:
....: def foo(self):
....: print("I am a method on finite C's")
Having to set explicitly this link back from ``FiniteCs`` to ``Cs``
introduces redundancy in the code. It would therefore be desirable to
have the infrastructure set the link automatically instead (a
difficulty is to achieve this while supporting lazy imported
categories with axiom).
As a first step, the link is set automatically upon accessing the
class from the base category class::
sage: Algebras.WithBasis._base_category_class_and_axiom
(<class 'sage.categories.algebras.Algebras'>, 'WithBasis')
sage: Algebras.WithBasis._base_category_class_and_axiom_origin
'set by __classget__'
Hence, for whatever this notation is worth, one can currently do::
sage: Algebras.WithBasis(QQ)
Category of algebras with basis over Rational Field
We don't recommend using syntax like ``Algebras.WithBasis(QQ)``, as it
may eventually be deprecated.
As a second step, Sage tries some obvious heuristics to deduce the link
from the name of the category with axiom (see
:func:`base_category_class_and_axiom` for the details). This typically
covers the following examples::
sage: FiniteCoxeterGroups()
Category of finite coxeter groups
sage: FiniteCoxeterGroups() is CoxeterGroups().Finite()
True
sage: FiniteCoxeterGroups._base_category_class_and_axiom_origin
'deduced by base_category_class_and_axiom'
sage: FiniteDimensionalAlgebrasWithBasis(QQ)
Category of finite dimensional algebras with basis over Rational Field
sage: FiniteDimensionalAlgebrasWithBasis(QQ) is Algebras(QQ).FiniteDimensional().WithBasis()
True
If the heuristic succeeds, the result is guaranteed to be correct. If
it fails, typically because the category has a name of its own like
:class:`Fields`, the attribute ``_base_category_class_and_axiom``
should be set explicitly. For more examples, see the code of the
classes :class:`Semigroups` or :class:`Fields`.
.. NOTE::
When printing out a category with axiom, the heuristic determines
whether a category has a name of its own by checking out how
``_base_category_class_and_axiom`` was set::
sage: Fields._base_category_class_and_axiom_origin
'hardcoded'
See :meth:`CategoryWithAxiom._without_axioms`,
:meth:`CategoryWithAxiom._repr_object_names_static`.
In our running example ``FiniteCs``, Sage failed to deduce
automatically the base category class and axiom because the class
``Cs`` is not in the standard location ``sage.categories.cs``.
.. TOPIC:: Design discussion
The above deduction, based on names, is undoubtedly inelegant. But
it's safe (either the result is guaranteed to be correct, or an
error is raised), it saves on some redundant information, and it
is only used for the simple shorthands like ``FiniteGroups()`` for
``Groups().Finite()``. Finally, most if not all of these
shorthands are likely to eventually disappear (see :trac:`15741`
and the :ref:`related discussion in the primer
<category-primer-axioms-single-entry-point>`).
.. _axioms-defining-a-new-axiom:
Defining a new axiom
--------------------
We describe now how to define a new axiom. The first step is to figure
out the largest category where the axiom makes sense. For example
``Sets`` for ``Finite``, ``Magmas`` for ``Associative``, or
``Modules`` for ``FiniteDimensional``. Here we define the axiom
``Green`` for the category ``Cs`` and its subcategories::
sage: from sage.categories.category_with_axiom import CategoryWithAxiom
sage: class Cs(Category):
....: def super_categories(self):
....: return [Sets()]
....: class SubcategoryMethods:
....: def Green(self):
....: '<documentation of the axiom Green>'
....: return self._with_axiom("Green")
....: class Green(CategoryWithAxiom):
....: class ParentMethods:
....: def foo(self):
....: print("I am a method on green C's")
With the current implementation, the name of the axiom must also be
added to a global container::
sage: all_axioms = sage.categories.category_with_axiom.all_axioms
sage: all_axioms += ("Green",)
We can now use the axiom as usual::
sage: Cs().Green()
Category of green cs
sage: P = Parent(category=Cs().Green())
sage: P.foo()
I am a method on green C's
Compared with our first example, the only newcomer is the method
``.Green()`` that can be used by any subcategory ``Ds()`` of ``Cs()``
to add the axiom ``Green``. Note that the expression ``Ds().Green``
always evaluates to this method, regardless of whether ``Ds`` has a
nested class ``Ds.Green`` or not (an implementation detail)::
sage: Cs().Green
<bound method Cs_with_category.Green of Category of cs>
Thanks to this feature (implemented in :meth:`CategoryWithAxiom.__classget__`),
the user is systematically referred to the documentation of this
method when doing introspection on ``Ds().Green``::
sage: C = Cs()
sage: C.Green? # not tested
sage: Cs().Green.__doc__
'<documentation of the axiom Green>'
It is therefore the natural spot for the documentation of the axiom.
.. NOTE::
The presence of the nested class ``Green`` in ``Cs`` is currently
mandatory even if it is empty.
.. TODO::
Specify whether or not one should systematically use
@cached_method in the definition of the axiom. And make sure all
the definition of axioms in Sage are consistent in this respect!
.. TODO::
    We could possibly define an @axiom decorator? This could hide two
    little implementation details: whether or not to make the method a
    cached method, and the call to _with_axiom(...) under the hood. It
    could possibly do some more magic. The gain is not obvious though.
.. NOTE::
``all_axioms`` is only used marginally, for sanity checks and when
trying to derive automatically the base category class. The order
of the axioms in this tuple also controls the order in which they
appear when printing out categories with axioms (see
:meth:`CategoryWithAxiom._repr_object_names_static`).
During a Sage session, new axioms should only be added at the *end*
of ``all_axioms``, as above, so as to not break the cache of
:func:`axioms_rank`. Otherwise, they can be inserted statically
anywhere in the tuple. For axioms defined within the Sage library,
the name is best inserted by editing directly the definition of
``all_axioms`` in :mod:`sage.categories.category_with_axiom`.
.. TOPIC:: Design note
Let us state again that, unlike what the existence of
``all_axioms`` might suggest, the definition of an axiom is local
to a category and its subcategories. In particular, two
independent categories ``Cs()`` and ``Ds()`` can very well define
axioms with the same name and different semantics. As long as the
two hierarchies of subcategories don't intersect, this is not a
problem. And if they do intersect naturally (that is if one is
likely to create a parent belonging to both categories), this
probably means that the categories ``Cs`` and ``Ds`` are about
related enough areas of mathematics that one should clear the
ambiguity by having either the same semantic or different names.
This caveat is no different from that of name clashes in hierarchy
of classes involving multiple inheritance.
.. TODO::
Explore ways to get rid of this global ``all_axioms`` tuple,
and/or have automatic registration there, and/or having a
register_axiom(...) method.
Special case: defining an axiom depending on several categories
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In some cases, the largest category where the axiom makes sense is the
intersection of two categories. This is typically the case for axioms
specifying compatibility conditions between two otherwise unrelated
operations, like ``Distributive`` which specifies a compatibility
between `*` and `+`. Ideally, we would want the ``Distributive`` axiom
to be defined by::
sage: Magmas() & AdditiveMagmas()
Join of Category of magmas and Category of additive magmas
The current infrastructure does not support this perfectly: indeed,
defining an axiom for a category `C` requires `C` to have a class of
its own; hence a :class:`~.category.JoinCategory` as above won't do;
we need to implement a new class like
:class:`~.magmas_and_additive_magmas.MagmasAndAdditiveMagmas`;
furthermore, we cannot yet model the fact that ``MagmasAndAdditiveMagmas()``
*is* the intersection of ``Magmas()`` and ``AdditiveMagmas()`` rather than a
mere subcategory::
sage: from sage.categories.magmas_and_additive_magmas import MagmasAndAdditiveMagmas
sage: Magmas() & AdditiveMagmas() is MagmasAndAdditiveMagmas()
False
sage: Magmas() & AdditiveMagmas() # todo: not implemented
Category of magmas and additive magmas
Still, there is a workaround to get the natural notations::
sage: (Magmas() & AdditiveMagmas()).Distributive()
Category of distributive magmas and additive magmas
sage: (Monoids() & CommutativeAdditiveGroups()).Distributive()
Category of rings
The trick is to define ``Distributive`` as usual in
:class:`~.magmas_and_additive_magmas.MagmasAndAdditiveMagmas`, and to
add a method :meth:`Magmas.SubcategoryMethods.Distributive` which
checks that ``self`` is a subcategory of both ``Magmas()`` and
``AdditiveMagmas()``, complains if not, and otherwise takes the
intersection of ``self`` with ``MagmasAndAdditiveMagmas()`` before
calling ``Distributive``.
The downsides of this workaround are:
- Creation of an otherwise empty class
:class:`~.magmas_and_additive_magmas.MagmasAndAdditiveMagmas`.
- Pollution of the namespace of ``Magmas()`` (and subcategories like
``Groups()``) with a method that is irrelevant (but safely complains
if called).
- ``C._with_axiom('Distributive')`` is not strictly equivalent to
``C.Distributive()``, which can be unpleasantly surprising::
sage: (Monoids() & CommutativeAdditiveGroups()).Distributive()
Category of rings
sage: (Monoids() & CommutativeAdditiveGroups())._with_axiom('Distributive')
Join of Category of monoids and Category of commutative additive groups
.. TODO::
Other categories that would be better implemented via an axiom
depending on a join category include:
- :class:`Algebras`: defining an associative unital algebra as a
ring and a module satisfying the suitable compatibility axiom
between inner multiplication and multiplication by scalars
(bilinearity). Of course this should be implemented at the level
of :class:`~.magmatic_algebras.MagmaticAlgebras`, if not higher.
- :class:`Bialgebras`: defining a bialgebra as an algebra and
  coalgebra where the coproduct is a morphism for the product.
- :class:`Bimodules`: defining a bimodule as a left and right
module where the two actions commute.
.. TODO::
- Design and implement an idiom for the definition of an axiom by a join
category.
- Or support more advanced joins, through some hook or registration
process to specify that a given category *is* the intersection of two
(or more) categories.
- Or at least improve the above workaround to avoid the last issue; this
possibly could be achieved using a class ``Magmas.Distributive`` with a
bit of ``__classcall__`` magic.
Handling multiple axioms, arborescence structure of the code
------------------------------------------------------------
Prelude
^^^^^^^
Let us consider the category of magmas, together with two of its
axioms, namely ``Associative`` and ``Unital``. An associative magma is
a *semigroup* and a unital semigroup is a *monoid*. We have also seen
that axioms commute::
sage: Magmas().Unital()
Category of unital magmas
sage: Magmas().Associative()
Category of semigroups
sage: Magmas().Associative().Unital()
Category of monoids
sage: Magmas().Unital().Associative()
Category of monoids
At the level of the classes implementing these categories, the
following comes as a general naturalization of the previous section::
sage: Magmas.Unital
<class 'sage.categories.magmas.Magmas.Unital'>
sage: Magmas.Associative
<class 'sage.categories.semigroups.Semigroups'>
sage: Magmas.Associative.Unital
<class 'sage.categories.monoids.Monoids'>
However, the following may look suspicious at first::
sage: Magmas.Unital.Associative
Traceback (most recent call last):
...
AttributeError: type object 'Magmas.Unital' has no attribute 'Associative'
The purpose of this section is to explain the design of the code
layout and the rationale for this mismatch.
Abstract model
^^^^^^^^^^^^^^
As we have seen in the :ref:`Primer <category-primer-axioms-explosion>`,
the objects of a category ``Cs()`` can usually satisfy, or not, many
different axioms. Out of all combinations of axioms, only a small
number are relevant in practice, in the sense that we actually want to
provide features for the objects satisfying these axioms.
Therefore, in the context of the category class ``Cs``, we want to
provide the system with a collection `(D_S)_{S\in \mathcal S}` where
each `S` is a subset of the axioms and the corresponding `D_S` is a
class for the subcategory of the objects of ``Cs()`` satisfying the
axioms in `S`. For example, if ``Cs()`` is the category of magmas, the
pairs `(S, D_S)` would include::
{Associative} : Semigroups
{Associative, Unital} : Monoids
{Associative, Unital, Inverse}: Groups
{Associative, Commutative} : Commutative Semigroups
{Unital, Inverse} : Loops
Then, given a subset `T` of axioms, we want the system to be able to
select automatically the relevant classes
`(D_S)_{S\in \mathcal S, S\subset T}`,
and build from them a category for the objects of ``Cs`` satisfying
the axioms in `T`, together with its hierarchy of super categories. If
`T` is in the indexing set `\mathcal S`, then the class of the
resulting category is directly `D_T`::
sage: C = Magmas().Unital().Inverse().Associative(); C
Category of groups
sage: type(C)
<class 'sage.categories.groups.Groups_with_category'>
Otherwise, we get a join category::
sage: C = Magmas().Infinite().Unital().Associative(); C
Category of infinite monoids
sage: type(C)
<class 'sage.categories.category.JoinCategory_with_category'>
sage: C.super_categories()
[Category of monoids, Category of infinite sets]
Concrete model as an arborescence of nested classes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We further want the construction to be efficient and amenable to
laziness. This led us to the following design decision: the collection
`(D_S)_{S\in \mathcal S}` of classes should be structured as an
arborescence (or equivalently a *rooted forest*). The root is ``Cs``,
corresponding to `S=\emptyset`. Any other class `D_S` should be the
child of a single class `D_{S'}` where `S'` is obtained from `S` by
removing a single axiom `A`. Of course, `D_{S'}` and `A` are
respectively the base category class and axiom of the category with
axiom `D_S` that we have met in the first section.
At this point, we urge the reader to explore the code of
:class:`Magmas` and
:class:`~.distributive_magmas_and_additive_magmas.DistributiveMagmasAndAdditiveMagmas`
and see how the arborescence structure on the categories with axioms
is reflected by the nesting of category classes.
Discussion of the design
^^^^^^^^^^^^^^^^^^^^^^^^
Performance
~~~~~~~~~~~
Thanks to the arborescence structure on subsets of axioms,
constructing the hierarchy of categories and computing intersections
can be made efficient with, roughly speaking, a linear/quadratic
complexity in the size of the involved category hierarchy multiplied
by the number of axioms (see Section :ref:`axioms-algorithmic`). This
is to be put in perspective with the manipulation of arbitrary
collections of subsets (aka boolean functions) which can easily raise
NP-hard problems.
Furthermore, thanks to its locality, the algorithms can be made
suitably lazy: in particular, only the involved category classes need
to be imported.
Flexibility
~~~~~~~~~~~
This design also brings in quite some flexibility, with the
possibility to support features such as defining new axioms depending
on other axioms and deduction rules. See below.
Asymmetry
~~~~~~~~~
As we have seen at the beginning of this section, this design
introduces an asymmetry. It's not so bad in practice, since in most
practical cases, we want to work incrementally. It's for example more
natural to describe :class:`FiniteFields` as :class:`Fields` with the
axiom ``Finite`` rather than :class:`Magmas` and
:class:`AdditiveMagmas` with all (or at least sufficiently many) of
the following axioms::
sage: sorted(Fields().axioms())
['AdditiveAssociative', 'AdditiveCommutative', 'AdditiveInverse',
'AdditiveUnital', 'Associative', 'Commutative', 'Distributive',
'Division', 'NoZeroDivisors', 'Unital']
The main limitation is that the infrastructure currently imposes to be
incremental by steps of a single axiom.
In practice, among the roughly 60 categories with axioms that are
currently implemented in Sage, most admitted a (rather) natural choice
of a base category and single axiom to add. For example, one usually
thinks more naturally of a monoid as a semigroup which is unital
rather than as a unital magma which is associative. Modeling this
asymmetry in the code actually brings a bonus: it is used for printing
out categories in a (heuristically) mathematician-friendly way::
sage: Magmas().Commutative().Associative()
Category of commutative semigroups
Only in a few cases is a choice made that feels mathematically
arbitrary. This is essentially in the chain of nested classes
:class:`.distributive_magmas_and_additive_magmas.DistributiveMagmasAndAdditiveMagmas.AdditiveAssociative.AdditiveCommutative.AdditiveUnital.Associative`.
Placeholder classes
~~~~~~~~~~~~~~~~~~~
Given that we can only add a single axiom at a time when implementing
a :class:`CategoryWithAxiom`, we need to create a few category classes
that are just placeholders. For the worst example, see the chain of
nested classes
:class:`.distributive_magmas_and_additive_magmas.DistributiveMagmasAndAdditiveMagmas.AdditiveAssociative.AdditiveCommutative.AdditiveUnital.Associative`.
This is suboptimal, but fits within the scope of the axiom
infrastructure which is to reduce a potentially exponential number of
placeholder category classes to just a couple.
Note also that, in the above example, it's likely that some of the
intermediate classes will grow to non placeholder ones, as people will
explore weaker variants of rings.
Mismatch between the arborescence of nested classes and the hierarchy of categories
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The fact that the hierarchy relation between categories is not
reflected directly as a relation between the classes may sound
suspicious at first! However, as mentioned in the primer, this is
actually a big selling point of the axioms infrastructure: by
calculating automatically the hierarchy relation between categories
with axioms one avoids the nightmare of maintaining it by hand.
Instead, only a rather minimal number of links needs to be maintained
in the code (one per category with axiom).
Besides, with the flexibility introduced by runtime deduction rules
(see below), the hierarchy of categories may depend on the parameters
of the categories and not just their class. So it's fine to make it
clear from the onset that the two relations do not match.
Evolutivity
~~~~~~~~~~~
At this point, the arborescence structure has to be hardcoded by hand
with the annoyances we have seen. This does not preclude, in a future
iteration, to design and implement some idiom for categories with
axioms that adds several axioms at once to a base category; maybe some
variation around::
class DistributiveMagmasAndAdditiveMagmas:
...
@category_with_axiom(
AdditiveAssociative,
AdditiveCommutative,
AdditiveUnital,
AdditiveInverse,
Associative)
def _(): return LazyImport('sage.categories.rngs', 'Rngs', at_startup=True)
or::
register_axiom_category(DistributiveMagmasAndAdditiveMagmas,
{AdditiveAssociative,
AdditiveCommutative,
AdditiveUnital,
AdditiveInverse,
Associative},
'sage.categories.rngs', 'Rngs', at_startup=True)
The infrastructure would then be in charge of building the appropriate
arborescence under the hood. Or rely on some database (see discussion
on :trac:`10963`, in particular at the end of comment 332).
Axioms defined upon other axioms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes an axiom can only be defined when some other axiom
holds. For example, the axiom ``NoZeroDivisors`` only makes sense if
there is a zero, that is if the axiom ``AdditiveUnital`` holds. Hence,
for the category
:class:`~.magmas_and_additive_magmas.MagmasAndAdditiveMagmas`, we
consider in the abstract model only those subsets of axioms where the
presence of ``NoZeroDivisors`` implies that of ``AdditiveUnital``. We
also want the axiom to be only available if meaningful::
sage: Rings().NoZeroDivisors()
Category of domains
sage: Rings().Commutative().NoZeroDivisors()
Category of integral domains
sage: Semirings().NoZeroDivisors()
Traceback (most recent call last):
...
AttributeError: 'Semirings_with_category' object has no attribute 'NoZeroDivisors'
Concretely, this is to be implemented by defining the new axiom in the
(``SubcategoryMethods`` nested class of the) appropriate category with
axiom. For example the axiom ``NoZeroDivisors`` would be naturally
defined in
:class:`.magmas_and_additive_magmas.MagmasAndAdditiveMagmas.Distributive.AdditiveUnital`.
.. NOTE::
The axiom ``NoZeroDivisors`` is currently defined in
:class:`Rings`, by simple lack of need for the feature; it should
be lifted up as soon as relevant, that is when some code will be
available for parents with no zero divisors that are not
necessarily rings.
.. _axioms-deduction-rules:
Deduction rules
^^^^^^^^^^^^^^^
A similar situation is when an axiom ``A`` of a category ``Cs``
implies some other axiom ``B``, with the same consequence as above on
the subsets of axioms appearing in the abstract model. For example, a
division ring necessarily has no zero divisors::
sage: 'NoZeroDivisors' in Rings().Division().axioms()
True
sage: 'NoZeroDivisors' in Rings().axioms()
False
This deduction rule is implemented by the method
:meth:`Rings.Division.extra_super_categories`::
sage: Rings().Division().extra_super_categories()
(Category of domains,)
In general, this is to be implemented by a method
``Cs.A.extra_super_categories`` returning a tuple ``(Cs().B(),)``, or
preferably ``(Ds().B(),)`` where ``Ds`` is the category defining the
axiom ``B``.
This follows the same idiom as for deduction rules about functorial
constructions (see :meth:`.covariant_functorial_construction.CovariantConstructionCategory.extra_super_categories`).
For example, the fact that a Cartesian product of associative magmas
(i.e. of semigroups) is an associative magma is implemented in
:meth:`Semigroups.CartesianProducts.extra_super_categories`::
sage: Magmas().Associative()
Category of semigroups
sage: Magmas().Associative().CartesianProducts().extra_super_categories()
[Category of semigroups]
Similarly, the fact that the algebra of a commutative magma is
commutative is implemented in
:meth:`Magmas.Commutative.Algebras.extra_super_categories`::
sage: Magmas().Commutative().Algebras(QQ).extra_super_categories()
[Category of commutative magmas]
.. WARNING::
In some situations this idiom is inapplicable as it would require
to implement two classes for the same category. This is the
purpose of the next section.
Special case
~~~~~~~~~~~~
In the previous examples, the deduction rule only had an influence on
the super categories of the category with axiom being constructed. For
example, when constructing ``Rings().Division()``, the rule
:meth:`Rings.Division.extra_super_categories` simply adds
``Rings().NoZeroDivisors()`` as a super category thereof.
In some situations this idiom is inapplicable because a class for the
category with axiom under construction already exists elsewhere. Take
for example Wedderburn's theorem: any finite division ring is
commutative, i.e. is a finite field. In other words,
``DivisionRings().Finite()`` *coincides* with ``Fields().Finite()``::
sage: DivisionRings().Finite()
Category of finite enumerated fields
sage: DivisionRings().Finite() is Fields().Finite()
True
Therefore we cannot create a class ``DivisionRings.Finite`` to hold
the desired ``extra_super_categories`` method, because there is
already a class for this category with axiom, namely
``Fields.Finite``.
A natural idiom would be to have ``DivisionRings.Finite`` be a link to
``Fields.Finite`` (locally introducing an undirected cycle in the
arborescence of nested classes). It would be a bit tricky to implement
though, since one would need to detect, upon constructing
``DivisionRings().Finite()``, that ``DivisionRings.Finite`` is
actually ``Fields.Finite``, in order to construct appropriately
``Fields().Finite()``; and reciprocally, upon computing the super
categories of ``Fields().Finite()``, to not try to add
``DivisionRings().Finite()`` as a super category.
Instead the current idiom is to have a method
``DivisionRings.Finite_extra_super_categories`` which mimics the
behavior of the would-be
``DivisionRings.Finite.extra_super_categories``::
sage: DivisionRings().Finite_extra_super_categories()
(Category of commutative magmas,)
This idiom is admittedly rudimentary, but consistent with how
mathematical facts specifying non trivial inclusion relations between
categories are implemented elsewhere in the various
``extra_super_categories`` methods of axiom categories and covariant
functorial constructions. Besides, it gives a natural spot (the
docstring of the method) to document and test the modeling of the
mathematical fact. Finally, Wedderburn's theorem is arguably a theorem
about division rings (in the context of division rings, finiteness
implies commutativity) and therefore lives naturally in
:class:`DivisionRings`.
An alternative would be to implement the category of finite division
rings (i.e. finite fields) in a class ``DivisionRings.Finite`` rather
than ``Fields.Finite``::
sage: from sage.categories.category_with_axiom import CategoryWithAxiom
sage: class MyDivisionRings(Category):
....: def super_categories(self):
....: return [Rings()]
sage: class MyFields(Category):
....: def super_categories(self):
....: return [MyDivisionRings()]
sage: class MyFiniteFields(CategoryWithAxiom):
....: _base_category_class_and_axiom = (MyDivisionRings, "Finite")
....: def extra_super_categories(self): # Wedderburn's theorem
....: return [MyFields()]
sage: MyDivisionRings.Finite = MyFiniteFields
sage: MyDivisionRings().Finite()
Category of my finite fields
sage: MyFields().Finite() is MyDivisionRings().Finite()
True
In general, if several categories ``C1s()``, ``C2s()``, ... are mapped to
the same category when applying some axiom ``A`` (that is ``C1s().A()
== C2s().A() == ...``), then one should be careful to implement this
category in a single class ``Cs.A``, and set up
``extra_super_categories`` or ``A_extra_super_categories`` methods as
appropriate. Each such method should return something like
``[C2s()]`` and not ``[C2s().A()]`` for the latter would likely lead
to an infinite recursion.
.. TOPIC:: Design discussion
Supporting similar deduction rules will be an important feature in
the future, with quite a few occurrences already implemented in
upcoming tickets. For the time being though there is a single
occurrence of this idiom outside of the tests. So this would be an
easy thing to refactor after :trac:`10963` if a better idiom is
found.
Larger synthetic examples
~~~~~~~~~~~~~~~~~~~~~~~~~
We now consider some larger synthetic examples to check that the
machinery works as expected. Let us start with a category defining a
bunch of axioms, using :func:`axiom` for conciseness (don't do it for
real axioms; they deserve a full documentation!)::
sage: from sage.categories.category_singleton import Category_singleton
sage: from sage.categories.category_with_axiom import axiom
sage: import sage.categories.category_with_axiom
sage: all_axioms = sage.categories.category_with_axiom.all_axioms
sage: all_axioms += ("B","C","D","E","F")
sage: class As(Category_singleton):
....: def super_categories(self):
....: return [Objects()]
....:
....: class SubcategoryMethods:
....: B = axiom("B")
....: C = axiom("C")
....: D = axiom("D")
....: E = axiom("E")
....: F = axiom("F")
....:
....: class B(CategoryWithAxiom):
....: pass
....: class C(CategoryWithAxiom):
....: pass
....: class D(CategoryWithAxiom):
....: pass
....: class E(CategoryWithAxiom):
....: pass
....: class F(CategoryWithAxiom):
....: pass
Now we construct a subcategory where, by some theorem of William,
axioms ``B`` and ``C`` together are equivalent to ``E`` and ``F``
together::
sage: class A1s(Category_singleton):
....: def super_categories(self):
....: return [As()]
....:
....: class B(CategoryWithAxiom):
....: def C_extra_super_categories(self):
....: return [As().E(), As().F()]
....:
....: class E(CategoryWithAxiom):
....: def F_extra_super_categories(self):
....: return [As().B(), As().C()]
sage: A1s().B().C()
Category of e f a1s
The axioms ``B`` and ``C`` do not show up in the name of the obtained
category because, for concision, the printing uses some heuristics to
not show axioms that are implied by others. But they are satisfied::
sage: sorted(A1s().B().C().axioms())
['B', 'C', 'E', 'F']
Note also that this is a join category::
sage: type(A1s().B().C())
<class 'sage.categories.category.JoinCategory_with_category'>
sage: A1s().B().C().super_categories()
[Category of e a1s,
Category of f as,
Category of b a1s,
Category of c as]
As desired, William's theorem holds::
sage: A1s().B().C() is A1s().E().F()
True
and propagates appropriately to subcategories::
sage: C = A1s().E().F().D().B().C()
sage: C is A1s().B().C().E().F().D() # commutativity
True
sage: C is A1s().E().F().E().F().D() # William's theorem
True
sage: C is A1s().E().E().F().F().D() # commutativity
True
sage: C is A1s().E().F().D() # idempotency
True
sage: C is A1s().D().E().F()
True
In this quick variant, we actually implement the category of ``b c
a2s``, and choose to do so in ``A2s.B.C``::
sage: class A2s(Category_singleton):
....: def super_categories(self):
....: return [As()]
....:
....: class B(CategoryWithAxiom):
....: class C(CategoryWithAxiom):
....: def extra_super_categories(self):
....: return [As().E(), As().F()]
....:
....: class E(CategoryWithAxiom):
....: def F_extra_super_categories(self):
....: return [As().B(), As().C()]
sage: A2s().B().C()
Category of e f a2s
sage: sorted(A2s().B().C().axioms())
['B', 'C', 'E', 'F']
sage: type(A2s().B().C())
<class '__main__.A2s.B.C_with_category'>
As desired, William's theorem and its consequences hold::
sage: A2s().B().C() is A2s().E().F()
True
sage: C = A2s().E().F().D().B().C()
sage: C is A2s().B().C().E().F().D() # commutativity
True
sage: C is A2s().E().F().E().F().D() # William's theorem
True
sage: C is A2s().E().E().F().F().D() # commutativity
True
sage: C is A2s().E().F().D() # idempotency
True
sage: C is A2s().D().E().F()
True
Finally, we "accidentally" implement the category of ``b c a1s``, both
in ``A3s.B.C`` and ``A3s.E.F``::
sage: class A3s(Category_singleton):
....: def super_categories(self):
....: return [As()]
....:
....: class B(CategoryWithAxiom):
....: class C(CategoryWithAxiom):
....: def extra_super_categories(self):
....: return [As().E(), As().F()]
....:
....: class E(CategoryWithAxiom):
....: class F(CategoryWithAxiom):
....: def extra_super_categories(self):
....: return [As().B(), As().C()]
We can still construct, say::
sage: A3s().B()
Category of b a3s
sage: A3s().C()
Category of c a3s
However,
::
sage: A3s().B().C() # not tested
runs into an infinite recursion loop, as ``A3s().B().C()`` wants to
have ``A3s().E().F()`` as super category and reciprocally.
.. TODO::
The above example violates the specifications (a category should
be modelled by at most one class), so it's appropriate that it
fails. Yet, the error message could be usefully complemented by
some hint at what the source of the problem is (a category
implemented in two distinct classes). Leaving a large enough piece
of the backtrace would be useful though, so that one can explore
where the issue comes from (e.g. with post mortem debugging).
Specifications
==============
After fixing some vocabulary, we summarize here some specifications
about categories and axioms.
The lattice of constructible categories
---------------------------------------
A mathematical category `C` is *implemented* if there is a class in
Sage modelling it; it is *constructible* if it is either implemented,
or is the intersection of *implemented* categories; in the latter case
it is modelled by a :class:`~.category.JoinCategory`. The comparison of two
constructible categories with the :meth:`Category.is_subcategory`
method is supposed to model the comparison of the corresponding
mathematical categories for inclusion of the objects (see
:ref:`category-primer-subcategory` for details). For example::
sage: Fields().is_subcategory(Rings())
True
However this modelling may be incomplete. It can happen that a
mathematical fact implying that a category `A` is a subcategory of a
category `B` is not implemented. Still, the comparison should endow
the set of constructible categories with a poset structure and in fact
a lattice structure.
In this lattice, the join of two categories (:meth:`Category.join`) is
supposed to model their intersection. Given that we compare categories
for inclusion, it would be more natural to call this operation the
*meet*; blames go to me (Nicolas) for originally comparing categories
by *amount of structure* rather than by *inclusion*. In practice, the
join of two categories may be a strict super category of their
intersection; first because this intersection might not be
constructible; second because Sage might miss some mathematical
information to recover the smallest constructible super category of
the intersection.
Axioms
------
We say that an axiom ``A`` is *defined by* a category ``Cs()`` if
``Cs`` defines an appropriate method ``Cs.SubcategoryMethods.A``, with
the semantic of the axiom specified in the documentation; for any
subcategory ``Ds()``, ``Ds().A()`` models the subcategory of the
objects of ``Ds()`` satisfying ``A``. In this case, we say that the
axiom ``A`` is *defined for* the category ``Ds()``. Furthermore,
``Ds`` *implements the axiom* ``A`` if ``Ds`` has a category with
axiom as nested class ``Ds.A``. The category ``Ds()`` *satisfies* the
axiom if ``Ds()`` is a subcategory of ``Cs().A()`` (meaning that all
the objects of ``Ds()`` are known to satisfy the axiom ``A``).
A digression on the structure of fibers when adding an axiom
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider the application `\phi_A` which maps a category to its
category of objects satisfying `A`. Equivalently, `\phi_A` is
computing the intersection with the defining category with axiom of
`A`. It follows immediately from the latter that `\phi_A` is a
regressive endomorphism of the lattice of categories. It restricts
to a regressive endomorphism ``Cs() |-> Cs().A()``
on the lattice of constructible categories.
This endomorphism may have non trivial fibers, as in our favorite
example: ``DivisionRings()`` and ``Fields()`` are in the same fiber
for the axiom ``Finite``::
sage: DivisionRings().Finite() is Fields().Finite()
True
Consider the intersection `S` of such a fiber of `\phi_A` with the
upper set `I_A` of categories that do not satisfy ``A``. The fiber
itself is a sublattice. However `I_A` is not guaranteed to be stable
under intersection (though exceptions should be rare). Therefore,
there is a priori no guarantee that `S` would be stable under
intersection. Also it's presumably finite, in fact small, but this is
not guaranteed either.
Specifications
--------------
- Any constructible category ``C`` should admit a finite number of
larger constructible categories.
- The methods ``super_categories``, ``extra_super_categories``, and
friends should always return strict supercategories.
For example, to specify that a finite division ring is a finite
field, ``DivisionRings.Finite_extra_super_categories`` should not
return ``Fields().Finite()``! It could possibly return ``Fields()``;
but it's preferable to return the largest category that contains the
relevant information, in this case ``Magmas().Commutative()``, and
to let the infrastructure apply the derivations.
- The base category of a :class:`CategoryWithAxiom` should be an
implemented category (i.e. not a
:class:`~.category.JoinCategory`). This is checked by
:meth:`CategoryWithAxiom._test_category_with_axiom`.
- Arborescent structure: Let ``Cs()`` be a category, and `S` be some
set of axioms defined in some super categories of ``Cs()`` but not
satisfied by ``Cs()``. Suppose we want to provide a category with
axiom for the elements of ``Cs()`` satisfying the axioms in
`S`. Then, there should be a single enumeration ``A1, A2, ..., Ak``
without repetition of axioms in `S` such that
``Cs.A1.A2....Ak`` is an implemented category.
Furthermore, every intermediate step
``Cs.A1.A2....Ai`` with `i\leq k` should be a category with axiom
having ``Ai`` as axiom and ``Cs.A1.A2....Ai-1`` as base category
class; this base category class should not satisfy ``Ai``. In
particular, when some axioms of `S` can be deduced from previous
ones by deduction rules, they should not appear in the enumeration
``A1, A2, ..., Ak``.
- In particular, if ``Cs()`` is a category that satisfies some axiom
``A`` (e.g. from one of its super categories), then it should not
implement that axiom. For example, a category class ``Cs`` can never
have a nested class ``Cs.A.A``. Similarly, applying the
specification recursively, a category satisfying ``A`` cannot have a
nested class ``Cs.A1.A2.A3.A`` where ``A1``, ``A2``, ``A3`` are
axioms.
- A category can only implement an axiom if this axiom is defined by
some super category. The code has not been systematically checked to
support having two super categories defining the same axiom (which
should of course have the same semantic). You are welcome to try, at
your own risk. :-)
- When a category defines an axiom or functorial construction ``A``,
this fixes the semantic of ``A`` for all the subcategories. In
particular, if two categories define ``A``, then these categories
should be independent, and either the semantic of ``A`` should be
the same, or there should be no natural intersection between the two
hierarchies of subcategories.
- Any super category of a
:class:`~.category.CategoryWithParameters` should either be a
:class:`~.category.CategoryWithParameters` or a
:class:`Category_singleton`.
- A :class:`CategoryWithAxiom` having a
:class:`~sage.categories.category_singleton.Category_singleton` as base
category should be a :class:`CategoryWithAxiom_singleton`. This is handled
automatically by :meth:`CategoryWithAxiom.__init__` and checked in
:meth:`CategoryWithAxiom._test_category_with_axiom`.
- A :class:`CategoryWithAxiom` having a
:class:`Category_over_base_ring` as base category should be a
:class:`Category_over_base_ring`. This currently has to be handled
by hand, using :class:`CategoryWithAxiom_over_base_ring`. This is
checked in :meth:`CategoryWithAxiom._test_category_with_axiom`.
.. TODO::
The following specifications would be desirable but are not yet
implemented:
- A functorial construction category (Graded, CartesianProducts,
...) having a :class:`Category_singleton` as base category
should be a :class:`CategoryWithAxiom_singleton`.
Nothing difficult to implement, but this will need to rework the
current "no subclass of a concrete class" assertion test of
:meth:`Category_singleton.__classcall__`.
- Similarly, a covariant functorial construction category having a
:class:`Category_over_base_ring` as base category should be a
:class:`Category_over_base_ring`.
The following specification might be desirable, or not:
- A join category involving a :class:`Category_over_base_ring`
should be a :class:`Category_over_base_ring`. In the mean
time, a ``base_ring`` method is automatically provided for most
of those by :meth:`Modules.SubcategoryMethods.base_ring`.
Design goals
============
As pointed out in the primer, the main design goal of the axioms
infrastructure is to subdue the potential combinatorial explosion of
the category hierarchy by letting the developer focus on implementing
a few bookshelves for which there is actual code or mathematical
information, and let Sage *compose dynamically and lazily* these
building blocks to construct the minimal hierarchy of classes needed
for the computation at hand. This allows for the infrastructure to
scale smoothly as bookshelves are added, extended, or reorganized.
Other design goals include:
- Flexibility in the code layout: the category of, say, finite sets
can be implemented either within the Sets category (in a nested
class ``Sets.Finite``), or in a separate file (typically in a class
``FiniteSets`` in a lazily imported module
sage.categories.finite_sets).
- Single point of truth: a theorem, like Wedderburn's, should be
implemented in a single spot.
- Single entry point: for example, from the entry :class:`Rings`, one
can explore a whole range of related categories just by applying
axioms and constructions::
sage: Rings().Commutative().Finite().NoZeroDivisors()
Category of finite integral domains
sage: Rings().Finite().Division()
Category of finite enumerated fields
This will allow for progressively getting rid of all the entries
like :class:`GradedHopfAlgebrasWithBasis` which are polluting the
global name space.
Note that this is not about precluding the existence of multiple
natural ways to construct the same category::
sage: Groups().Finite()
Category of finite groups
sage: Monoids().Finite().Inverse()
Category of finite groups
sage: Sets().Finite() & Monoids().Inverse()
Category of finite groups
- Concise idioms for the users (adding axioms, ...)
- Concise idioms and well highlighted hierarchy of bookshelves for
the developer (especially with code folding)
- Introspection friendly (listing the axioms, recovering the mixins)
.. NOTE::
The constructor for instances of this class takes as input the
base category. Hence, they should in principle be constructed
as::
sage: FiniteSets(Sets())
Category of finite sets
sage: Sets.Finite(Sets())
Category of finite sets
None of these idioms are really practical for the user. So instead,
this object is to be constructed using any of the following idioms::
sage: Sets()._with_axiom('Finite')
Category of finite sets
sage: FiniteSets()
Category of finite sets
sage: Sets().Finite()
Category of finite sets
The later two are implemented using respectively
:meth:`CategoryWithAxiom.__classcall__` and
:meth:`CategoryWithAxiom.__classget__`.
Upcoming features
=================
.. TODO::
- Implement compatibility axiom / functorial constructions. For
example, one would want to have::
A.CartesianProducts() & B.CartesianProducts() = (A&B).CartesianProducts()
- Once full subcategories are implemented (see :trac:`10668`),
make the relevant categories with axioms be such. This can be
done systematically for, e.g., the axioms ``Associative`` or
``Commutative``, but not for the axiom ``Unital``: a semigroup
morphism between two monoids need not preserve the unit.
Should all full subcategories be implemented in terms of axioms?
.. _axioms-algorithmic:
Algorithms
==========
Computing joins
---------------
The workhorse of the axiom infrastructure is the algorithm for
computing the join `J` of a set `C_1, \ldots, C_k` of categories (see
:meth:`Category.join`). Formally, `J` is defined as the largest
constructible category such that `J \subset C_i` for all `i`, and
`J \subset C.A()` for every constructible category `C \supset J`
and any axiom `A` satisfied by `J`.
The join `J` is naturally computed as a closure in the lattice of
constructible categories: it starts with the `C_i`'s, gathers the set
`S` of all the axioms satisfied by them, and repeatedly adds each
axiom `A` to those categories that do not yet satisfy `A` using
:meth:`Category._with_axiom`. Due to deduction rules or (extra) super
categories, new categories or new axioms may appear in the
process. The process stops when each remaining category has been
combined with each axiom. In practice, only the smallest categories
are kept along the way; this is correct because adding an axiom is
covariant: ``C.A()`` is a subcategory of ``D.A()`` whenever ``C`` is a
subcategory of ``D``.
As usual in such closure computations, the result does not depend on
the order of execution. Furthermore, given that adding an axiom is an
idempotent and regressive operation, the process is guaranteed to stop
in a number of steps which is bounded by the number of super
categories of `J`. In particular, it is a finite process.
.. TODO::
Detail this a bit. What could typically go wrong is a situation
where, for some category ``C1``, ``C1.A()`` specifies a category
``C2`` as super category such that ``C2.A()`` specifies ``C3`` as
super category such that ...; this would clearly cause an infinite
execution. Note that this situation violates the specifications
since ``C1.A()`` is supposed to be a subcategory of ``C2.A()``,
... so we would have an infinite increasing chain of constructible
categories.
It's reasonable to assume that there is a finite number of axioms
defined in the code. There remains to use this assumption to argue
that any infinite execution of the algorithm would give rise to
such an infinite sequence.
Adding an axiom
---------------
Let ``Cs`` be a category and ``A`` an axiom defined for this
category. To compute ``Cs().A()``, there are two cases.
Adding an axiom ``A`` to a category ``Cs()`` not implementing it
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this case, ``Cs().A()`` returns the join of:
- ``Cs()``
- ``Bs().A()`` for every direct super category ``Bs()`` of ``Cs()``
- the categories appearing in ``Cs().A_extra_super_categories()``
This is a highly recursive process. In fact, as such, it would run
right away into an infinite loop! Indeed, the join of ``Cs()`` with
``Bs().A()`` would trigger the construction of ``Cs().A()`` and
reciprocally. To avoid this, the :meth:`Category.join` method itself
does not use :meth:`Category._with_axiom` to add axioms, but its
sister :meth:`Category._with_axiom_as_tuple`; the latter builds a
tuple of categories that should be joined together but leaves the
computation of the join to its caller, the master join calculation.
Adding an axiom ``A`` to a category ``Cs()`` implementing it
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In this case ``Cs().A()`` simply constructs an instance `D` of
``Cs.A`` which models the desired category. The non trivial part is
the construction of the super categories of `D`. Very much like
above, this includes:
- ``Cs()``
- ``Bs().A()`` for every super category ``Bs()`` of ``Cs()``
- the categories appearing in ``D.extra_super_categories()``
This by itself may not be sufficient, due in particular to deduction
rules. One may for example discover a new axiom ``A1`` satisfied by
`D`, imposing to add ``A1`` to all of the above categories. Therefore
the super categories are computed as the join of the above categories.
Up to one twist: as is, the computation of this join would trigger
recursively a recalculation of ``Cs().A()``! To avoid this,
:meth:`Category.join` is given an optional argument to specify that
the axiom ``A`` should *not* be applied to ``Cs()``.
Sketch of proof of correctness and evaluation of complexity
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As we have seen, this is a highly recursive process! In particular,
one needs to argue that, as long as the specifications are satisfied,
the algorithm won't run in an infinite recursion, in particular in
case of deduction rule.
.. TOPIC:: Theorem
Consider the construction of a category `C` by adding an axiom to
a category (or computing of a join). Let `H` be the hierarchy of
implemented categories above `C`. Let `n` and `m` be respectively
the number of categories and the number of inheritance edges in
`H`.
Assuming that the specifications are satisfied, the construction
of `C` involves constructing the categories in `H` exactly once
(and no other category), and at most `n` join calculations. In
particular, the time complexity should be, roughly speaking,
bounded by `n^2`. In particular, it's finite.
.. TOPIC:: Remark
It's actually to be expected that the complexity is more of the
order of magnitude of `na+m`, where `a` is the number of axioms
satisfied by `C`. But this is to be checked in detail, in
particular due to the many category inclusion tests involved.
The key argument is that :class:`Category.join` cannot call itself
recursively without going through the construction of some implemented
category. In turn, the construction of some implemented category `C`
only involves constructing strictly smaller categories, and possibly a
direct join calculation whose result is strictly smaller than
`C`. This statement is obvious if `C` implements the
``super_categories`` method directly, and easy to check for functorial
construction categories. It requires a proof for categories with
axioms since there is a recursive join involved.
.. TOPIC:: Lemma
Let `C` be a category implementing an axiom `A`. Recall that the
construction of ``C.A()`` involves a single direct join
calculation for computing the super categories. No other direct
join calculation occur, and the calculation involves only
implemented categories that are strictly smaller than ``C.A()``.
.. TOPIC:: Proof
Let `D` be a category involved in the join calculation for the
super categories of ``C.A()``, and assume by induction that `D` is
strictly smaller than ``C.A()``. A category `E` newly constructed
from `D` can come from:
- ``D.(extra_)super_categories()``
In this case, the specifications impose that `E` should be
strictly smaller than `D` and therefore strictly smaller than
`C`.
- ``D.with_axiom_as_tuple('B')`` or ``D.B_extra_super_categories()``
for some axiom `B`
In this case, the axiom `B` is satisfied by some subcategory of
``C.A()``, and therefore must be satisfied by ``C.A()`` itself.
Since adding an axiom is a regressive construction, `E` must be a
subcategory of ``C.A()``. If there is equality, then `E` and
``C.A()`` must have the same class, and therefore, `E` must be
directly constructed as ``C.A()``. However the join construction
explicitly prevents this call.
Note that a call to ``D.with_axiom_as_tuple('B')`` does not trigger
a direct join calculation; but of course, if `D` implements `B`,
the construction of the implemented category ``E = D.B()`` will
involve a strictly smaller join calculation.
Conclusion
==========
This is the end of the axioms documentation. Congratulations on
having read that far!
Tests
=====
.. NOTE::
Quite a few categories with axioms are constructed early on during
Sage's startup. Therefore, when playing around with the
implementation of the axiom infrastructure, it is easy to break
Sage. The following sequence of tests is designed to test the
infrastructure from the ground up even in a partially broken
Sage. Please don't remove the imports!
TESTS:
::
sage: Magmas()
Category of magmas
sage: Magmas().Finite()
Category of finite magmas
sage: Magmas().Unital()
Category of unital magmas
sage: Magmas().Commutative().Unital()
Category of commutative unital magmas
sage: Magmas().Associative()
Category of semigroups
sage: Magmas().Associative() & Magmas().Unital().Inverse() & Sets().Finite()
Category of finite groups
sage: _ is Groups().Finite()
True
sage: from sage.categories.semigroups import Semigroups
sage: Semigroups()
Category of semigroups
sage: Semigroups().Finite()
Category of finite semigroups
sage: from sage.categories.modules_with_basis import ModulesWithBasis
sage: ModulesWithBasis(QQ) is Modules(QQ).WithBasis()
True
sage: ModulesWithBasis(ZZ) is Modules(ZZ).WithBasis()
True
sage: Semigroups().Unital()
Category of monoids
sage: Semigroups().Unital().Commutative()
Category of commutative monoids
sage: Semigroups().Commutative()
Category of commutative semigroups
sage: Semigroups().Commutative().Unital()
Category of commutative monoids
sage: Semigroups().Commutative().Unital().super_categories()
[Category of monoids, Category of commutative magmas]
sage: AdditiveMagmas().AdditiveAssociative().AdditiveCommutative()
Category of commutative additive semigroups
sage: from sage.categories.magmas_and_additive_magmas import MagmasAndAdditiveMagmas
sage: C = CommutativeAdditiveMonoids() & Monoids() & MagmasAndAdditiveMagmas().Distributive(); C
Category of semirings
sage: C is (CommutativeAdditiveMonoids() & Monoids()).Distributive()
True
sage: C.AdditiveInverse()
Category of rings
sage: Rings().axioms()
frozenset({'AdditiveAssociative',
'AdditiveCommutative',
'AdditiveInverse',
'AdditiveUnital',
'Associative',
'Distributive',
'Unital'})
sage: sorted(Rings().axioms())
['AdditiveAssociative', 'AdditiveCommutative', 'AdditiveInverse',
'AdditiveUnital', 'Associative', 'Distributive', 'Unital']
sage: Domains().Commutative()
Category of integral domains
sage: DivisionRings().Finite() # Wedderburn's theorem
Category of finite enumerated fields
sage: FiniteMonoids().Algebras(QQ)
Join of Category of monoid algebras over Rational Field
and Category of finite dimensional algebras with basis over Rational Field
and Category of finite set algebras over Rational Field
sage: FiniteGroups().Algebras(QQ)
Category of finite group algebras over Rational Field
"""
# ****************************************************************************
# Copyright (C) 2011-2014 Nicolas M. Thiery <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function
import importlib
import re
from sage.misc.cachefunc import cached_method, cached_function
from sage.misc.lazy_attribute import lazy_class_attribute
from sage.misc.lazy_import import LazyImport
from sage.misc.misc import call_method
from sage.categories.category import Category
from sage.categories.category_singleton import Category_singleton
from sage.categories.category_types import Category_over_base_ring
from sage.structure.dynamic_class import DynamicMetaclass
from sage.categories.category_cy_helper import AxiomContainer, canonicalize_axioms
# The order of the axioms in this list implies that
# Magmas().Commutative().Unital() is printed as
# ``Category of commutative unital magmas''
all_axioms = AxiomContainer()
# NOTE(review): "Flying" and "Blue" look like sample axioms used by the test
# categories of this module rather than mathematical axioms -- confirm before
# reordering or removing them.  Reordering entries changes how categories
# with several axioms are printed (see the comment above).
all_axioms += ("Flying", "Blue",
               "Compact",
               "Differentiable", "Smooth", "Analytic", "AlmostComplex",
               "FinitelyGeneratedAsMagma",
               "WellGenerated",
               "Facade", "Finite", "Infinite","Enumerated",
               "Complete",
               "Nilpotent",
               "FiniteDimensional", "Connected", "WithBasis",
               "Irreducible",
               "Commutative", "Associative", "Inverse", "Unital", "Division", "NoZeroDivisors", "Cellular",
               "AdditiveCommutative", "AdditiveAssociative", "AdditiveInverse", "AdditiveUnital",
               "Distributive",
               "Endset",
               "Pointed",
               "Stratified",
               )
def uncamelcase(s, separator=" "):
    """
    Convert a CamelCase string into lower-case words joined by ``separator``.

    EXAMPLES::

        sage: sage.categories.category_with_axiom.uncamelcase("FiniteDimensionalAlgebras")
        'finite dimensional algebras'
        sage: sage.categories.category_with_axiom.uncamelcase("JTrivialMonoids")
        'j trivial monoids'
        sage: sage.categories.category_with_axiom.uncamelcase("FiniteDimensionalAlgebras", "_")
        'finite_dimensional_algebras'
    """
    # Insert the separator at each word boundary (i.e. just before any
    # capital letter that is not the first character), then lower-case.
    return re.sub(r"(?<!^)(?=[A-Z])", separator, s).lower()
def base_category_class_and_axiom(cls):
    """
    Try to deduce the base category and the axiom from the name of ``cls``.

    The heuristic is to try to decompose the name as the concatenation
    of the name of a category and the name of an axiom, and looking up
    that category in the standard location (i.e. in
    :mod:`sage.categories.hopf_algebras` for :class:`HopfAlgebras`,
    and in :mod:`sage.categories.sets_cat` as a special case
    for :class:`Sets`).

    If the heuristic succeeds, the result is guaranteed to be
    correct. Otherwise, an error is raised.

    EXAMPLES::

        sage: from sage.categories.category_with_axiom import base_category_class_and_axiom, CategoryWithAxiom
        sage: base_category_class_and_axiom(FiniteSets)
        (<class 'sage.categories.sets_cat.Sets'>, 'Finite')
        sage: Sets.Finite
        <class 'sage.categories.finite_sets.FiniteSets'>
        sage: base_category_class_and_axiom(Sets.Finite)
        (<class 'sage.categories.sets_cat.Sets'>, 'Finite')
        sage: base_category_class_and_axiom(FiniteDimensionalHopfAlgebrasWithBasis)
        (<class 'sage.categories.hopf_algebras_with_basis.HopfAlgebrasWithBasis'>, 'FiniteDimensional')
        sage: base_category_class_and_axiom(HopfAlgebrasWithBasis)
        (<class 'sage.categories.hopf_algebras.HopfAlgebras'>, 'WithBasis')

    Along the way, this does some sanity checks::

        sage: class FacadeSemigroups(CategoryWithAxiom):
        ....:     pass
        sage: base_category_class_and_axiom(FacadeSemigroups)
        Traceback (most recent call last):
        ...
        AssertionError: Missing (lazy import) link for <class 'sage.categories.semigroups.Semigroups'> to <class '__main__.FacadeSemigroups'> for axiom Facade?

        sage: Semigroups.Facade = FacadeSemigroups
        sage: base_category_class_and_axiom(FacadeSemigroups)
        (<class 'sage.categories.semigroups.Semigroups'>, 'Facade')

    .. NOTE::

        In the following example, we could possibly retrieve ``Sets``
        from the class name. However this cannot be implemented
        robustly until :trac:`9107` is fixed. Anyway this feature
        has not been needed so far::

            sage: Sets.Infinite
            <class 'sage.categories.sets_cat.Sets.Infinite'>
            sage: base_category_class_and_axiom(Sets.Infinite)
            Traceback (most recent call last):
            ...
            TypeError: Could not retrieve the base category class and axiom for <class 'sage.categories.sets_cat.Sets.Infinite'>.
            ...
    """
    if "." in cls.__name__:
        # Case 1: class name of the form Sets.Infinite
        # Start of implementation when #9107 will be fixed:
        # axiom = cls.__name__.split(".")[-1]
        # ...
        pass
    else:
        # Case 2: class name of the form FiniteSets or AlgebrasWithBasis,
        # with the base class (say Algebras) being implemented in the
        # standard location (sage.categories.algebras)
        name = cls.__name__
        # Try each known axiom in turn.  "WithBasis" is the one axiom that
        # appears as a suffix of the class name (AlgebrasWithBasis); all
        # other axioms are prefixes (FiniteSets, CommutativeRings, ...).
        for axiom in all_axioms:
            if axiom == "WithBasis" and name.endswith(axiom):
                base_name = name[:-len(axiom)]
            elif name.startswith(axiom):
                base_name = name[len(axiom):]
            else:
                continue
            if base_name == "Sets": # Special case for Sets which is in sets_cat
                base_module_name = "sets_cat"
            else:
                base_module_name = uncamelcase(base_name, "_")
            try:
                # Import the candidate base category class from its standard
                # module location, and check that it links back to ``cls``
                # for this axiom; only then is the guess known to be correct.
                base_module = importlib.import_module("sage.categories."+base_module_name)
                base_category_class = getattr(base_module, base_name)
                assert getattr(base_category_class, axiom, None) is cls, \
                    "Missing (lazy import) link for {} to {} for axiom {}?".format(base_category_class, cls, axiom)
                return base_category_class, axiom
            except (ImportError,AttributeError):
                # Wrong guess (no such module, or no such class in it):
                # move on to the next axiom.
                pass
    raise TypeError("""Could not retrieve the base category class and axiom for {}.
Please specify it explicitly using the attribute _base_category_class_and_axiom.
See CategoryWithAxiom for details.""".format(cls))
@cached_function
def axiom_of_nested_class(cls, nested_cls):
    r"""
    Return the axiom ``A`` such that ``nested_cls`` is ``cls.A``.

    When ``nested_cls`` carries an explicit
    ``_base_category_class_and_axiom`` attribute, the axiom is read off
    it directly.  Otherwise heuristics are used: the nested class may
    carry the plain name of the axiom, or the name of the axiom
    prepended or appended to the name of ``cls``.

    EXAMPLES::

        sage: from sage.categories.category_with_axiom import TestObjects, axiom_of_nested_class
        sage: axiom_of_nested_class(TestObjects, TestObjects.FiniteDimensional)
        'FiniteDimensional'
        sage: axiom_of_nested_class(TestObjects.FiniteDimensional, TestObjects.FiniteDimensional.Finite)
        'Finite'
        sage: axiom_of_nested_class(Sets, FiniteSets)
        'Finite'
        sage: axiom_of_nested_class(Algebras, AlgebrasWithBasis)
        'WithBasis'

    In all other cases, the nested class should provide an attribute
    ``_base_category_class_and_axiom``::

        sage: Semigroups._base_category_class_and_axiom
        (<class 'sage.categories.magmas.Magmas'>, 'Associative')
        sage: axiom_of_nested_class(Magmas, Semigroups)
        'Associative'
    """
    try:
        # An explicit attribute, when present, is authoritative.
        axiom = nested_cls.__dict__["_base_category_class_and_axiom"][1]
    except KeyError:
        assert not isinstance(cls, DynamicMetaclass)
        inner_name = nested_cls.__name__.split(".")[-1]
        if inner_name in all_axioms:
            # The nested class is named after the axiom itself.
            axiom = inner_name
        else:
            # Strip the name of ``cls`` from either end of the nested
            # class name; what remains should be the axiom.
            outer_name = cls.__name__.split(".")[-1]
            if inner_name.startswith(outer_name):
                axiom = inner_name[len(outer_name):]
            elif inner_name.endswith(outer_name):
                axiom = inner_name[:-len(outer_name)]
            else:
                raise ValueError("could not infer axiom for the nested class {} of {}".format(nested_cls, cls))
    # Sanity checks: the deduced name must be a known axiom, and ``cls``
    # must actually hold ``nested_cls`` under that name.
    assert axiom in all_axioms, \
        "Incorrect deduction ({}) for the name of the axiom for the nested class {} of {}".format(axiom, nested_cls, cls)
    assert axiom in cls.__dict__ and cls.__dict__[axiom] == nested_cls, \
        "{} not a nested axiom class of {} for axiom {}".format(nested_cls, cls, axiom)
    return axiom
class CategoryWithAxiom(Category):
r"""
An abstract class for categories obtained by adding an axiom
to a base category.
See the :mod:`category primer <sage.categories.primer>`, and in
particular its :ref:`section about axioms <category-primer-axioms>`
for an introduction to axioms, and :class:`CategoryWithAxiom` for
how to implement axioms and the documentation of the axiom
infrastructure.
.. automethod:: CategoryWithAxiom.__classcall__
.. automethod:: CategoryWithAxiom.__classget__
.. automethod:: CategoryWithAxiom.__init__
.. automethod:: CategoryWithAxiom._repr_object_names
.. automethod:: CategoryWithAxiom._repr_object_names_static
.. automethod:: CategoryWithAxiom._test_category_with_axiom
.. automethod:: CategoryWithAxiom._without_axioms
"""
    @lazy_class_attribute
    def _base_category_class_and_axiom(cls):
        r"""
        The class of the base category and the axiom for this class.

        By default, and when possible, this attribute is deduced from
        the name of this class (see
        :func:`base_category_class_and_axiom`). For a nested class,
        when the category is first created from its base category as
        in e.g. ``Sets().Infinite()``, this attribute is instead set
        explicitly by :meth:`__classget__`.

        When this is not sufficient, that is when ``cls`` is not
        implemented as a nested class and the base category and the
        axiom cannot be deduced from the name of ``cls``, this
        attribute should be set explicitly by ``cls``.

        The origin of the attribute is stored in the attribute
        ``_base_category_class_and_axiom_origin``.

        .. SEEALSO:: :meth:`_axiom`

        EXAMPLES:

        ``CommutativeRings`` is not a nested class, but the name of
        the base category and the axiom can be deduced::

            sage: CommutativeRings()._base_category_class_and_axiom
            (<class 'sage.categories.rings.Rings'>, 'Commutative')
            sage: CommutativeRings()._base_category_class_and_axiom_origin
            'deduced by base_category_class_and_axiom'

        ``Sets.Infinite`` is a nested class, so the attribute is set
        by :meth:`CategoryWithAxiom.__classget__` the first time
        ``Sets().Infinite()`` is called::

            sage: Sets().Infinite()
            Category of infinite sets
            sage: Sets.Infinite._base_category_class_and_axiom
            (<class 'sage.categories.sets_cat.Sets'>, 'Infinite')
            sage: Sets.Infinite._base_category_class_and_axiom_origin
            'set by __classget__'

        ``Fields`` is not a nested class, and the name of the base
        category and axioms cannot be deduced from the name
        ``Fields``; so this attribute needs to be set explicitly in
        the ``Fields`` class::

            sage: Fields()._base_category_class_and_axiom
            (<class 'sage.categories.division_rings.DivisionRings'>, 'Commutative')
            sage: Fields()._base_category_class_and_axiom_origin
            'hardcoded'

        .. NOTE::

            The base category class is often another category with
            axiom, therefore having a special ``__classget__`` method.
            Storing the base category class and the axiom in a single
            tuple attribute -- instead of two separate attributes --
            has the advantage of not triggering, for example,
            ``Semigroups.__classget__`` upon
            ``Monoids._base_category_class``.
        """
        base_category_class, axiom = base_category_class_and_axiom(cls)
        # Record how the attribute was obtained, for introspection.
        cls._base_category_class_and_axiom_origin = "deduced by base_category_class_and_axiom"
        return (base_category_class, axiom)

    # Default origin label for classes that set the attribute explicitly
    # in their source; the lazy attribute above overrides it with
    # "deduced ..." and __classget__ with "set by __classget__".
    _base_category_class_and_axiom_origin = "hardcoded"
@lazy_class_attribute
def _axiom(cls):
r"""
The axiom for this category with axiom.
.. SEEALSO:: :meth:`_base_category_class_and_axiom`
EXAMPLES::
sage: FiniteSets._axiom
'Finite'
sage: Sets.Finite._axiom
'Finite'
sage: Algebras.Commutative._axiom
'Commutative'
The result can be less obvious::
sage: Semigroups._axiom
'Associative'
sage: Rings._axiom
'Unital'
sage: Fields._axiom
'Commutative'
"""
return cls._base_category_class_and_axiom[1]
    @staticmethod
    def __classcall__(cls, *args, **options):
        """
        Make ``FoosBar(**)`` an alias for ``Foos(**)._with_axiom("Bar")``.

        EXAMPLES::

            sage: FiniteGroups()
            Category of finite groups
            sage: ModulesWithBasis(ZZ)
            Category of modules with basis over Integer Ring
            sage: AlgebrasWithBasis(QQ)
            Category of algebras with basis over Rational Field

        This is relevant when e.g. ``Foos(**)`` does some non trivial
        transformations::

            sage: Modules(QQ) is VectorSpaces(QQ)
            True
            sage: type(Modules(QQ))
            <class 'sage.categories.vector_spaces.VectorSpaces_with_category'>
            sage: ModulesWithBasis(QQ) is VectorSpaces(QQ).WithBasis()
            True
            sage: type(ModulesWithBasis(QQ))
            <class 'sage.categories.vector_spaces.VectorSpaces.WithBasis_with_category'>
        """
        (base_category_class, axiom) = cls._base_category_class_and_axiom
        if len(args) == 1 and not options and isinstance(args[0], base_category_class):
            # Called with an instance of the base category: construct the
            # category-with-axiom directly on top of it.
            return super(CategoryWithAxiom, cls).__classcall__(cls, args[0])
        else:
            # The "obvious" idiom
            ## return cls(base_category_class(*args, **options))
            # fails with ModulesWithBasis(QQ) as follows: The
            # base_category_class is Modules, but Modules(QQ) is an instance
            # of VectorSpaces and not of Modules. Hence,
            # ModulesWithBasis.__classcall__ will not accept this instance as
            # the first argument. Instead, we apply the axiom to the instance:
            return base_category_class(*args, **options)._with_axiom(axiom)
    @staticmethod
    def __classget__(cls, base_category, base_category_class):
        r"""
        Implement the binding behavior for categories with axioms.

        This method implements a binding behavior on category with
        axioms so that, when a category ``Cs`` implements an axiom
        ``A`` with a nested class ``Cs.A``, the expression ``Cs().A``
        evaluates to the method defining the axiom ``A`` and not the
        nested class. See `those design notes
        <category-with-axiom-design>`_ for the rationale behind this
        behavior.

        EXAMPLES::

            sage: Sets().Infinite()
            Category of infinite sets
            sage: Sets().Infinite
            Cached version of <function ...Infinite at ...>
            sage: Sets().Infinite.f == Sets.SubcategoryMethods.Infinite.f
            True

        We check that this also works when the class is implemented in
        a separate file, and lazy imported::

            sage: Sets().Finite
            Cached version of <function ...Finite at ...>

        There is no binding behavior when accessing ``Finite`` or
        ``Infinite`` from the class of the category instead of the
        category itself::

            sage: Sets.Finite
            <class 'sage.categories.finite_sets.FiniteSets'>
            sage: Sets.Infinite
            <class 'sage.categories.sets_cat.Sets.Infinite'>

        This method also initializes the attribute
        ``_base_category_class_and_axiom`` if not already set::

            sage: Sets.Infinite._base_category_class_and_axiom
            (<class 'sage.categories.sets_cat.Sets'>, 'Infinite')
            sage: Sets.Infinite._base_category_class_and_axiom_origin
            'set by __classget__'
        """
        # TODO: this is super paranoid; see if this can be simplified a bit
        if base_category is not None:
            assert base_category.__class__ is base_category_class
            assert isinstance(base_category_class, DynamicMetaclass)
        if isinstance(base_category_class, DynamicMetaclass):
            # Unwrap the dynamically-created "..._with_category" class to
            # reach the plain category class it was built from.
            base_category_class = base_category_class.__base__
        if "_base_category_class_and_axiom" not in cls.__dict__:
            cls._base_category_class_and_axiom = (base_category_class, axiom_of_nested_class(base_category_class, cls))
            cls._base_category_class_and_axiom_origin = "set by __classget__"
        else:
            assert cls._base_category_class_and_axiom[0] is base_category_class, \
                "base category class for {} mismatch; expected {}, got {}".format(
                    cls, cls._base_category_class_and_axiom[0], base_category_class)
        # Workaround #15648: if Rings.Finite is a LazyImport object,
        # this forces the substitution of the object back into Rings
        # to avoid resolving the lazy import over and over
        if isinstance(base_category_class.__dict__[cls._axiom], LazyImport):
            setattr(base_category_class, cls._axiom, cls)
        if base_category is None:
            return cls
        # For Rings().Finite, this returns the method
        # Sets.SubcategoryMethods.Finite, with its first argument bound to Rings()
        return getattr(super(base_category.__class__.__base__, base_category), cls._axiom)
    def __init__(self, base_category):
        """
        Initialize this category with axiom on top of ``base_category``.

        INPUT:

        - ``base_category`` -- the category to which the axiom is added

        TESTS::

            sage: C = Sets.Finite(); C
            Category of finite sets
            sage: type(C)
            <class 'sage.categories.finite_sets.FiniteSets_with_category'>
            sage: type(C).__base__.__base__
            <class 'sage.categories.category_with_axiom.CategoryWithAxiom_singleton'>
            sage: TestSuite(C).run()
        """
        # A hack to upgrade axiom categories of singleton categories
        # to be singleton categories themselves: rewrite, in place, the
        # first base of the dynamically created class.
        if isinstance(base_category, Category_singleton) and not isinstance(self, CategoryWithAxiom_singleton):
            cls = self.__class__
            assert cls.__base__ == CategoryWithAxiom
            cls.__bases__ = (CategoryWithAxiom_singleton,)+cls.__bases__[1:]
        self._base_category = base_category
        Category.__init__(self)
    def _test_category_with_axiom(self, **options):
        r"""
        Run generic tests on this category with axioms.

        .. SEEALSO:: :class:`TestSuite`.

        This checks that an axiom category of a
        :class:`Category_singleton` is a singleton category, and
        similarly for :class:`Category_over_base_ring`.

        EXAMPLES::

            sage: Sets().Finite()._test_category_with_axiom()
            sage: Modules(ZZ).FiniteDimensional()._test_category_with_axiom()
        """
        tester = self._tester(**options)
        base = self.base_category()
        if isinstance(base, Category_singleton):
            tester.assertIsInstance(self, CategoryWithAxiom_singleton)
        if isinstance(base, Category_over_base_ring):
            tester.assertIsInstance(self, CategoryWithAxiom_over_base_ring)
def extra_super_categories(self):
"""
Return the extra super categories of a category with axiom.
Default implementation which returns ``[]``.
EXAMPLES::
sage: FiniteSets().extra_super_categories()
[]
"""
return []
    @cached_method
    def super_categories(self):
        """
        Return a list of the (immediate) super categories of
        ``self``, as per :meth:`Category.super_categories`.

        This implements the property that if ``As`` is a subcategory
        of ``Bs``, then the intersection of ``As`` with ``FiniteSets()``
        is a subcategory of ``As`` and of the intersection of ``Bs``
        with ``FiniteSets()``.

        EXAMPLES:

        A finite magma is both a magma and a finite set::

            sage: Magmas().Finite().super_categories()
            [Category of magmas, Category of finite sets]

        Variants::

            sage: Sets().Finite().super_categories()
            [Category of sets]
            sage: Monoids().Finite().super_categories()
            [Category of monoids, Category of finite semigroups]

        TESTS::

            sage: from sage.categories.category_with_axiom import TestObjects
            sage: C = TestObjects().FiniteDimensional().Unital().Commutative().Finite()
            sage: sorted(C.super_categories(), key=str)
            [Category of finite commutative test objects,
             Category of finite dimensional commutative unital test objects,
             Category of finite finite dimensional test objects]
        """
        base_category = self._base_category
        axiom = self._axiom
        # Join the base category with, for each super category of the base,
        # that super category with the axiom applied, plus any extra super
        # categories; ignore_axioms avoids rediscovering (base, axiom) and
        # thus an infinite recursion.
        return Category.join((base_category,) +
                             tuple(cat
                                   for category in base_category._super_categories
                                   for cat in category._with_axiom_as_tuple(axiom)) +
                             tuple(self.extra_super_categories()),
                             ignore_axioms = ((base_category, axiom),),
                             as_list = True)
def additional_structure(self):
r"""
Return the additional structure defined by ``self``.
OUTPUT: ``None``
By default, a category with axiom defines no additional
structure.
.. SEEALSO:: :meth:`Category.additional_structure`.
EXAMPLES::
sage: Sets().Finite().additional_structure()
sage: Monoids().additional_structure()
TESTS::
sage: Sets().Finite().additional_structure.__module__
'sage.categories.category_with_axiom'
"""
return None
    @staticmethod
    def _repr_object_names_static(category, axioms):
        r"""
        Return the name of the objects of ``category`` with the given
        ``axioms`` prepended, for use by ``_repr_``.

        INPUT:

        - ``category`` -- a category

        - ``axioms`` -- a list or iterable of strings

        EXAMPLES::

            sage: from sage.categories.category_with_axiom import CategoryWithAxiom
            sage: CategoryWithAxiom._repr_object_names_static(Semigroups(), ["Flying", "Blue"])
            'flying blue semigroups'
            sage: CategoryWithAxiom._repr_object_names_static(Algebras(QQ), ["Flying", "WithBasis", "Blue"])
            'flying blue algebras with basis over Rational Field'
            sage: CategoryWithAxiom._repr_object_names_static(Algebras(QQ), ["WithBasis"])
            'algebras with basis over Rational Field'
            sage: CategoryWithAxiom._repr_object_names_static(Sets().Finite().Subquotients(), ["Finite"])
            'subquotients of finite sets'
            sage: CategoryWithAxiom._repr_object_names_static(Monoids(), ["Unital"])
            'monoids'
            sage: CategoryWithAxiom._repr_object_names_static(Algebras(QQ['x']['y']), ["Flying", "WithBasis", "Blue"])
            'flying blue algebras with basis over Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field'

        If ``axioms`` is a set or frozen set, then they are first
        sorted using :func:`canonicalize_axioms`::

            sage: CategoryWithAxiom._repr_object_names_static(Semigroups(), set(["Finite", "Commutative", "Facade"]))
            'facade finite commutative semigroups'

        .. SEEALSO:: :meth:`_repr_object_names`

        .. NOTE::

            The logic here is shared between :meth:`_repr_object_names`
            and :meth:`.category.JoinCategory._repr_object_names`

        TESTS::

            sage: from sage.categories.homsets import Homsets
            sage: CategoryWithAxiom._repr_object_names_static(Homsets(), ["Endset"])
            'endsets'
            sage: CategoryWithAxiom._repr_object_names_static(PermutationGroups(), ["FinitelyGeneratedAsMagma"])
            'finitely generated permutation groups'
            sage: CategoryWithAxiom._repr_object_names_static(Rings(), ["FinitelyGeneratedAsMagma"])
            'finitely generated as magma rings'
        """
        from sage.categories.additive_magmas import AdditiveMagmas
        axioms = canonicalize_axioms(all_axioms,axioms)
        base_category = category._without_axioms(named=True)
        if isinstance(base_category, CategoryWithAxiom): # Smelly runtime type checking
            result = super(CategoryWithAxiom, base_category)._repr_object_names()
        else:
            result = base_category._repr_object_names()
        # Prepend the axiom names one by one, innermost last, adjusting the
        # wording for the special cases below.
        for axiom in reversed(axioms):
            # TODO: find a more generic way to handle the special cases below
            if axiom in base_category.axioms():
                # If the base category already has this axiom, we
                # need not repeat it here. See the example with
                # Sets().Finite().Subquotients() or Monoids()
                continue
            base_category = base_category._with_axiom(axiom)
            if axiom == "WithBasis":
                result = result.replace(" over ", " with basis over ", 1)
            elif axiom == "Connected" and "graded " in result:
                result = result.replace("graded ", "graded connected ", 1)
            elif axiom == "Connected" and "filtered " in result:
                result = result.replace("filtered ", "filtered connected ", 1)
            elif axiom == "Stratified" and "graded " in result:
                result = result.replace("graded ", "stratified ", 1)
            elif axiom == "Nilpotent" and "finite dimensional " in result:
                # We need to put nilpotent before finite dimensional in the
                # axioms ordering so we do not (unnecessarily) display
                # 'nilpotent' in 'finite dimensional nilpotent stratified'.
                # So we need to swap the order here.
                result = result.replace("finite dimensional ", "finite dimensional nilpotent ", 1)
            elif axiom == "Endset" and "homsets" in result:
                # Without the space at the end to handle Homsets().Endset()
                result = result.replace("homsets", "endsets", 1)
            elif axiom == "FinitelyGeneratedAsMagma" and \
                 not base_category.is_subcategory(AdditiveMagmas()):
                result = "finitely generated " + result
            else:
                result = uncamelcase(axiom) + " " + result
        return result
    def _repr_object_names(self):
        r"""
        The names of the objects of this category, as used by ``_repr_``.

        .. SEEALSO:: :meth:`Category._repr_object_names`

        EXAMPLES::

            sage: FiniteSets()._repr_object_names()
            'finite sets'
            sage: AlgebrasWithBasis(QQ).FiniteDimensional()._repr_object_names()
            'finite dimensional algebras with basis over Rational Field'
            sage: Monoids()._repr_object_names()
            'monoids'
            sage: Semigroups().Unital().Finite()._repr_object_names()
            'finite monoids'
            sage: Algebras(QQ).Commutative()._repr_object_names()
            'commutative algebras over Rational Field'

        .. NOTE::

            This is implemented by taking _repr_object_names from
            self._without_axioms(named=True), and adding the names
            of the relevant axioms in appropriate order.
        """
        # Delegate to the static helper with all axioms of this category.
        return CategoryWithAxiom._repr_object_names_static(self, self.axioms())
def base_category(self):
r"""
Return the base category of ``self``.
EXAMPLES::
sage: C = Sets.Finite(); C
Category of finite sets
sage: C.base_category()
Category of sets
sage: C._without_axioms()
Category of sets
TESTS::
sage: from sage.categories.category_with_axiom import TestObjects, CategoryWithAxiom
sage: C = TestObjects().Commutative().Facade()
sage: assert isinstance(C, CategoryWithAxiom)
sage: C._without_axioms()
Category of test objects
"""
return self._base_category
    def __reduce__(self):
        r"""
        Implement the pickle protocol.

        This overrides the implementation in
        :meth:`UniqueRepresentation.__reduce__` in order to not
        expose the implementation detail that, for example, the
        category of magmas which distribute over an associative
        additive magma is implemented as
        ``MagmasAndAdditiveMagmas.Distributive.AdditiveAssociative.AdditiveCommutative``
        and not
        ``MagmasAndAdditiveMagmas.Distributive.AdditiveCommutative.AdditiveAssociative``::

        EXAMPLES::

            sage: C = Semigroups()
            sage: reduction = C.__reduce__(); reduction
            (<function call_method at ...>, (Category of magmas, '_with_axiom', 'Associative'))
            sage: loads(dumps(C)) is C
            True
            sage: FiniteSets().__reduce__()
            (<function call_method at ...>, (Category of sets, '_with_axiom', 'Finite'))
            sage: from sage.categories.magmas_and_additive_magmas import MagmasAndAdditiveMagmas
            sage: C = MagmasAndAdditiveMagmas().Distributive().AdditiveAssociative().AdditiveCommutative()
            sage: C.__class__
            <class 'sage.categories.distributive_magmas_and_additive_magmas.DistributiveMagmasAndAdditiveMagmas.AdditiveAssociative.AdditiveCommutative_with_category'>
            sage: C.__reduce__()
            (<function call_method at ...>, (Category of additive associative distributive magmas and additive magmas, '_with_axiom', 'AdditiveCommutative'))
        """
        # Reconstruct as base_category._with_axiom(axiom) rather than by class.
        return (call_method, (self._base_category, "_with_axiom", self._axiom))
    @cached_method
    def _without_axiom(self, axiom):
        r"""
        Return this category, with axiom ``axiom`` removed.

        OUTPUT:

        A category ``C`` which does not have axiom ``axiom`` and such
        that either ``C`` is ``self``, or adding back all the axioms
        of ``self`` gives back ``self``.

        .. SEEALSO:: :meth:`Category._without_axiom`

        .. WARNING:: This is not guaranteed to be robust.

        EXAMPLES::

            sage: Groups()._without_axiom("Unital")
            Category of semigroups
            sage: Groups()._without_axiom("Associative")
            Category of inverse unital magmas
            sage: Groups().Commutative()._without_axiom("Unital")
            Category of commutative semigroups
        """
        # Strip all axioms, then re-add every axiom except the given one.
        axioms = self.axioms().difference([axiom])
        return self._without_axioms()._with_axioms(axioms)
    @cached_method
    def _without_axioms(self, named=False):
        """
        Return the category without the axioms that have been
        added to create it.

        EXAMPLES::

            sage: Sets().Finite()._without_axioms()
            Category of sets
            sage: Monoids().Finite()._without_axioms()
            Category of magmas

        This is because::

            sage: Semigroups().Unital() is Monoids()
            True

        If ``named`` is ``True``, then ``_without_axioms`` stops at the
        first category that has an explicit name of its own::

            sage: Sets().Finite()._without_axioms(named=True)
            Category of sets
            sage: Monoids().Finite()._without_axioms(named=True)
            Category of monoids

        Technically we test this by checking if the class specifies
        explicitly the attribute ``_base_category_class_and_axiom``
        by looking up ``_base_category_class_and_axiom_origin``.

        Some more examples::

            sage: Algebras(QQ).Commutative()._without_axioms()
            Category of magmatic algebras over Rational Field
            sage: Algebras(QQ).Commutative()._without_axioms(named=True)
            Category of algebras over Rational Field
        """
        # A "hardcoded" origin marks a category with an explicit name of
        # its own (e.g. Fields); stop unwinding there when named=True.
        if named and self._base_category_class_and_axiom_origin == "hardcoded":
            return self
        return self._base_category._without_axioms(named=named)
    @cached_method
    def axioms(self):
        r"""
        Return the axioms known to be satisfied by all the
        objects of ``self``.

        .. SEEALSO:: :meth:`Category.axioms`

        EXAMPLES::

            sage: C = Sets.Finite(); C
            Category of finite sets
            sage: C.axioms()
            frozenset({'Finite'})
            sage: C = Modules(GF(5)).FiniteDimensional(); C
            Category of finite dimensional vector spaces over Finite Field of size 5
            sage: sorted(C.axioms())
            ['AdditiveAssociative', 'AdditiveCommutative', 'AdditiveInverse',
             'AdditiveUnital', 'Finite', 'FiniteDimensional']
            sage: sorted(FiniteMonoids().Algebras(QQ).axioms())
            ['AdditiveAssociative', 'AdditiveCommutative', 'AdditiveInverse',
             'AdditiveUnital', 'Associative', 'Distributive',
             'FiniteDimensional', 'Unital', 'WithBasis']
            sage: sorted(FiniteMonoids().Algebras(GF(3)).axioms())
            ['AdditiveAssociative', 'AdditiveCommutative', 'AdditiveInverse',
             'AdditiveUnital', 'Associative', 'Distributive', 'Finite',
             'FiniteDimensional', 'Unital', 'WithBasis']
            sage: from sage.categories.magmas_and_additive_magmas import MagmasAndAdditiveMagmas
            sage: MagmasAndAdditiveMagmas().Distributive().Unital().axioms()
            frozenset({'Distributive', 'Unital'})
            sage: D = MagmasAndAdditiveMagmas().Distributive()
            sage: X = D.AdditiveAssociative().AdditiveCommutative().Associative()
            sage: X.Unital().super_categories()[1]
            Category of monoids
            sage: X.Unital().super_categories()[1] is Monoids()
            True
        """
        # We would want to write the following line:
        #     return super(CategoryWithAxiom, self).axioms() | {self._axiom}
        # However one currently can't use super to call a cached
        # method in a super class. So we dup the code from there ...
        # (axioms of all super categories, plus the axiom defining self)
        return frozenset(axiom
                         for category in self._super_categories
                         for axiom in category.axioms()) | {self._axiom}
class CategoryWithAxiom_over_base_ring(CategoryWithAxiom, Category_over_base_ring):
    """
    A category with axiom which is moreover a category over a base ring.
    """

    def __init__(self, base_category):
        """
        Initialize this category with axiom over the base ring of
        ``base_category``.

        TESTS::

            sage: C = Modules(ZZ).FiniteDimensional(); C
            Category of finite dimensional modules over Integer Ring
            sage: type(C)
            <class 'sage.categories.modules.Modules.FiniteDimensional_with_category'>
            sage: type(C).__base__.__base__
            <class 'sage.categories.category_with_axiom.CategoryWithAxiom_over_base_ring'>
            sage: TestSuite(C).run()
        """
        # FIXME: this basically duplicates the code from
        # CategoryWithAxiom.__init__; but we can't call the latter without
        # calling Category.__init__ twice. One could instead set
        # "self.__base", which is done in Category_over_base_ring.__init__,
        # but then one has to take into account Python's name mangling.
        self._base_category = base_category
        Category_over_base_ring.__init__(self, base_category.base_ring())
class CategoryWithAxiom_singleton(Category_singleton, CategoryWithAxiom):#, Category_singleton, FastHashable_class):
    """
    A category with axiom which is moreover a singleton category.
    """
    pass
"""
The following workaround is needed until any :class:`CategoryWithAxiom` of a
:class:`Category_over_base_ring` becomes automatically a
:class:`CategoryWithAxiom_over_base_ring`::
sage: from sage.categories.category_with_axiom import TestObjectsOverBaseRing, Category_over_base_ring
sage: from sage.categories.category import JoinCategory
sage: isinstance(TestObjectsOverBaseRing(QQ), Category_over_base_ring)
True
sage: C = TestObjectsOverBaseRing(QQ).Commutative()
sage: isinstance(C, Category_over_base_ring) # todo: not implemented
True
sage: C.FiniteDimensional()
Category of finite dimensional commutative test objects over base ring over Rational Field
sage: C.Commutative()
Category of commutative test objects over base ring over Rational Field
sage: C.Unital()
Category of commutative unital test objects over base ring over Rational Field
sage: C = TestObjectsOverBaseRing(IntegerModRing(2)).Connected()
sage: isinstance(C, JoinCategory)
True
sage: isinstance(C, Category_over_base_ring) # todo: not implemented
True
sage: C.FiniteDimensional()
Category of finite dimensional connected test objects over base ring over Ring of integers modulo 2
sage: C.Connected()
Category of connected test objects over base ring over Ring of integers modulo 2
"""
##############################################################################
# Utilities and tests tools
def axiom(axiom):
    """
    Return a function/method ``self -> self._with_axiom(axiom)``.

    This can used as a shorthand to define axioms, in particular in
    the tests below. Usually one will want to attach documentation to
    an axiom, so the need for such a shorthand in real life might not
    be that clear, unless we start creating lots of axioms.

    In the long run maybe this could evolve into an ``@axiom`` decorator.

    EXAMPLES::

        sage: from sage.categories.category_with_axiom import axiom
        sage: axiom("Finite")(Semigroups())
        Category of finite semigroups

    Upon assigning the result to a class this becomes a method::

        sage: class As:
        ....:     def _with_axiom(self, axiom): return self, axiom
        ....:     Finite = axiom("Finite")
        sage: As().Finite()
        (<__main__.As ... at ...>, 'Finite')
    """
    # The closure simply forwards to ``_with_axiom``; its ``__name__`` is
    # set to the axiom so that introspection shows a meaningful name.
    def _apply_axiom(self):
        return self._with_axiom(axiom)
    _apply_axiom.__name__ = axiom
    return _apply_axiom
class Blahs(Category_singleton):
    r"""
    A toy singleton category, for testing purposes.

    This is the root of a hierarchy of mathematically meaningless
    categories, used for testing Sage's category framework:

    - :class:`Bars`
    - :class:`TestObjects`
    - :class:`TestObjectsOverBaseRing`
    """

    def super_categories(self):
        """
        TESTS::

            sage: from sage.categories.category_with_axiom import Blahs
            sage: Blahs().super_categories()
            [Category of sets]
            sage: TestSuite(Blahs()).run()
        """
        from sage.categories.sets_cat import Sets
        return [Sets()]

    class SubcategoryMethods:
        # Each attribute makes the corresponding axiom available on all
        # subcategories of Blahs, via the ``axiom`` shorthand above.
        FiniteDimensional = axiom("FiniteDimensional")
        Commutative = axiom("Commutative")
        Unital = axiom("Unital")
        Connected = axiom("Connected")
        Flying = axiom("Flying")
        Blue = axiom("Blue")

    # One nested class per axiom implemented by this category.
    class FiniteDimensional(CategoryWithAxiom):
        pass
    class Commutative(CategoryWithAxiom):
        pass
    class Connected(CategoryWithAxiom):
        pass
    class Unital(CategoryWithAxiom):
        class Blue(CategoryWithAxiom):
            pass
    class Flying(CategoryWithAxiom):
        def extra_super_categories(self):
            """
            This illustrates a way to have an axiom imply another one.

            Here, we want ``Flying`` to imply ``Unital``, and to put
            the class for the category of unital flying blahs in
            ``Blahs.Flying`` rather than ``Blahs.Unital.Flying``.

            TESTS::

                sage: from sage.categories.category_with_axiom import Blahs, TestObjects, Bars
                sage: Blahs().Flying().extra_super_categories()
                [Category of unital blahs]
                sage: Blahs().Flying()
                Category of flying unital blahs
            """
            return [Blahs().Unital()]

    def Blue_extra_super_categories(self):
        """
        Illustrates a current limitation in the way to have an axiom
        imply another one.

        Here, we would want ``Blue`` to imply ``Unital``, and to put
        the class for the category of unital blue blahs in
        ``Blahs.Unital.Blue`` rather than ``Blahs.Blue``.

        This currently fails because ``Blahs`` is the category where
        the axiom ``Blue`` is defined, and the specifications
        currently impose that a category defining an axiom should also
        implement it (here in an category with axiom
        ``Blahs.Blue``). In practice, due to this violation of the
        specifications, the axiom is lost during the join calculation.

        .. TODO::

            Decide whether we care about this feature. In such a
            situation, we are not really defining a new axiom, but
            just defining an axiom as an alias for a couple others,
            which might not be that useful.

        .. TODO::

            Improve the infrastructure to detect and report this
            violation of the specifications, if this is
            easy. Otherwise, it's not so bad: when defining an axiom A
            in a category ``Cs`` the first thing one is supposed to
            doctest is that ``Cs().A()`` works. So the problem should
            not go unnoticed.

        TESTS::

            sage: from sage.categories.category_with_axiom import Blahs, TestObjects, Bars
            sage: Blahs().Blue_extra_super_categories()
            [Category of unital blahs]
            sage: Blahs().Blue()  # todo: not implemented
            Category of blue unital blahs
        """
        return [Blahs().Unital()]
class Bars(Category_singleton):
    r"""
    A toy singleton category, for testing purposes.

    .. SEEALSO:: :class:`Blahs`
    """

    def super_categories(self):
        """
        TESTS::

            sage: from sage.categories.category_with_axiom import Bars
            sage: Bars().super_categories()
            [Category of blahs]
            sage: TestSuite(Bars()).run()
        """
        return [Blahs()]

    def Unital_extra_super_categories(self):
        """
        Return extraneous super categories for the unital objects of ``self``.

        This method specifies that a unital bar is a test object.
        Thus, the categories of unital bars and of unital test objects
        coincide.

        EXAMPLES::

            sage: from sage.categories.category_with_axiom import Bars, TestObjects
            sage: Bars().Unital_extra_super_categories()
            [Category of test objects]
            sage: Bars().Unital()
            Category of unital test objects
            sage: TestObjects().Unital().all_super_categories()
            [Category of unital test objects,
             Category of unital blahs,
             Category of test objects,
             Category of bars,
             Category of blahs,
             Category of sets,
             Category of sets with partial maps,
             Category of objects]
        """
        return [TestObjects()]
class TestObjects(Category_singleton):
    r"""
    A toy singleton category, for testing purposes.

    .. SEEALSO:: :class:`Blahs`
    """

    def super_categories(self):
        """
        TESTS::

            sage: from sage.categories.category_with_axiom import TestObjects
            sage: TestObjects().super_categories()
            [Category of bars]
            sage: TestSuite(TestObjects()).run()
        """
        return [Bars()]

    # Nested classes implementing (combinations of) axioms on test objects.
    class FiniteDimensional(CategoryWithAxiom):
        class Finite(CategoryWithAxiom):
            pass
        class Unital(CategoryWithAxiom):
            class Commutative(CategoryWithAxiom):
                pass
    class Commutative(CategoryWithAxiom):
        class Facade(CategoryWithAxiom):
            pass
        class FiniteDimensional(CategoryWithAxiom):
            pass
        class Finite(CategoryWithAxiom):
            pass
    class Unital(CategoryWithAxiom):
        pass
class TestObjectsOverBaseRing(Category_over_base_ring):
    r"""
    A toy category over a base ring, for testing purposes.

    .. SEEALSO:: :class:`Blahs`
    """

    def super_categories(self):
        """
        TESTS::

            sage: from sage.categories.category_with_axiom import TestObjectsOverBaseRing
            sage: TestObjectsOverBaseRing(QQ).super_categories()
            [Category of test objects]
            sage: TestObjectsOverBaseRing.Unital.an_instance()
            Category of unital test objects over base ring over Rational Field
            sage: TestObjectsOverBaseRing.FiniteDimensional.Unital.an_instance()
            Category of finite dimensional unital test objects over base ring over Rational Field
            sage: TestSuite(TestObjectsOverBaseRing(QQ).FiniteDimensional().Unital().Commutative()).run()
        """
        return [TestObjects()]

    # Nested classes implementing (combinations of) axioms; they use the
    # over-base-ring variant of CategoryWithAxiom.
    class FiniteDimensional(CategoryWithAxiom_over_base_ring):
        class Finite(CategoryWithAxiom_over_base_ring):
            pass
        class Unital(CategoryWithAxiom_over_base_ring):
            class Commutative(CategoryWithAxiom_over_base_ring):
                pass
    class Commutative(CategoryWithAxiom_over_base_ring):
        class Facade(CategoryWithAxiom_over_base_ring):
            pass
        class FiniteDimensional(CategoryWithAxiom_over_base_ring):
            pass
        class Finite(CategoryWithAxiom_over_base_ring):
            pass
    class Unital(CategoryWithAxiom_over_base_ring):
        pass
| 40.261566 | 167 | 0.676466 |
acf466c2d23763178d45169311c22d543a45aadb | 309 | py | Python | server/accounts/admin.py | tanvirtin/tinmart | a3d0d3fb24f525c36e814338dac42580ab865efc | [
"MIT"
] | 2 | 2019-07-17T08:03:28.000Z | 2021-12-22T05:36:45.000Z | server/accounts/admin.py | tanvirtin/tinmart | a3d0d3fb24f525c36e814338dac42580ab865efc | [
"MIT"
] | 3 | 2020-08-09T07:35:30.000Z | 2020-08-09T07:35:48.000Z | server/accounts/admin.py | tanvirtin/tinmart | a3d0d3fb24f525c36e814338dac42580ab865efc | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import User
# Register your models here.
# Register the custom User model with the admin site, reusing Django's
# built-in UserAdmin options so users can be managed from the admin interface.
admin.site.register(User, UserAdmin)
| 34.333333 | 134 | 0.812298 |
acf466dd559e20427abba03ec70daddbed1cafab | 240 | py | Python | python/tests/test_zsession.py | zcred/zsession | 7c936d203b83735ca857a48462dd22e98161098f | [
"MIT"
] | 8 | 2017-03-24T05:51:32.000Z | 2017-04-23T20:45:42.000Z | python/tests/test_zsession.py | zcred/zsession | 7c936d203b83735ca857a48462dd22e98161098f | [
"MIT"
] | null | null | null | python/tests/test_zsession.py | zcred/zsession | 7c936d203b83735ca857a48462dd22e98161098f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
test_zsession
-------------
Tests for the `zsession` module.
"""
import unittest
import zsession
class TestZsession(unittest.TestCase):
    # Smoke tests for the ``zsession`` module.

    def test_decode(self):
        # Smoke test: decode should accept a string without raising.
        # NOTE(review): the return value is not asserted -- presumably
        # ``decode`` has a checkable result; confirm and assert on it.
        zsession.decode("world!")
        pass
| 14.117647 | 38 | 0.645833 |
acf468744579f45c420951fae71382b7fc9de184 | 4,234 | py | Python | main.py | ugis70194/codeforces_pdf_generator | d7b04e6d93e2f8eee08c3e265e7b67736c6c562d | [
"MIT"
] | null | null | null | main.py | ugis70194/codeforces_pdf_generator | d7b04e6d93e2f8eee08c3e265e7b67736c6c562d | [
"MIT"
] | null | null | null | main.py | ugis70194/codeforces_pdf_generator | d7b04e6d93e2f8eee08c3e265e7b67736c6c562d | [
"MIT"
] | null | null | null | from urllib import request
from urllib.parse import urlparse
import PyPDF2
import lxml.html
import pdfkit
import re
import sys
import argparse
javascript_delay = 2500
class CFProblem:
    """A single Codeforces problem page, fetched from its URL and savable as a PDF."""

    @staticmethod
    def get_problem_id(url):
        """Return the problem id (contest number + index, e.g. ``'1234A'``) from a problem URL.

        NOTE(review): assumes the URL path matches
        ``/contest/<n>/problem/<X>``; a non-matching URL raises
        ``AttributeError`` on ``m.groups()``.
        """
        m = re.match(r'/contest/([0-9]+)/problem/([A-Z0-9]+)', urlparse(url).path)
        return m.groups()[0] + m.groups()[1]

    def __init__(self, url):
        """Fetch the problem page at ``url`` and prepare its DOM for printing."""
        print("load codeforces problem %s" % url)
        html = request.urlopen(url)
        self.problem_id = CFProblem.get_problem_id(url)
        self.pdf_name = 'CF' + self.problem_id + '.pdf'
        self.dom = lxml.html.fromstring(html.read())
        self.contest_name = self.dom.xpath('//*[@id="sidebar"]/div[1]/table/tbody/tr[1]/th/a')[0].text
        # Resolve relative links (images, scripts) against the original host.
        base_tag = lxml.html.Element('base', href="https://%s" % urlparse(url).netloc)
        # Hide everything on the page except the problem statement itself.
        style_tag = lxml.html.Element('style')
        style_tag.text = '#pageContent>*:not(.problemindexholder) { display: none !important; } #header { display: none; } #footer { display: none; } .roundbox.menu-box { display: none; } #sidebar { display: none; } #body > br:nth-child(8) { display: none; } #pageContent { margin-right: 0 !important; } #body { padding-top: 0; } #MathJax_Message { display: none !important; }'
        self.dom.xpath('//html')[0].insert(0, base_tag)
        self.dom.xpath('//head')[0].append(style_tag)
        # Prepend the contest name above the problem header.
        contest_tag = lxml.html.Element('div')
        contest_tag.text = self.contest_name
        contest_tag.attrib['style'] = 'text-align: left;'
        self.dom.xpath('//*[@class="header"]')[0].insert(0, contest_tag)

    def save_as_pdf(self):
        """Render the prepared DOM to ``self.pdf_name`` via wkhtmltopdf (pdfkit)."""
        options = {
            'page-size': 'A4',
            'margin-top': '0.1in',
            'margin-right': '0.1in',
            'margin-bottom': '0.1in',
            'margin-left': '0.1in',
            'encoding': "UTF-8",
            # Give MathJax time to typeset formulas before rendering.
            'javascript-delay': str(javascript_delay),
            'no-outline': None,
            #'quiet': None,
        }
        html_source = lxml.html.tostring(self.dom).decode('utf-8')
        pdfkit.from_string(html_source, self.pdf_name, options=options)
        print("saved problem %s as pdf %s" % (self.problem_id, self.pdf_name))
class CFContest:
    """A full Codeforces contest: all of its problems, merged into one PDF."""

    @staticmethod
    def get_contest_id(url):
        """Return the contest id (e.g. ``'1234'``) parsed from a contest URL path."""
        m = re.match(r'/contest/([0-9]+)', urlparse(url).path)
        return m.groups()[0]

    def __init__(self, url):
        """Load the contest page at ``url`` and every problem linked from it."""
        print("load codeforces contest %s" % url)
        base = urlparse(url).netloc
        html = request.urlopen(url)
        self.dom = lxml.html.fromstring(html.read())
        self.contest_id = CFContest.get_contest_id(url)
        self.pdf_name = "CF" + self.contest_id + ".pdf"
        self.problems = []
        # The first table row is the header, hence position() > 1.
        for problem_a_tag in self.dom.xpath('//table[@class="problems"]/tr[position() > 1]/td[1]/a'):
            self.problems.append(CFProblem("https://" + base + problem_a_tag.attrib['href']))

    def save_as_pdf(self):
        """Save each problem as its own PDF, then merge them into ``self.pdf_name``."""
        merger = PyPDF2.PdfFileMerger()
        try:
            for problem in self.problems:
                problem.save_as_pdf()
                merger.append(problem.pdf_name)
            merger.write(self.pdf_name)
        finally:
            # Release the appended source files even if rendering fails midway.
            merger.close()
        print("saved contest %s as pdf %s" % (self.contest_id, self.pdf_name))
if __name__ == '__main__':
    # CLI entry point: generate a PDF for the whole contest, or only for the
    # problems named with ``-p/--problems``.
    parser = argparse.ArgumentParser(description='This script is to generate PDF of problems on codeforces.')
    parser.add_argument('contest_id',
                        action='store',
                        type=str,
                        help='Contest ID')
    parser.add_argument('-p', '--problems',
                        action='store',
                        nargs='+',
                        type=str,
                        help='Problems')
    args = parser.parse_args()
    if args.problems is None:
        # No explicit problem list: render every problem of the contest.
        url = 'https://codeforces.com/contest/%s/' % args.contest_id
        contest = CFContest(url)
        contest.save_as_pdf()
    else:
        for problem_id in args.problems:
            url = 'https://codeforces.com/contest/%s/problem/%s' % (args.contest_id, problem_id)
            problem = CFProblem(url)
            problem.save_as_pdf()
| 39.203704 | 377 | 0.594473 |
acf469825da2a2c116cf571fda85feda7eeacda6 | 2,592 | py | Python | SunBreaker.py | RaghuA06/Other-Python-Projects | 22d0707d2244f0f14cc3cb7341ad0a5a2c3dbd6f | [
"Apache-2.0"
] | null | null | null | SunBreaker.py | RaghuA06/Other-Python-Projects | 22d0707d2244f0f14cc3cb7341ad0a5a2c3dbd6f | [
"Apache-2.0"
] | null | null | null | SunBreaker.py | RaghuA06/Other-Python-Projects | 22d0707d2244f0f14cc3cb7341ad0a5a2c3dbd6f | [
"Apache-2.0"
] | null | null | null | from tkinter import *
import requests
root = Tk()
root.title("SunBreaker : Weather Application")
root.geometry('500x400')
root.configure(background = "powderblue")

# Tk variables backing the three display fields. They hold formatted text
# (e.g. "24.9 °C"), so StringVar is the correct type for all three; the
# original used IntVar for the temperature, which only worked by accident
# because Tcl variables are not type-checked on set(). The module-level
# `global` statement was a no-op and has been removed.
theWeather = StringVar()
theDescription = StringVar()
theTemperature = StringVar()

# Making Frames
titleFrame = Frame(root, width = 500, height = 100, relief = "raise", bg = "powderblue")
titleFrame.pack(side = TOP)

inputFrame = Frame(root, width = 500, height = 100, relief = "raise", bg = "powderblue")
inputFrame.pack(side = TOP)

detailFrame = Frame(root, width = 500, height = 200, relief = "raise", bg = "powderblue")
detailFrame.pack(side = TOP)
### MAIN PROGRAMMING FOR THE WEATHER API ###
def thisClimate(event):
    """Fetch current weather for the entered location and update the display.

    Bound to <Return> on the root window; *event* is the Tk event (unused).
    Reads the module-level `location` Entry and writes the `theWeather`,
    `theDescription` and `theTemperature` Tk variables.
    """
    address = "http://api.openweathermap.org/data/2.5/weather?appid=0c42f7f6b53b244c78a418f4f181282a&q="
    place = location.get().title()
    url = address + place
    try:
        data = requests.get(url).json()
        getWeather = data['weather'][0]['main'].title()
        getDescription = data['weather'][0]['description'].title()
        getTemperature = data['main']['temp']
    except (requests.RequestException, KeyError, IndexError, ValueError):
        # Unknown city, malformed payload or network failure: show a readable
        # message instead of crashing with an uncaught exception.
        theWeather.set("N/A")
        theDescription.set("Location not found")
        theTemperature.set("N/A")
        return
    temp_in_celsius = getTemperature - 273.15  # API reports Kelvin
    theWeather.set(getWeather)
    theDescription.set(getDescription)
    # One decimal instead of the raw float's 15+ digits.
    theTemperature.set("%.1f °C" % temp_in_celsius)
### END OF PROGRAMMING FOR THE WEATHER API ###
# Adding items to titleFrame: the application banner.
title = Label(titleFrame, text = "SunBreaker", font = ("Arial", 48), fg = "orange", bg = "powderblue")
title.grid(row = 0, column = 0)
# Adding items to the inputFrame.
# `location` is read by thisClimate() when the user presses <Return>.
# Note: `global` at module level is a no-op; kept for fidelity.
global location
location = Entry(inputFrame, bd = 5, width = 20, font = "Arial 18")
location.grid(row = 0, column = 0)
location.insert(0, "Enter Location")
# Adding items to the detail Frame: three read-out fields, each bound to a
# Tk variable that thisClimate() updates after a successful API call.
spacer = Label(detailFrame, text ='', height = 5, bg = "powderblue")
spacer.grid(row = 0, column = 0)
weather = Entry(detailFrame, textvariable = theWeather, bd = 2, width = 20, font = "Arial 18", fg = "gray48")
weather.grid(row = 1, column = 0, pady = 10)
weather.insert(0, "Weather Display")
description = Entry(detailFrame, textvariable = theDescription, bd = 2, width = 20, font = "Arial 18", fg = "gray48")
description.grid(row = 2, column = 0, pady = 10)
description.insert(0, "Description Display")
temperature = Entry(detailFrame, textvariable = theTemperature, bd = 2, width = 20, font = "Arial 18", fg = "gray48")
temperature.grid(row = 3, column = 0, pady = 10)
temperature.insert(0, "Temperature Display")
# Pressing Enter anywhere in the window triggers the lookup.
root.bind('<Return>', thisClimate)
root.mainloop()
| 33.230769 | 118 | 0.677469 |
acf469877fbb05fa44372123393a25bd51d82303 | 6,514 | py | Python | qd.py | infyhr/python-utils | 9e74d4fe3ecc5242c0328a48cad5e92747f92819 | [
"MIT"
] | 1 | 2018-09-12T09:56:27.000Z | 2018-09-12T09:56:27.000Z | qd.py | infyhr/python-utils | 9e74d4fe3ecc5242c0328a48cad5e92747f92819 | [
"MIT"
] | null | null | null | qd.py | infyhr/python-utils | 9e74d4fe3ecc5242c0328a48cad5e92747f92819 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import argparse
import urllib2
import urlparse
import socket
import time
import sys
import datetime
""" Taken from php2python.com """
def date(unixtime, format = '%d.%m.%Y %H:%M'):
d = datetime.datetime.fromtimestamp(unixtime)
return d.strftime(format)
""" Taken from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size """
def filesize(num):
    """Render a byte count as a human-readable string, e.g. "2.000 KB".

    Works for negative sizes as well (the sign is preserved). The original
    returned "...TB" without a space for terabyte values, inconsistent with
    every other unit; that is fixed here.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB']:
        if -1024.0 < num < 1024.0:
            return "%3.3f %s" % (num, unit)
        num /= 1024.0
    return "%3.3f %s" % (num, 'TB')
def main():
    """Parse CLI arguments, probe the target URL, then stream it to disk.

    Prints verbose progress information (suppressed by --quiet, which the
    original parsed but then ignored) and terminates via sys.exit() with a
    message on any unrecoverable error. Python 2 code (urllib2/raw_input).
    """
    parser = argparse.ArgumentParser(
        description='Downloads a file off WAN. Supports only raw HTTP protocol. '
                    'Dynamically adjusts the buffer size so the download is as fast as possible.')
    parser.add_argument('--chunk', type=int, default=8192,
                        help='Manually enter the buffer size (chunk size). '
                             'This tends to be automatically adjusted.')
    parser.add_argument('--quiet', action='store_true', help='Output nothing.')
    parser.add_argument('url')
    args = parser.parse_args()

    if not args.url:
        sys.exit('Cannot continue without a URL.')

    def log(message):
        """Print *message* and flush stdout, unless --quiet was given."""
        if not args.quiet:
            print(message)
            sys.stdout.flush()

    log('[+] Got the URL argument...')

    # Pretend to be a regular browser; some servers refuse unknown agents.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1587.0 Safari/537.36',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'en',
        'Connection': 'keep-alive',
        'Encoding': 'gzip,deflate,sdch',
    }
    try:
        request = urllib2.Request(args.url, None, headers)
        handle = urllib2.urlopen(request)
    except (urllib2.URLError, urllib2.HTTPError) as e:
        sys.exit('[-] Failed to load the URL, got exception %s. Cannot continue, exiting...' % e.reason)

    # Install a cookie-aware opener and cap stalls at two minutes.
    urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
    socket.setdefaulttimeout(120)

    log('[+] The URL is ' + handle.geturl())
    log('[+] Got HTTP ' + str(handle.getcode()))
    if handle.getcode() == 200:
        log('[+] 200 OK ;-)')
    else:
        log('[-] HTTP is NOT OK :S')

    log('[+] Printing HTTP response headers...\n')
    for name, value in dict(handle.info()).iteritems():
        log('[+] %s: %s' % (name, value))

    # Resolve the remote host both ways (forward and reverse DNS).
    ip = socket.gethostbyname(urlparse.urlparse(handle.geturl()).netloc)
    log('\n[+] IP: ' + ip)
    log('[+] Hostname: ' + str(socket.gethostbyaddr(ip)[0]))

    # Content-Length may be absent (e.g. chunked responses); treat as 0.
    length_header = handle.info().get('Content-Length')
    bytes_total = int(length_header) if length_header else 0
    log('[+] File size is ' + filesize(bytes_total))

    # Local file name = last component of the URL path.
    file_name = args.url.split('/')[-1]
    try:
        fp = open(file_name, 'wb')
    except IOError:
        sys.exit('[-] Failed to create a temporary file. Permissions? $ chmod. Cannot continue.')
    log('[+] Partial file created, write permissions are available.')

    if not bytes_total:
        # Without a known size we cannot auto-tune: honour --chunk instead
        # (the original parsed the option but never actually used it).
        log('[-] Unable to automatically determine the chunk size. Using the --chunk')
        chunk_size = args.chunk
    else:
        # Scale progress granularity with the size: roughly `divider`
        # progress lines over the whole download.
        if bytes_total <= 1024:
            divider = 1
        elif bytes_total <= 104857600:       # up to 100 MB
            divider = 10
        elif bytes_total <= 1048576000:      # up to ~1 GB
            divider = 100
        elif bytes_total <= 10485760000:     # up to ~10 GB
            divider = 1000
        else:                                # was left unbound for larger files
            divider = 10000
        # Integer math as before; fall back to --chunk if this rounds to 0
        # (the original then called handle.read(0) and looped forever).
        chunk_size = int(round((bytes_total / divider) / 1024)) or args.chunk
        log('[+] Chunk size automatically set to ' + str(chunk_size))
    if chunk_size == 8192:
        log('[+] Chunk size set to default (8192)')

    log('[+] Download is ready to start. Press Enter to start.')
    raw_input('')
    time.sleep(1)

    downloaded = 0
    blocks_read = 0
    start_time = time.time()  # UNIX timestamp, like php time()
    log('[+] Download started @ ' + date(start_time))
    try:
        while True:
            block = handle.read(chunk_size)
            if not block:
                break
            # Write first, then report: the original broke out of the loop
            # before writing the final block, so files smaller than one
            # chunk were saved empty.
            fp.write(block)
            downloaded += len(block)
            blocks_read += 1
            elapsed = time.time() - start_time
            percent = round(float(downloaded) / bytes_total * 100, 2) if bytes_total else 0.0
            # Speed from actual bytes written, not chunk_size * iterations.
            speed = (downloaded / 1048576.0) / elapsed if elapsed else 0.0
            log('[%.2f%%][#%s] Downloaded %s of %s\t\t\t%.3f MB/s' % (
                percent, str(blocks_read), str(filesize(downloaded)),
                str(filesize(bytes_total)), speed))
    except socket.timeout:
        sys.exit('[-] Connection timed out. Aborted.')
    finally:
        fp.close()  # the original leaked the file handle

    end_time = time.time()
    log('[+] Finished @ ' + date(end_time))
    log('[+] Download took %.2f seconds' % (end_time - start_time))
    # TODO: download to file_name + '.part' first, then os.rename() on success.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # sys.exit() instead of the interactive-only quit() helper.
        sys.exit('^C received, exiting...')
| 35.98895 | 186 | 0.605772 |
acf46a87c9a31ed1fd29870bc2ab3e6376028536 | 12,540 | py | Python | src/bgdev/utils/vector.py | BenoitGielly/bgdev | 1b2454c9fe8da7cf0a68341519a5291d5f790e75 | [
"MIT"
] | 3 | 2021-08-25T02:32:10.000Z | 2021-11-09T01:47:13.000Z | src/bgdev/utils/vector.py | BenoitGielly/bgdev | 1b2454c9fe8da7cf0a68341519a5291d5f790e75 | [
"MIT"
] | null | null | null | src/bgdev/utils/vector.py | BenoitGielly/bgdev | 1b2454c9fe8da7cf0a68341519a5291d5f790e75 | [
"MIT"
] | 1 | 2021-10-06T23:21:00.000Z | 2021-10-06T23:21:00.000Z | """Utility methods to help dealing with vectors.
:created: 10/10/2016
:author: Benoit GIELLY <benoit.gielly@gmail.com>
"""
from __future__ import absolute_import
import math
from math import sin, sqrt
from maya import cmds, mel
from maya.api.OpenMaya import MMatrix, MVector
import pymel.core as pm
def get_matrix_from_transforms(position, normal, tangent):
    """Construct an MMatrix from position, normal and tangent.

    Args:
        position (list): XYZ position.
        normal (list): The normal vector used to compute rotation.
        tangent (list): The tangent vector used to compute rotation.

    Returns:
        MMatrix: The MMatrix array.
    """
    nor = MVector(normal).normal()
    tan = MVector(tangent).normal()
    ort = nor ^ tan  # cross product: third, orthogonal axis
    pos = MVector(position)
    matrix = MMatrix()
    # Fill rows 0-2 with the three axes and row 3 with the position; each
    # MVector has 3 components so column 3 keeps the identity's values
    # (element (3, 3) stays 1.0, making a valid transform matrix).
    for row, vector in enumerate([nor, tan, ort, pos]):
        for column, value in enumerate(vector):
            matrix.setElement(row, column, value)
    return matrix
def get_matrix_from_nodes(
    nodes, middle=True, aim_vector=(1, 0, 0), up_vector=(0, 1, 0)
):
    # pylint: disable=too-many-locals
    """Return a matrix based on given nodes.

    If passed nodes are 1 or more than 3, it simply return the manipulator
    position as a matrix. Otherwise, it'll use the second node as the aim axis
    and the third as up.

    Args:
        nodes (list): list of nodes to get matrix
        middle (bool): snap in between nodes 1 and 2 if True, else on first.
        aim_vector (tuple): default aim vector for the aimConstraint.
        up_vector (tuple): default up vector for the aimConstraint.

    Returns:
        list: matrix array.
    """
    # query manipMove position if 1 or more than 3 selected
    if len(nodes) == 1 or len(nodes) > 3:
        return get_manipulator_xforms(as_matrix=True)

    # else, use vectors and matrix to determine position and aim_vector
    if len(nodes) == 2:
        pt_a, pt_b = get_vectors(nodes)
    else:
        pt_a, pt_b, pt_c = get_vectors(nodes)

    # get vectors from each points
    pos = (pt_a + pt_b) / 2 if middle else pt_a
    x_vec = (pt_b - pos).normal()
    # With only 2 nodes there is no up reference, so fall back to up_vector.
    y_vec = MVector(up_vector) if len(nodes) == 2 else (pt_c - pos).normal()
    # Re-orthogonalize: z from x^y, then rebuild y from z^x.
    z_vec = x_vec ^ y_vec.normal()
    y_vec = z_vec ^ x_vec.normal()

    # build vectors and vector_array
    vector1, vector2 = MVector(aim_vector), MVector(up_vector)
    vector3 = vector1 ^ vector2
    vector_array = [[], [], []]
    # j is the index of the (single) non-zero component of each axis-aligned
    # aim/up vector; it selects which matrix row receives which world axis,
    # scaled by that component's sign. The trailing [0] is the homogeneous
    # coordinate of a direction vector.
    for each, vect in zip([vector1, vector2, vector3], [x_vec, y_vec, z_vec]):
        j = [list(each).index(i) for i in each if i != 0][0]
        vector_array[j] = list(each[j] * vect) + [0]

    # flattens vector_array into one simple list and add position to it
    return [y for _ in vector_array for y in _] + list(pos) + [1]
def get_manipulator_xforms(as_matrix=False):
    """Query the Move manipulator's position and orientation.

    Args:
        as_matrix (bool): if True, return a matrix built from the manipulator
            transforms instead of the raw [position, rotation] pair.

    Returns:
        list: [position, rotation] lists, or a flat matrix array when
        *as_matrix* is True.
    """
    # Force the Move tool so the manip context queries are valid.
    mel.eval("setToolTo $gMove;")
    position = cmds.manipMoveContext("Move", query=True, position=True)
    rotation = cmds.manipPivot(query=True, orientation=True)[0]
    return from_euler(rotation, translate=position) if as_matrix else [position, rotation]
def get_vectors(nodes, mode="xform"):
    """Generate world position vectors for each given node.

    Args:
        nodes (list): list of nodes to return position as vector.
        mode (str): choose between default "xform" or "pivot" to get world position.

    Yields:
        maya.api.OpenMaya.MVector: MVector of the node's world position
    """
    for each in nodes:
        # Fallback if mode is neither "xform" nor "pivot".
        position = (0, 0, 0)
        if mode == "xform":
            position = cmds.xform(
                each,
                query=True,
                translation=True,
                worldSpace=True,
            )
        elif mode == "pivot":
            position = cmds.xform(
                each,
                query=True,
                translation=True,
                rotatePivot=True,
                worldSpace=True,
            )
        # when using xform on component like faces or edge, the returned value
        # will be a list of each vertices position, so we need to average that
        if len(position) > 3:
            vectors = [
                MVector(position[i : i + 3])
                for i in range(0, len(position), 3)
            ]
            result = MVector()
            for vector in vectors:
                result += vector
            position = result / len(vectors)
        yield MVector(position)
def from_euler(rotation, translate=(0, 0, 0), radians=False):
    # pylint: disable=too-many-locals
    """Convert an XYZ euler rotation into a flat 4x4 (row-major) matrix.

    Args:
        rotation (tuple): Rotation values to put in the matrix.
        translate (tuple): Translation values to put in the matrix.
        radians (bool): If False (default), *rotation* is given in degrees
            and converted to radians first.

    Returns:
        tuple: 16-element matrix of the euler rotation plus translation.
    """
    if radians:
        rx, ry, rz = rotation
    else:
        rx, ry, rz = (math.radians(value) for value in rotation)

    cx, sx = math.cos(rx), math.sin(rx)
    cy, sy = math.cos(ry), math.sin(ry)
    cz, sz = math.cos(rz), math.sin(rz)

    row_x = (cy * cz, cy * sz, -sy, 0.0)
    row_y = (sx * sy * cz - cx * sz, sx * sy * sz + cx * cz, sx * cy, 0.0)
    row_z = (cx * sy * cz + sx * sz, cx * sy * sz - sx * cz, cx * cy, 0.0)
    row_t = (translate[0], translate[1], translate[2], 1.0)
    return row_x + row_y + row_z + row_t
def get_closest_point(source, targets, furthest=False):
    """Find the target node closest (or furthest) to *source* in world space.

    Args:
        source (str): node used as the starting point for distance calculation.
        targets (list): candidate nodes to compare.
        furthest (bool): If True, return the furthest node instead.

    Returns:
        str: the target node closest (or furthest) to the source, or None
        when *targets* is empty.
    """
    src = MVector(
        cmds.xform(source, query=True, translation=True, worldSpace=True)
    )
    best_node = None
    best_distance = 0.0 if furthest else float("inf")
    for node in targets:
        node_pos = cmds.xform(
            node, query=True, translation=True, worldSpace=True
        )
        dist = (MVector(node_pos) - src).length()
        better = dist > best_distance if furthest else dist < best_distance
        if better:
            best_node = node
            best_distance = dist
    return best_node
def get_distance_between(
    node1,
    node2,
    distance_between=False,
    bounding_box=False,
    rotate_pivot=False,
):
    """Get the distance between two objects.

    Args:
        node1 (str): Node that determines start position.
        node2 (str): Node that determines end position.
        distance_between (bool): If True, measure with a temporary
            distanceBetween node and return its value.
        bounding_box (bool): If True, query each node's world bounding box
            and measure between the returned values (note: only the first
            three of the six bbox values — the min corner — enter the
            distance formula).
        rotate_pivot (bool): If True, measure between the nodes' rotate
            pivots instead of their translations.

    Returns:
        float: distance between the two given nodes.
    """
    if distance_between:
        return use_distance_between(node1, node2)

    # NOTE(review): the original passed snake_case flags (bounding_box=True,
    # rotate_pivot=True) to cmds.xform, which only accepts the camelCase
    # long names used elsewhere in this module (see get_vectors), so these
    # two branches raised TypeError. Fixed to boundingBox / rotatePivot.
    if bounding_box:
        pos_a = cmds.xform(
            node1, query=True, boundingBox=True, worldSpace=True
        )
        pos_b = cmds.xform(
            node2, query=True, boundingBox=True, worldSpace=True
        )
    elif rotate_pivot:
        pos_a = cmds.xform(
            node1, query=True, worldSpace=True, rotatePivot=True
        )
        pos_b = cmds.xform(
            node2, query=True, worldSpace=True, rotatePivot=True
        )
    else:
        pos_a = cmds.xform(
            node1, query=True, translation=True, worldSpace=True
        )
        pos_b = cmds.xform(
            node2, query=True, translation=True, worldSpace=True
        )

    # Euclidean distance between the two (x, y, z) positions.
    return (
        (pos_a[0] - pos_b[0]) ** 2
        + (pos_a[1] - pos_b[1]) ** 2
        + (pos_a[2] - pos_b[2]) ** 2
    ) ** 0.5
def use_distance_between(node1, node2):
    """Measure world-space distance via a temporary distanceBetween node."""
    measure = cmds.createNode("distanceBetween")
    cmds.connectAttr(node1 + ".worldMatrix[0]", measure + ".inMatrix1")
    cmds.connectAttr(node2 + ".worldMatrix[0]", measure + ".inMatrix2")
    distance = cmds.getAttr(measure + ".distance")
    # Clean up the helper node before returning.
    cmds.delete(measure)
    return distance
def aim_in_plane(positions, aim_vector=(1, 0, 0), up_vector=(0, 1, 0)):
    """Align selected locators based on plane made of the first and last.

    Args:
        positions (list): world-space positions to align.
        aim_vector (tuple): aim axis used for the aimConstraints.
        up_vector (tuple): up axis used for the aimConstraints.

    Returns:
        list: one world-space matrix array per input position.
    """
    # pylint: disable=too-many-locals
    # create nulls and snap them to given positions
    nulls = []
    for pos in positions:
        null = pm.createNode("transform")
        pm.xform(null, translation=pos, worldSpace=True)
        nulls.append(null)
    locator = pm.spaceLocator()
    locator.setMatrix(nulls[0].getMatrix(worldSpace=True))
    # reverse vectors if we're on the right side (YZ plane)
    x_axis = locator.getTranslation(space="world")[0]
    if x_axis < 0:
        aim_vector = [-1 * x for x in aim_vector]
        up_vector = [-1 * x for x in up_vector]
    # aim at the last null, using the second as the up object
    pm.delete(
        pm.aimConstraint(
            nulls[-1],
            locator,
            maintainOffset=False,
            aimVector=aim_vector,
            upVector=up_vector,
            worldUpObject=nulls[1],
            worldUpType="object",
        ),
    )
    # find AH distance (projection of the middle point onto the A->C line)
    index = len(nulls) // 2
    pt_a = pm.datatypes.Point(nulls[0].getTranslation(space="world"))
    pt_b = pm.datatypes.Point(nulls[index].getTranslation(space="world"))
    pt_c = pm.datatypes.Point(nulls[-1].getTranslation(space="world"))
    c_side = pt_b - pt_a
    b_side = pt_c - pt_a
    height = sin(c_side.angle(b_side)) * c_side.length()
    ah_dist = sqrt(pow(c_side.length(), 2) - pow(height, 2))
    # offset by ah_dist along aim axis
    ah_values = [ah_dist * x for x in aim_vector]
    pm.move(
        locator,
        *ah_values,
        relative=True,
        objectSpace=True,
        worldSpaceDistance=True
    )
    # re-orient properly (now aiming at the middle null from the new spot)
    pm.delete(
        pm.aimConstraint(
            nulls[index],
            locator,
            maintainOffset=False,
            aimVector=aim_vector,
            upVector=up_vector,
            worldUpObject=nulls[0],
            worldUpType="object",
        ),
    )
    # move forward by half of AC
    ac_values = [b_side.length() * x for x in aim_vector]
    pm.move(
        locator,
        *ac_values,
        relative=True,
        objectSpace=True,
        worldSpaceDistance=True
    )
    # orient the base locator: each null aims at the next one, keeping the
    # helper locator as the common up object; the last null copies the
    # orientation of the one before it via a parented temp locator.
    for i, each in enumerate(nulls, 1):
        if i < len(nulls):
            tmp = pm.spaceLocator()
            tmp.setMatrix(each.getMatrix(worldSpace=True))
            aim = pm.aimConstraint(
                nulls[i],
                tmp,
                maintainOffset=False,
                aimVector=aim_vector,
                upVector=up_vector,
                worldUpObject=locator,
                worldUpType="object",
            )
            orientation = pm.xform(
                tmp, query=True, worldSpace=True, rotation=True
            )
            pm.delete(aim, tmp)
            pm.xform(each, rotation=orientation, worldSpace=True)
        else:
            tmp = pm.spaceLocator()
            pm.parent(tmp, nulls[-2])
            tmp.resetFromRestPosition()
            orientation = pm.xform(
                tmp, query=True, worldSpace=True, rotation=True
            )
            pm.xform(each, rotation=orientation, worldSpace=True)
            pm.delete(tmp)
    # cleanup and return
    matrices = [
        cmds.xform(x.name(), query=True, matrix=True, worldSpace=True)
        for x in nulls
    ]
    pm.delete(locator, nulls)
    return matrices
| 29.64539 | 84 | 0.598246 |
acf46b2b04d25134c7d8e56845c5499ee1eaed44 | 6,919 | py | Python | localflavor/ar/forms.py | MehdioKhan/django-localflavor | 7cb223bf801ebc7659cc314a8a870e47e5004488 | [
"BSD-3-Clause"
] | 1 | 2018-11-28T22:08:17.000Z | 2018-11-28T22:08:17.000Z | localflavor/ar/forms.py | DalavanCloud/django-localflavor | b78df3bbfa5e07e3f6b78a09d43c45eb39fa1196 | [
"BSD-3-Clause"
] | null | null | null | localflavor/ar/forms.py | DalavanCloud/django-localflavor | b78df3bbfa5e07e3f6b78a09d43c45eb39fa1196 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""AR-specific Form helpers."""
from __future__ import unicode_literals
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import ugettext_lazy as _
from .ar_provinces import PROVINCE_CHOICES
class ARProvinceSelect(Select):
    """A Select widget that uses a list of Argentinean provinces/autonomous cities as its choices."""

    def __init__(self, attrs=None):
        # Choices come from localflavor's static PROVINCE_CHOICES table.
        super(ARProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class ARPostalCodeField(RegexField):
    """
    A field that accepts a 'classic' NNNN Postal Code or a CPA.

    See:
    * http://www.correoargentino.com.ar/cpa/que_es
    * http://www.correoargentino.com.ar/cpa/como_escribirlo
    """
    default_error_messages = {
        'invalid': _("Enter a postal code in the format NNNN or ANNNNAAA."),
    }

    def __init__(self, max_length=8, min_length=4, *args, **kwargs):
        # Either 4 plain digits, or a CPA: one letter (I and O excluded by
        # the A-HJ-NP-Z ranges) + 4 digits + 3 non-digit characters.
        super(ARPostalCodeField, self).__init__(
            r'^\d{4}$|^[A-HJ-NP-Za-hj-np-z]\d{4}\D{3}$',
            max_length=max_length, min_length=min_length, *args, **kwargs
        )

    def clean(self, value):
        """Validate the value and normalize CPA letters to upper case."""
        value = super(ARPostalCodeField, self).clean(value)
        if value in self.empty_values:
            return self.empty_value
        if len(value) not in (4, 8):
            raise ValidationError(self.error_messages['invalid'])
        if len(value) == 8:
            # CPA: upper-case the leading letter and the trailing 3 letters.
            return '%s%s%s' % (value[0].upper(), value[1:5], value[5:].upper())
        return value
class ARDNIField(CharField):
    """A field that validates 'Documento Nacional de Identidad' (DNI) numbers."""
    default_error_messages = {
        'invalid': _("This field requires only numbers."),
        'max_digits': _("This field requires 7 or 8 digits."),
    }

    def __init__(self, max_length=10, min_length=7, *args, **kwargs):
        # max_length=10 leaves room for the dotted form "XX.XXX.XXX".
        super(ARDNIField, self).__init__(max_length=max_length, min_length=min_length, *args, **kwargs)

    def clean(self, value):
        """Value can be a string either in the [X]X.XXX.XXX or [X]XXXXXXX formats."""
        value = super(ARDNIField, self).clean(value)
        if value in self.empty_values:
            return self.empty_value
        if not value.isdigit():
            # Tolerate thousands separators, then re-check below.
            value = value.replace('.', '')
        if not value.isdigit():
            raise ValidationError(self.error_messages['invalid'])
        if len(value) not in (7, 8):
            raise ValidationError(self.error_messages['max_digits'])
        # Returned value is the bare digit string (separators removed).
        return value
class ARCUITField(RegexField):
    """
    This field validates a CUIT (Código Único de Identificación Tributaria).

    A CUIT is of the form XX-XXXXXXXX-V. The last digit is a check digit.

    More info:
    http://es.wikipedia.org/wiki/Clave_%C3%9Anica_de_Identificaci%C3%B3n_Tributaria
    Info in English:
    http://www.justlanded.com/english/Argentina/Argentina-Guide/Visas-Permits/Other-Legal-Documents

    .. versionchanged:: 2.1
        ``ARCUITField`` now also accepts CUIT with prefix 34.
    """
    default_error_messages = {
        'invalid': _('Enter a valid CUIT in XX-XXXXXXXX-X or XXXXXXXXXXXX format.'),
        'checksum': _("Invalid CUIT."),
        'legal_type': _('Invalid legal type. Type must be 27, 20, 30, 23, 24, 33 or 34.'),
    }

    def __init__(self, *args, **kwargs):
        # 2-digit legal type, 8-digit number, 1 check digit; dashes optional.
        super(ARCUITField, self).__init__(r'^\d{2}-?\d{8}-?\d$', *args, **kwargs)

    def clean(self, value):
        """Value can be either a string in the format XX-XXXXXXXX-X or an 11-digit number."""
        value = super(ARCUITField, self).clean(value)
        if value in self.empty_values:
            return self.empty_value
        value, cd = self._canon(value)
        # The first two digits encode the legal/person type.
        if not value[:2] in ['27', '20', '30', '23', '24', '33', '34']:
            raise ValidationError(self.error_messages['legal_type'])
        if self._calc_cd(value) != cd:
            raise ValidationError(self.error_messages['checksum'])
        # Always return the canonical dashed form XX-XXXXXXXX-X.
        return self._format(value, cd)

    def _canon(self, cuit):
        # Strip dashes; return (leading 10 digits, check digit).
        cuit = cuit.replace('-', '')
        return cuit[:-1], cuit[-1]

    def _calc_cd(self, cuit):
        # Calculation code based on:
        # http://es.wikipedia.org/wiki/C%C3%B3digo_%C3%9Anico_de_Identificaci%C3%B3n_Tributaria
        mults = (5, 4, 3, 2, 7, 6, 5, 4, 3, 2)
        tmp = sum([m * int(cuit[idx]) for idx, m in enumerate(mults)])
        result = 11 - (tmp % 11)
        if result == 11:
            result = 0
        elif result == 10:
            result = 9
        return str(result)

    def _format(self, cuit, check_digit=None):
        # Re-assemble the canonical "XX-XXXXXXXX-X" representation.
        if check_digit is None:
            check_digit = cuit[-1]
            cuit = cuit[:-1]
        return '%s-%s-%s' % (cuit[:2], cuit[2:], check_digit)
class ARCBUField(CharField):
    """
    This field validates a CBU (Clave Bancaria Uniforme).

    A CBU is a 22-digits long number. The first 8 digits denote bank and branch number,
    plus a verifying digit. The remaining 14 digits denote an account number, plus a verifying digit.

    More info:
    https://es.wikipedia.org/wiki/Clave_Bancaria_Uniforme

    .. versionadded:: 1.3
    """
    default_error_messages = {
        'invalid': _('Enter a valid CBU in XXXXXXXXXXXXXXXXXXXXXX format.'),
        'max_length': _('CBU must be exactly 22 digits long.'),
        'min_length': _('CBU must be exactly 22 digits long.'),
        'checksum': _('Invalid CBU.'),
    }

    def __init__(self, *args, **kwargs):
        # Force the exact 22-character length regardless of caller kwargs.
        kwargs['min_length'] = kwargs['max_length'] = 22
        super(ARCBUField, self).__init__(*args, **kwargs)

    def _valid_block(self, block, ponderator):
        # A block is "<digits><verification digit>": the weighted digit sum
        # modulo 10 must match the verification digit.
        number = block[:-1]
        v_digit = int(block[-1])
        block_sum = sum(x * int(y) for x, y in zip(ponderator, number))
        remainder = block_sum % 10
        # The verification digit and the result of the calculation must be the same.
        # In the edge case that the remainder is 0, the verification digit must be 0 too.
        if remainder == 0:
            return v_digit == remainder
        return v_digit == (10 - remainder)

    def _checksum(self, value):
        # Validate both halves: bank/branch (8 digits) and account (14 digits).
        block_1 = value[0:8]
        block_2 = value[8:22]
        ponderator_1 = (9, 7, 1, 3, 9, 7, 1, 3)
        ponderator_2 = (3, 9, 7, 1, 3, 9, 7, 1, 3, 9, 7, 1, 3)
        is_valid_1 = self._valid_block(block_1, ponderator_1)
        is_valid_2 = self._valid_block(block_2, ponderator_2)
        return is_valid_1 and is_valid_2

    def clean(self, value):
        """Value must be a 22 digits long number."""
        value = super(ARCBUField, self).clean(value)
        if value in self.empty_values:
            return self.empty_value
        if not value.isdigit():
            raise ValidationError(self.error_messages['invalid'])
        if not self._checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return value
| 35.121827 | 103 | 0.62379 |
acf46b9fef249f8bb38b03973ec1919586a6da42 | 4,588 | py | Python | xcube/api/readwrite.py | dzelge/xcube | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | [
"MIT"
] | null | null | null | xcube/api/readwrite.py | dzelge/xcube | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | [
"MIT"
] | null | null | null | xcube/api/readwrite.py | dzelge/xcube | 1e5049a227df4a50435d9aac6aacf2bcbaa3e2dd | [
"MIT"
] | null | null | null | from contextlib import contextmanager
import xarray as xr
from .verify import assert_cube
from xcube.util.dsio import find_dataset_io, guess_dataset_format
@contextmanager
def open_cube(input_path: str,
              format_name: str = None,
              **kwargs) -> xr.Dataset:
    """
    Context-manager variant of ``read_cube`` that auto-closes the cube.

    :param input_path: input path
    :param format_name: format, e.g. "zarr" or "netcdf4"
    :param kwargs: format-specific keyword arguments
    :return: data cube
    """
    cube = read_cube(input_path, format_name, **kwargs)
    try:
        yield cube
    finally:
        # Close even if the caller's with-body raised.
        cube.close()
def read_cube(input_path: str,
              format_name: str = None,
              **kwargs) -> xr.Dataset:
    """
    Read a data cube from *input_path*.

    If *format* is not provided it will be guessed from *input_path*.

    :param input_path: input path
    :param format_name: format, e.g. "zarr" or "netcdf4"
    :param kwargs: format-specific keyword arguments
    :return: data cube
    """
    # is_cube=True makes read_dataset verify the result is a valid cube.
    return read_dataset(input_path, format_name=format_name, is_cube=True, **kwargs)
def write_cube(cube: xr.Dataset,
               output_path: str,
               format_name: str = None,
               cube_asserted: bool = False,
               **kwargs) -> xr.Dataset:
    """
    Write a data cube to *output_path*.

    If *format* is not provided it will be guessed from *output_path*.

    :param cube: Data cube to be written.
    :param output_path: output path
    :param format_name: format, e.g. "zarr" or "netcdf4"
    :param kwargs: format-specific keyword arguments
    :param cube_asserted: If False, *cube* will be verified, otherwise it is expected to be a valid cube.
    :return: data cube *cube*
    """
    if not cube_asserted:
        # Validate cube conventions before writing anything to disk.
        assert_cube(cube)
    return write_dataset(cube, output_path, format_name=format_name, **kwargs)
@contextmanager
def open_dataset(input_path: str,
                 format_name: str = None,
                 is_cube: bool = False,
                 **kwargs) -> xr.Dataset:
    """
    The ``read_dataset`` function as context manager that auto-closes the dataset read.

    :param input_path: input path
    :param format_name: format, e.g. "zarr" or "netcdf4"
    :param is_cube: Whether a ValueError will be raised, if the dataset read from *input_path* is not a data cube.
    :param kwargs: format-specific keyword arguments
    :return: dataset object
    """
    dataset = read_dataset(input_path, format_name, is_cube=is_cube, **kwargs)
    try:
        yield dataset
    finally:
        # Always close, even if the caller's with-body raised.
        dataset.close()
def read_dataset(input_path: str,
                 format_name: str = None,
                 is_cube: bool = False,
                 **kwargs) -> xr.Dataset:
    """
    Read dataset from *input_path*.

    If *format* is not provided it will be guessed from *input_path*.

    :param input_path: input path
    :param format_name: format, e.g. "zarr" or "netcdf4"
    :param is_cube: Whether a ValueError will be raised, if the dataset read from *input_path* is not a data cube.
    :param kwargs: format-specific keyword arguments
    :return: dataset object
    """
    format_name = format_name if format_name else guess_dataset_format(input_path)
    if format_name is None:
        raise ValueError("Unknown input format")
    dataset_io = find_dataset_io(format_name, modes=["r"])
    if dataset_io is None:
        raise ValueError(f"Unknown input format {format_name!r} for {input_path}")
    dataset = dataset_io.read(input_path, **kwargs)
    if is_cube:
        # Optionally verify the result satisfies the cube conventions.
        assert_cube(dataset)
    return dataset
def write_dataset(dataset: xr.Dataset,
                  output_path: str,
                  format_name: str = None,
                  **kwargs) -> xr.Dataset:
    """
    Write *dataset* to *output_path*.

    If *format* is not provided it will be guessed from *output_path*.

    :param dataset: Dataset to be written.
    :param output_path: output path
    :param format_name: format, e.g. "zarr" or "netcdf4"
    :param kwargs: format-specific keyword arguments
    :return: the input dataset
    """
    fmt = format_name if format_name else guess_dataset_format(output_path)
    if fmt is None:
        raise ValueError("Unknown output format")
    writer = find_dataset_io(fmt, modes=["w"])
    if writer is None:
        raise ValueError(f"Unknown output format {fmt!r} for {output_path}")
    writer.write(dataset, output_path, **kwargs)
    return dataset
| 33.985185 | 114 | 0.657585 |
acf46d05bf80892316e63b4125dd91b6e9589369 | 709 | py | Python | credsweeper/credentials/candidate_key.py | ARKAD97/CredSweeper | 0f613cded13d6c28c19c57eac54dd245b2c318ea | [
"MIT"
] | 1 | 2022-03-03T18:11:59.000Z | 2022-03-03T18:11:59.000Z | credsweeper/credentials/candidate_key.py | shadowscatcher/CredSweeper | 0387ed76aca4a12154e15c49db8dc0901a014275 | [
"MIT"
] | null | null | null | credsweeper/credentials/candidate_key.py | shadowscatcher/CredSweeper | 0387ed76aca4a12154e15c49db8dc0901a014275 | [
"MIT"
] | null | null | null | from typing import Tuple
from credsweeper.credentials.line_data import LineData
class CandidateKey:
    """Hashable identity key for a credential candidate.

    Candidates that detected the same value on the same line of the same
    file produce equal (and equally hashed) ``CandidateKey`` instances,
    which lets them be deduplicated via sets/dicts.
    """

    def __init__(self, line_data: "LineData") -> None:
        # Only location + matched value participate in identity.
        self.path: str = line_data.path
        self.line_num: int = line_data.line_num
        self.value: str = line_data.value
        self.key: Tuple[str, int, str] = (self.path, self.line_num, self.value)

    def __hash__(self) -> int:
        return hash(self.key)

    def __eq__(self, other: object) -> bool:
        # Return NotImplemented for foreign types so comparisons degrade
        # gracefully (the original raised AttributeError instead). Python 3
        # derives __ne__ from __eq__, so the explicit __ne__ was dropped.
        if not isinstance(other, CandidateKey):
            return NotImplemented
        return self.key == other.key
| 30.826087 | 118 | 0.67842 |
acf46d6484196802eca187487ed73ffb488ce4ea | 469 | py | Python | dvc/repo/get_url.py | Njuhobby/dvc | 948633782e79f7c7af29a36f010c57b439c95f16 | [
"Apache-2.0"
] | null | null | null | dvc/repo/get_url.py | Njuhobby/dvc | 948633782e79f7c7af29a36f010c57b439c95f16 | [
"Apache-2.0"
] | null | null | null | dvc/repo/get_url.py | Njuhobby/dvc | 948633782e79f7c7af29a36f010c57b439c95f16 | [
"Apache-2.0"
] | null | null | null | import os
import dvc.dependency as dependency
import dvc.output as output
from dvc.utils import resolve_output
def get_url(url, out=None):
    """Download the artifact at ``url`` to ``out`` (derived from the URL when
    not given) without tracking it in the DVC cache."""
    out = resolve_output(url, out)

    # Local sources get normalized to absolute paths before parsing.
    if os.path.exists(url):
        url, out = os.path.abspath(url), os.path.abspath(out)

    (dep,) = dependency.loads_from(None, [url])
    (out,) = output.loads_from(None, [out], use_cache=False)

    if not dep.exists:
        raise dep.DoesNotExistError(dep)

    dep.download(out)
| 22.333333 | 60 | 0.678038 |
acf46e311a78410608889e001d8b71771ef72938 | 3,433 | py | Python | tests/test_npy.py | yumorozov/scikit-learn-intelex | 7a39c0a0e208b49f209168b01fb50206f962175f | [
"Apache-2.0"
] | 1 | 2021-12-24T16:53:01.000Z | 2021-12-24T16:53:01.000Z | tests/test_npy.py | yumorozov/scikit-learn-intelex | 7a39c0a0e208b49f209168b01fb50206f962175f | [
"Apache-2.0"
] | null | null | null | tests/test_npy.py | yumorozov/scikit-learn-intelex | 7a39c0a0e208b49f209168b01fb50206f962175f | [
"Apache-2.0"
] | null | null | null | #===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import unittest
import numpy as np
import daal4py as d4p
# Parse the linked DAAL version string (e.g. "20190001") into a
# (year, update) integer tuple such as (2019, 1), for version-gated tests.
dv = d4p._get__daal_link_version__
daal_version = tuple(map(int, (dv()[0:4], dv()[4:8])))
class Test(unittest.TestCase):
    """Sanity checks for daal4py's handling of differently laid-out numpy inputs."""

    def test_non_contig(self):
        """Non-contiguous views must give the same linear-regression
        predictions as their C-contiguous copies."""
        rows, n_feat, n_dep = 10007, 1017, 77
        # Slicing away the last row/column yields non-contiguous views.
        full_x = np.random.rand(rows + 1, n_feat + 1)
        full_xp = np.random.rand(rows + 1, n_feat + 1)
        full_y = np.random.rand(rows + 1, n_dep + 1)
        view_x = full_x[:rows, :n_feat]
        view_xp = full_xp[:rows, :n_feat]
        view_y = full_y[:rows, :n_dep]
        copy_x = np.ascontiguousarray(view_x)
        copy_xp = np.ascontiguousarray(view_xp)
        copy_y = np.ascontiguousarray(view_y)
        # The views must really be non-contiguous, the copies contiguous,
        # and both must hold identical values.
        self.assertTrue(all(not a.flags['C_CONTIGUOUS'] for a in (view_x, view_xp, view_y)))
        self.assertTrue(all(a.flags['C_CONTIGUOUS'] for a in (copy_x, copy_xp, copy_y)))
        self.assertTrue(all(np.allclose(c, v)
                            for c, v in ((copy_x, view_x), (copy_xp, view_xp), (copy_y, view_y))))
        # Train/predict once with the contiguous copies ...
        rtc = d4p.linear_regression_training().compute(copy_x, copy_y)
        rpc = d4p.linear_regression_prediction().compute(copy_xp, rtc.model)
        # ... and once with the raw views; predictions must match.
        rtn = d4p.linear_regression_training().compute(view_x, view_y)
        rpn = d4p.linear_regression_prediction().compute(view_xp, rtn.model)
        self.assertTrue(np.allclose(rpn.prediction, rpc.prediction))

    def test_struct(self):
        """A structured (record) array and the equivalent homogeneous float64
        array must yield the same cosine-distance result."""
        rows = [(0.5, -1.3, 1, 100.11, 1111111),
                (2.5, -3.3, 2, 200.22, 2222222),
                (4.5, -5.3, 2, 350.33, 3333333),
                (6.5, -7.3, 0, 470.44, 4444444),
                (8.5, -9.3, 1, 270.55, 55555)]
        record_dtype = [('x', 'f4'), ('y', 'f4'), ('categ', 'i4'),
                        ('value', 'f8'), ('super', 'i8')]
        sdata = np.array(rows, dtype=record_dtype)
        hdata = np.array(rows, dtype=np.float64)
        sr = d4p.cosine_distance().compute(sdata)
        hr = d4p.cosine_distance().compute(hdata)
        self.assertTrue(np.allclose(hr.cosineDistance, sr.cosineDistance))
# Allow running this test module directly: python test_npy.py
if __name__ == '__main__':
    unittest.main()
| 41.361446 | 80 | 0.499563 |
acf46fb5a52aa60fae692bb34f5a412052fde923 | 4,816 | py | Python | robosuite/wrappers/gym_wrapper.py | snasiriany/robosuite | 3e7c58362c78811b95fa3ae8e00eea212a411d70 | [
"MIT"
] | null | null | null | robosuite/wrappers/gym_wrapper.py | snasiriany/robosuite | 3e7c58362c78811b95fa3ae8e00eea212a411d70 | [
"MIT"
] | null | null | null | robosuite/wrappers/gym_wrapper.py | snasiriany/robosuite | 3e7c58362c78811b95fa3ae8e00eea212a411d70 | [
"MIT"
] | null | null | null | """
This file implements a wrapper for facilitating compatibility with OpenAI gym.
This is useful when using these environments with code that assumes a gym-like
interface.
"""
import numpy as np
from gym import spaces
from gym.core import Env
from robosuite.wrappers import Wrapper
class GymWrapper(Wrapper, Env):
    """
    Initializes the Gym wrapper. Mimics many of the required functionalities of the Wrapper class
    found in the gym.core module

    Args:
        env (MujocoEnv): The environment to wrap.

        keys (None or list of str): If provided, each observation will
            consist of concatenated keys from the wrapped environment's
            observation dictionary. Defaults to proprio-state and object-state.

    Raises:
        AssertionError: [Object observations must be enabled if no keys]
    """

    def __init__(self, env, keys=None):
        # Run super method
        super().__init__(env=env)
        # Create name for gym: concatenation of robot model names + env class name
        robots = "".join([type(robot.robot_model).__name__ for robot in self.env.robots])
        self.name = robots + "_" + type(self.env).__name__

        # Get reward range
        self.reward_range = (0, self.env.reward_scale)

        if keys is None:
            keys = []
            # Add object obs if requested
            if self.env.use_object_obs:
                keys += ["object-state"]
            # Add image obs if requested
            if self.env.use_camera_obs:
                keys += [f"{cam_name}_image" for cam_name in self.env.camera_names]
            # Iterate over all robots to add to state
            for idx in range(len(self.env.robots)):
                keys += ["robot{}_proprio-state".format(idx)]
        self.keys = keys

        # Gym specific attributes
        self.env.spec = None
        self.metadata = None

        # set up observation and action spaces by probing one reset observation
        obs = self.env.reset()
        self.modality_dims = {key: obs[key].shape for key in self.keys}
        flat_ob = self._flatten_obs(obs)
        self.obs_dim = flat_ob.size
        high = np.inf * np.ones(self.obs_dim)
        low = -high
        self.observation_space = spaces.Box(low=low, high=high)
        low, high = self.env.action_spec
        self.action_space = spaces.Box(low=low, high=high)

    def _flatten_obs(self, obs_dict, verbose=False):
        """
        Filters keys of interest out and concatenate the information.

        Args:
            obs_dict (OrderedDict): ordered dictionary of observations
            verbose (bool): Whether to print out to console as observation keys are processed

        Returns:
            np.array: observations flattened into a 1d array
        """
        ob_lst = []
        for key in self.keys:
            if key in obs_dict:
                if verbose:
                    print("adding key: {}".format(key))
                ob_lst.append(np.array(obs_dict[key]).flatten())
        return np.concatenate(ob_lst)

    def reset(self):
        """
        Extends env reset method to return flattened observation instead of normal OrderedDict.

        Returns:
            np.array: Flattened environment observation space after reset occurs
        """
        ob_dict = self.env.reset()
        return self._flatten_obs(ob_dict)

    def step(self, action, **kwargs):
        """
        Extends vanilla step() function call to return flattened observation instead of normal OrderedDict.

        Args:
            action (np.array): Action to take in environment

        Returns:
            4-tuple:
                - (np.array) flattened observations from the environment
                - (float) reward from the environment
                - (bool) whether the current episode is completed or not
                - (dict) misc information
        """
        ob_dict, reward, done, info = self.env.step(action, **kwargs)
        return self._flatten_obs(ob_dict), reward, done, info

    def seed(self, seed=None):
        """
        Utility function to set numpy seed

        Args:
            seed (None or int): If specified, numpy seed to set

        Raises:
            TypeError: [Seed must be integer]
        """
        # Seed the generator
        if seed is not None:
            try:
                np.random.seed(seed)
            except (TypeError, ValueError) as e:
                # Bug fix: the original code constructed this TypeError but
                # never raised it (inside a bare except), so invalid seeds
                # were silently ignored despite the documented contract.
                raise TypeError("Seed must be an integer type!") from e

    def compute_reward(self, achieved_goal, desired_goal, info):
        """
        Dummy function to be compatible with gym interface that simply returns environment reward

        Args:
            achieved_goal: [NOT USED]
            desired_goal: [NOT USED]
            info: [NOT USED]

        Returns:
            float: environment reward
        """
        # Dummy args used to mimic Wrapper interface
        return self.env.reward()
acf4703e2a9b12d21ea467c80c2a24b1674b7136 | 461 | py | Python | test_ci.py | mkdryden/obs-websocket-py-trio | 9583e3886afa820adfdc711bfaf91723658c6453 | [
"MIT"
] | null | null | null | test_ci.py | mkdryden/obs-websocket-py-trio | 9583e3886afa820adfdc711bfaf91723658c6453 | [
"MIT"
] | null | null | null | test_ci.py | mkdryden/obs-websocket-py-trio | 9583e3886afa820adfdc711bfaf91723658c6453 | [
"MIT"
] | null | null | null | from obswebsocket import ObsWS, requests, events
from trio import open_nursery
# Connection settings for the local obs-websocket server used by these tests.
host = "127.0.0.1"
port = 4444
password = "secret"
def test_load():
    """Smoke test: constructing an ObsWS client inside a nursery must not raise."""
    # NOTE(review): trio.open_nursery is documented as an *async* context
    # manager; using it with a plain `with` here looks suspicious — confirm
    # this is intended before relying on this test.
    with open_nursery() as n:
        _ = ObsWS(n, host, port, password)
        # Just test everything is ok with the object...
def test_build_ok_requests():
    """A generated request object must report its own class name via ``.name``."""
    version_request = requests.GetVersion()
    assert version_request.name == "GetVersion"
def test_build_ok_events():
    """A generated event object must report its own class name via ``.name``."""
    heartbeat_event = events.Heartbeat()
    assert heartbeat_event.name == "Heartbeat"
| 20.043478 | 51 | 0.67679 |
acf470db71f6798c0b0a1f199864bc35bc1eb799 | 1,759 | py | Python | Two-pointer/Sorting/Partition.py | Awesomeyaya/Leetcode-Two-pointer | 15cd0a73f5abc4d0d19d18c231750d31dc839dbe | [
"MIT"
] | null | null | null | Two-pointer/Sorting/Partition.py | Awesomeyaya/Leetcode-Two-pointer | 15cd0a73f5abc4d0d19d18c231750d31dc839dbe | [
"MIT"
] | null | null | null | Two-pointer/Sorting/Partition.py | Awesomeyaya/Leetcode-Two-pointer | 15cd0a73f5abc4d0d19d18c231750d31dc839dbe | [
"MIT"
] | 1 | 2018-10-29T17:33:52.000Z | 2018-10-29T17:33:52.000Z | '''
Partition tempelet: O(n)
pivot 左边 <pivot
pivot 右边 >= pivot
'''
def Partition(self,nums,start,end):
if start >= end:
return
mid = (start+end)//2
left, right, pivot = start, end, nums[mid]
while left <= right:
while left <= right and nums[left] < pivot:
left += 1
while left <= right and nums[right] >= pivot:
right -= 1
if left <= right:
nums[left],nums[right] = nums[right],nums[left]
left += 1
right -=1
'''
应用1: Find kth largest element
等于 find (length -k)th smallesr element
依旧是partition, 每次partition的right如果比k小,说明nums[k] < nums[left], 在nums[start,left]继续partition.
直到start == end 时 return nums[k],说明找到了。
'''
class Solution:
"""
@param n: An integer
@param nums: An array
@return: the Kth largest element
"""
def kthLargestElement(self, n, nums):
# write your code here
if not nums or n < 1 or n > len(nums):
return 0
return self.partition(nums,0,len(nums)-1,len(nums)-n)
def partition(self,nums,start,end,n):
if start == end:
return nums[n]
mid = (start+end)//2
left, right, pivot = start, end, nums[mid]
while left<= right:
while left <= right and nums[left] < pivot:
left += 1
while left <= right and nums[right] > pivot:
right -= 1
if left <= right:
nums[left],nums[right] = nums[right],nums[left]
left, right = left+1, right-1
if n <= right:
return self.partition(nums,start,right,n)
if n >= left:
return self.partition(nums,left,end,n)
return nums[n]
| 28.836066 | 91 | 0.536669 |
acf47118fc70074b68d1c70df90dc2b46e3dfd57 | 14,181 | py | Python | descqa/DeltaSigmaTest.py | AleksCipri/descqa | 1b692ea846d8a95dc98014881d2ad5cc19c94ee5 | [
"BSD-3-Clause"
] | null | null | null | descqa/DeltaSigmaTest.py | AleksCipri/descqa | 1b692ea846d8a95dc98014881d2ad5cc19c94ee5 | [
"BSD-3-Clause"
] | 1 | 2018-08-28T02:40:57.000Z | 2018-08-28T11:37:43.000Z | descqa/DeltaSigmaTest.py | AleksCipri/descqa | 1b692ea846d8a95dc98014881d2ad5cc19c94ee5 | [
"BSD-3-Clause"
] | null | null | null | import os
import numpy as np
import treecorr
from scipy.interpolate import interp1d
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
import astropy.constants as cst
from astropy.cosmology import WMAP7 # pylint: disable=no-name-in-module
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['DeltaSigma']
class DeltaSigma(BaseValidationTest):
    """
    This validation test looks at galaxy-shear correlations by comparing DeltaSigma.
    """
    def __init__(self, **kwargs):
        """Read the test configuration from kwargs and load the tabulated
        reference measurements (data_filename, resolved under data_dir)."""
        # pylint: disable=super-init-not-called
        # validation data
        validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
        # 'data' selects the comparison sample: 'sdss_lowz', 'cfhtlens' or 'sdss_main'
        self.data = kwargs['data']
        # lens (zmin_l/zmax_l) and source (zmin_s/zmax_s) redshift windows
        self.zmin_l = kwargs['zmin_l']
        self.zmax_l = kwargs['zmax_l']
        self.zmin_s = kwargs['zmin_s']
        self.zmax_s = kwargs['zmax_s']
        self.max_background_galaxies = int(float(kwargs['max_background_galaxies']))
        # zmax bounds the redshift grid used for the distance interpolation tables
        self.zmax = kwargs['zmax']
        # radial binning for DeltaSigma: nR log-spaced bins in [Rmin, Rmax]
        self.Rmin = kwargs['Rmin']
        self.Rmax = kwargs['Rmax']
        self.nR = kwargs['nR']
        self.validation_data = np.loadtxt(validation_filepath)
    def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
        """Measure DeltaSigma(R) around lens samples selected from the catalog
        and plot it against the published measurements chosen by self.data.
        Writes lens densities, DS tables and figures into output_dir and
        returns an inspect-only TestResult (no automatic pass/fail)."""
        # pylint: disable=no-member
        # Try to read cosmology from catalog, otherwise defaults to WMAP7
        try:
            cosmo = catalog_instance.cosmology
        except AttributeError:
            cosmo = WMAP7
        # Create interpolation tables for efficient computation of sigma crit
        z = np.linspace(0, self.zmax, self.zmax*100)
        d1 = cosmo.angular_diameter_distance(z) # in Mpc
        angular_diameter_distance = interp1d(z, d1, kind='quadratic')
        d2 = cosmo.comoving_transverse_distance(z) # in Mpc
        comoving_transverse_distance = interp1d(z, d2, kind='quadratic')
        # Now figure out the lenses, for the validation data available,
        # each have slightly non-trivial cuts, so we do them separately... not totally ideal
        if self.data == 'sdss_lowz':
            # Singh et al (2015) (http://adsabs.harvard.edu/abs/2015MNRAS.450.2195S) measurements on the SDSS LOWZ sample.
            res = catalog_instance.get_quantities(['redshift_true', 'ra', 'dec', 'shear_1', 'shear_2',
                                                   'mag_true_i_sdss', 'mag_true_z_sdss','mag_true_g_sdss', 'mag_true_r_sdss'])
            # Compute mask for lowz sample
            # These cuts are defined in section 3 of https://arxiv.org/pdf/1509.06529.pdf
            # and summarised here: http://www.sdss.org/dr14/algorithms/boss_galaxy_ts/#TheBOSSLOWZGalaxySample
            # Definition of auxiliary colors:
            cperp = (res['mag_true_r_sdss'] - res['mag_true_i_sdss']) - (res['mag_true_g_sdss'] - res['mag_true_r_sdss'])/4.0 - 0.18
            cpar = 0.7*(res['mag_true_g_sdss'] - res['mag_true_r_sdss']) + 1.2*((res['mag_true_r_sdss'] - res['mag_true_i_sdss'])-0.18)
            # LOWZ selection cuts:
            mask_lens = np.abs(cperp) < 0.2 # color boundaries
            mask_lens &= res['mag_true_r_sdss'] < (13.5 + cpar/0.3) # sliding magnitude cut
            mask_lens &= (res['mag_true_r_sdss'] > 16) &(res['mag_true_r_sdss'] < 19.6)
            # Additional redshift cuts used in Singh et al. (2015)
            mask_lens &= (res['redshift_true'] > self.zmin_l) & (res['redshift_true'] < self.zmax_l)
            Mask_lens = [mask_lens]
            fig = plt.figure()
        if self.data == 'cfhtlens':
            res = catalog_instance.get_quantities(['redshift_true', 'ra', 'dec', 'shear_1', 'shear_2',
                                                   'Mag_true_g_lsst_z0', 'Mag_true_r_lsst_z0'])
            # Four absolute-magnitude bins, each split into a red and a blue
            # subsample at the color percentile matching the published blue fraction.
            Mr_min = np.array([-21.0,-22.0,-23.0,-24.0])
            Mr_max = np.array([-20.0,-21.5,-22.5,-23.5])
            blue_frac = np.array([0.7,0.32,0.11,0.03])*100
            gr = res['Mag_true_g_lsst_z0'] - res['Mag_true_r_lsst_z0'] # larger number means redder
            Mask_lens = []
            for i in range(4):
                mask_lens = (res['redshift_true']>self.zmin_l) & (res['redshift_true']<self.zmax_l) & (res['Mag_true_r_lsst_z0']>Mr_min[i]) & (res['Mag_true_r_lsst_z0']<Mr_max[i])
                gr_threshold = np.percentile(gr[mask_lens], blue_frac[i])
                Mask_lens.append(mask_lens & (gr>gr_threshold))
                Mask_lens.append(mask_lens & (gr<gr_threshold))
            fig1 = plt.figure(1, figsize=(12,9))
            fig2 = plt.figure(2, figsize=(12,5))
        if self.data == 'sdss_main':
            res = catalog_instance.get_quantities(['redshift_true', 'ra', 'dec', 'shear_1', 'shear_2',
                                                   'mag_true_i_sdss', 'mag_true_z_sdss','mag_true_g_sdss', 'mag_true_r_sdss', 'stellar_mass_bulge', 'stellar_mass_disk','Mag_true_g_sdss_z0','Mag_true_r_sdss_z0'])
            # Four stellar-mass bins, each split into red/blue at g-r = 0.7.
            gr = res['Mag_true_g_sdss_z0'] - res['Mag_true_r_sdss_z0'] # larger number means redder
            sm = res['stellar_mass_bulge'] + res['stellar_mass_disk']
            SM_min = np.array([10,10.7,11.2,11.6])
            SM_max = np.array([10.4,11.0,11.4,15.0])
            Mask_lens = []
            for i in range(4):
                mask_lens = (res['redshift_true']>self.zmin_l) & (res['redshift_true']<self.zmax_l) & (res['mag_true_r_sdss']< 17.7) & (np.log10(sm)>SM_min[i]) & (np.log10(sm)<SM_max[i])
                Mask_lens.append(mask_lens & (gr>0.7)) # for the data, 0.7 is used for k-correct colors at z=0.1
                Mask_lens.append(mask_lens & (gr<0.7))
            fig1 = plt.figure(1, figsize=(12,9))
            fig2 = plt.figure(2, figsize=(12,5))
        # Computing mask for source sample, this only serves to keep the number of galaxies manageable
        mask_source = (res['redshift_true'] > self.zmin_s) & (res['redshift_true'] < self.zmax_s)
        inds = np.where(mask_source)[0]
        if len(inds) > int(self.max_background_galaxies):
            # Randomly downsample the sources to max_background_galaxies.
            mask_source[inds[np.random.choice(len(inds),
                                              size=len(inds) - int(self.max_background_galaxies),
                                              replace=False)]] = False
        coords = SkyCoord(ra=res['ra']*u.degree, dec=res['dec']*u.degree)
        coords_s = coords[mask_source]
        # run gammat in thin redshift bins, loop over lens bins of different stellar mass and colors
        for i in range(len(Mask_lens)):
            # Lens number density per square degree, appended to a log file.
            nlens = len(np.where(Mask_lens[i])[0]) / catalog_instance.sky_area
            with open(os.path.join(output_dir, 'galaxy_density_'+str(self.data)+'.dat'), 'a') as f:
                f.write('{} \n'.format(nlens))
            # Create astropy coordinate objects
            coords_l = coords[Mask_lens[i]]
            # Search for neighbours
            idx1, idx2, sep2d, _ = search_around_sky(coords_l, coords_s, 3.*u.deg)
            # Computing sigma crit for each pair
            zl = res['redshift_true'][Mask_lens[i]][idx1]
            zs = res['redshift_true'][mask_source][idx2]
            # Warning: this assumes a flat universe
            # See http://docs.astropy.org/en/v0.3/_modules/astropy/cosmology/core.html#FLRW.angular_diameter_distance_z1z2
            dm1 = comoving_transverse_distance(zl)
            dm2 = comoving_transverse_distance(zs)
            angular_diameter_distance_z1z2 = u.Quantity((dm2 - dm1)/(1. + zs), u.Mpc)
            sigcrit = cst.c**2 / (4.*np.pi*cst.G) * angular_diameter_distance(zs) / \
                ((1. + zl)**2. * angular_diameter_distance_z1z2 * angular_diameter_distance(zl))
            # NOTE: the validation data is in comoving coordinates, the next few
            # lines take care of proper unit conversions
            # Apply unit conversion to obtain sigma crit in h Msol /pc^2 (comoving)
            cms = u.Msun / u.pc**2
            sigcrit = sigcrit*(u.kg/(u.Mpc* u.m)).to(cms) / cosmo.h
            # Computing the projected separation for each pairs, in Mpc/h (comoving)
            r = sep2d.rad*angular_diameter_distance(zl)*(1. + zl) * cosmo.h
            # Computing the tangential shear
            thetac = np.arctan2((coords_s[idx2].dec.rad - coords_l[idx1].dec.rad) / np.cos((coords_s[idx2].dec.rad + coords_l[idx1].dec.rad) / 2.0),coords_s[idx2].ra.rad - coords_l[idx1].ra.rad)
            gammat = -(res['shear_1'][mask_source][idx2] * np.cos(2*thetac) - res['shear_2'][mask_source][idx2] * np.sin(2*thetac))
            # Binning the tangential shear
            bins = np.logspace(np.log10(self.Rmin), np.log10(self.Rmax), self.nR, endpoint=True)
            counts = np.histogram(r, bins=bins)[0]
            gt, b = np.histogram(r, bins=bins, weights=gammat*sigcrit)
            rp = 0.5*(b[1:]+b[:-1])
            gt = gt/counts
            # Persist DeltaSigma(rp) for this lens bin alongside the plots.
            outfile = os.path.join(output_dir, 'DS_'+str(self.data)+'_'+str(i)+'.dat')
            np.savetxt(outfile, np.vstack((rp, gt)).T)
            # Plotting: one layout per reference data set.
            if self.data == 'sdss_lowz':
                ax = plt.subplot(111)
                plt.errorbar(self.validation_data[:,0], self.validation_data[:,1], yerr=self.validation_data[:,2], label='SDSS LOWZ from Singh et al. (2015)',c='k', lw=1, marker='.', fmt='.', capthick=0.8, capsize=2.2)
                plt.loglog(rp, gt, label=catalog_name)
                plt.title('Lens number density: '+str(nlens)[:4]+' per sq. deg')
                ax.set_xlabel('$r_p$ [Mpc/h]')
                ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
                ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
                ax.set_ylim(0.5, 100)
            if self.data == 'cfhtlens':
                # ii: 0 = red subsample, 1 = blue; iii: magnitude-bin index.
                ii = np.mod(i,2)
                iii = int(i/2)
                plt.figure(1)
                ax = plt.subplot(2,2,iii+1)
                if ii==0:
                    plt.loglog(rp, gt, label=str(Mr_min[int(i/2)])+'< Mr < '+str(Mr_max[int(i/2)])+'; red; '+catalog_name, lw=2, color='r', alpha=0.5)
                    plt.errorbar(self.validation_data[:,0]/1000*(7./10.), self.validation_data[:,iii*2+1]/(7./10.), color='darkred', lw=2, marker='x', fmt='.', label='Velander et al. (2013)')
                    plt.text(self.Rmin*0.7*1.5, 1.5,'Red: '+str(nlens)[:4]+' per sq. deg')
                else:
                    plt.loglog(rp, gt, label=str(Mr_min[int(i/2)])+'< Mr < '+str(Mr_max[int(i/2)])+'; blue', lw=2, color='b', alpha=0.5)
                    plt.errorbar(self.validation_data[:,0]/1000*(7./10.), self.validation_data[:,iii*2+2]/(7./10.), color='darkblue', lw=2, marker='x', fmt='.')
                    plt.title('Lens number density: '+str(nlens)[:4]+' per sq. deg')
                    plt.text(self.Rmin*0.7*1.5, 1.0,'Blue: '+str(nlens)[:4]+' per sq. deg')
                ax.legend()
                ax.set_xlabel('$r_p$ [Mpc/h]')
                ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
                ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
                ax.set_ylim(0.5, 1000)
                plt.tight_layout()
                plt.figure(2)
                ax = plt.subplot(1,2,ii+1)
                plt.loglog(rp, gt, label='['+str(Mr_min[int(i/2)])+', '+str(Mr_max[int(i/2)])+']')
                if ii==0:
                    plt.title('red')
                else:
                    plt.title('blue')
                if i==(len(Mask_lens)-1):
                    plt.legend()
                ax.set_xlabel('$r_p$ [Mpc/h]')
                ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
                ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
                ax.set_ylim(0.5, 500)
            if self.data=='sdss_main':
                # ii: 0 = red subsample, 1 = blue; iii: stellar-mass-bin index.
                ii = np.mod(i,2)
                iii = int(i/2)
                plt.figure(1)
                ax = plt.subplot(2,2,iii+1)
                if ii==0:
                    plt.loglog(rp, gt, label=str(SM_min[int(i/2)])+'< log10(M*) < '+str(SM_max[int(i/2)])+'; red; '+catalog_name, lw=2, color='r', alpha=0.5)
                    plt.errorbar(self.validation_data[:15,0], self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+1], yerr=self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+2], color='darkred', lw=2, marker='x', fmt='.', label='Mandelbaum et al. (2016)')
                    plt.text(self.Rmin*0.7*1.5, 1.5,'Red: '+str(nlens)[:4]+' per sq. deg')
                else:
                    plt.loglog(rp, gt, label=str(SM_min[int(i/2)])+'< log10(M*) < '+str(SM_max[int(i/2)])+'; blue', lw=2, color='b', alpha=0.5)
                    plt.errorbar(self.validation_data[:15,0], self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+1], yerr=self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+2], color='darkblue', lw=2, marker='x', fmt='.')
                    plt.text(self.Rmin*0.7*1.5, 1,'Blue: '+str(nlens)[:4]+' per sq. deg')
                ax.legend()
                ax.set_xlabel('$r_p$ [Mpc/h]')
                ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
                ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
                ax.set_ylim(0.5, 1000)
                plt.tight_layout()
                plt.figure(2)
                ax = plt.subplot(1,2,ii+1)
                plt.loglog(rp, gt, label='['+str(SM_min[int(i/2)])+', '+str(SM_max[int(i/2)])+']')
                if ii==0:
                    plt.title('red')
                else:
                    plt.title('blue')
                if i==(len(Mask_lens)-1):
                    plt.legend()
                ax.set_xlabel('$r_p$ [Mpc/h]')
                ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
                ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
                ax.set_ylim(0.5, 500)
                plt.tight_layout()
        print(self.data)
        # Save the figure(s): two-panel layouts for cfhtlens/sdss_main,
        # a single figure for sdss_lowz.
        if self.data=='cfhtlens' or self.data=='sdss_main':
            fig1.savefig(os.path.join(output_dir, 'delta_sigma_'+str(catalog_name)+'1.png'))
            plt.close(fig1)
            fig2.savefig(os.path.join(output_dir, 'delta_sigma_'+str(catalog_name)+'2.png'))
            plt.close(fig2)
        else:
            fig.savefig(os.path.join(output_dir, 'delta_sigma_'+str(catalog_name)+'.png'))
            plt.close(fig)
        return TestResult(inspect_only=True)
| 48.731959 | 248 | 0.561032 |
acf47215af1bf6c0940f2a22962d54298d9b349a | 1,465 | py | Python | bots/gammabot.py | garrrychan/hackathon_rps | d75e72573a0b42aa774af8321b9f20756a1f2f5e | [
"MIT"
] | 1 | 2019-05-04T16:37:29.000Z | 2019-05-04T16:37:29.000Z | bots/gammabot.py | garrrychan/hackathon_rps | d75e72573a0b42aa774af8321b9f20756a1f2f5e | [
"MIT"
] | null | null | null | bots/gammabot.py | garrrychan/hackathon_rps | d75e72573a0b42aa774af8321b9f20756a1f2f5e | [
"MIT"
] | null | null | null | import pickle
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from utils import beat
### medium "Gamma" bot ####
class GammaBot:
    """Medium "Gamma" rock-paper-scissors bot.

    Predicts the player's next throw with a pre-trained, pickled scikit-learn
    pipeline (pipe.pkl) and answers with the throw that beats the prediction.
    """

    def __init__(self):
        # Spoof the first round randomly so predict() always has a previous
        # (player, bot) pair to build its feature row from; only the last
        # entry of each history list is ever read.
        options = ['rock', 'paper', 'scissors']
        player = np.random.choice(options)
        computer = np.random.choice(options)
        self.history = {'player': [player], 'bot': [computer]}

    def predict(self):
        """Return the bot's next throw (the counter to the predicted player throw)."""
        # Encoder mapping throw names <-> integer labels for the model.
        throw_mapper = LabelEncoder()
        throw_mapper.fit(['rock', 'paper', 'scissors'])
        # Bug fix: the file handle was previously leaked
        # (pickle.load(open(...))); `with` closes it deterministically.
        # SECURITY NOTE: pickle.load executes arbitrary code — pipe.pkl must
        # come from a trusted source.
        with open("pipe.pkl", "rb") as model_file:
            pipe = pickle.load(model_file)
        # Feature row: the most recent (player, computer) throws.
        new_data = pd.DataFrame([{'player': self.history["player"][-1],'computer': self.history["bot"][-1]}])
        # Encode, predict the player's next throw, decode back to a name.
        pred = throw_mapper.inverse_transform(pipe.predict(new_data.apply(throw_mapper.transform)))[0]
        # y is the bot throw that beats the prediction.
        y = beat(pred)
        # Record the bot throw before returning it.
        self.history['bot'].append(y)
        return y

    def throw(self, y):
        """Record the player's throw ``y`` and return the bot's counter-throw."""
        x = self.predict()
        # append player's throw to history
        self.history['player'].append(y)
        return x
| 34.880952 | 109 | 0.627304 |
acf47342e2251a256134d941ff45a2de394949be | 3,829 | py | Python | snake/informed_search_models/a_star_search.py | megh-khaire/SnakeAIs | 1dbc76a47a3bb4651c426f04671ae8ae12079c97 | [
"Apache-2.0"
] | null | null | null | snake/informed_search_models/a_star_search.py | megh-khaire/SnakeAIs | 1dbc76a47a3bb4651c426f04671ae8ae12079c97 | [
"Apache-2.0"
] | null | null | null | snake/informed_search_models/a_star_search.py | megh-khaire/SnakeAIs | 1dbc76a47a3bb4651c426f04671ae8ae12079c97 | [
"Apache-2.0"
] | null | null | null | from snake.main.game import Game
class AStar(Game):
    """Snake driven by A* search over the grid.

    ``self.path`` holds the planned route, stored from the food backwards;
    it is left empty when the food is unreachable.
    """
    def __init__(self, game_type):
        Game.__init__(self, game_type)
        self.open = [self.head]
        self.closed = []
        # Calculate initial path before the first move
        self.generate_path()

    def calculate_h(self, point):
        '''Calculates heuristic i.e the Manhatten distance between selected node and goal state'''
        return abs(self.food.x - point.x) + abs(self.food.y - point.y)

    def generate_path(self):
        '''Implements A* Search algorithm for snake traversal'''
        self.path = [self.head]
        self.closed = []
        self.open = [self.head]
        while self.open:
            # Expand the open node with the lowest f = g + h
            current = min(self.open, key=lambda node: node.f)
            self.open = [node for node in self.open if not node == current]
            self.closed.append(current)
            # Check if snake has reached the goal state
            if current == self.food:
                # Walk the origin links back to the head, recording the
                # route with the food first (the snake pops from the end).
                while current.origin:
                    self.path.append(current)
                    current = current.origin
                return
            # Explore neighbors of the selected node
            current.generate_neighbors()
            for neighbor in current.neighbors:
                # Walls and the snake's own body are not traversable.
                if neighbor in self.obstacles or neighbor in self.snake:
                    continue
                g_temp = current.g + 1
                if neighbor not in self.open and neighbor not in self.closed:
                    # First time this cell is seen: score it and queue it.
                    neighbor.h = self.calculate_h(neighbor)
                    neighbor.g = g_temp
                    neighbor.f = neighbor.g + neighbor.h
                    neighbor.origin = current
                    self.open.append(neighbor)
                elif neighbor in self.open:
                    # Already queued: keep whichever path is cheaper.
                    old_neighbor = [node for node in self.open if node == neighbor][0]
                    if old_neighbor.g > g_temp:
                        old_neighbor.h = self.calculate_h(neighbor)
                        old_neighbor.g = g_temp
                        # Bug fix: f must come from the *updated* node's g/h;
                        # the fresh ``neighbor`` object's g and h were never
                        # assigned on this branch, so the old code computed a
                        # bogus f-score.
                        old_neighbor.f = old_neighbor.g + old_neighbor.h
                        old_neighbor.origin = current
                else:
                    # Already expanded: on a cheaper path, rescore it and
                    # move it from the closed list back to the open list.
                    old_neighbor = [node for node in self.closed if node == neighbor][0]
                    if old_neighbor.g > g_temp:
                        old_neighbor.h = self.calculate_h(neighbor)
                        old_neighbor.g = g_temp
                        # Bug fix (same as above): use the updated node's g/h.
                        old_neighbor.f = old_neighbor.g + old_neighbor.h
                        old_neighbor.origin = current
                        self.closed = [node for node in self.closed if not node == old_neighbor]
                        self.open.append(old_neighbor)
        # Open list exhausted without reaching the food: no route exists.
        self.path = []
acf4735a4a06f0288abec2a18a1f3a03491ea02e | 1,454 | py | Python | scripts/_oldstuff/voronoi.py | heistermann/trmmlib | b32cf623737285073e4c61bd0e01a0fe8b26c329 | [
"MIT"
] | null | null | null | scripts/_oldstuff/voronoi.py | heistermann/trmmlib | b32cf623737285073e4c61bd0e01a0fe8b26c329 | [
"MIT"
] | null | null | null | scripts/_oldstuff/voronoi.py | heistermann/trmmlib | b32cf623737285073e4c61bd0e01a0fe8b26c329 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 01 11:40:03 2016
@author: heistermann
"""
from scipy.spatial import Voronoi
from scipy.spatial import voronoi_plot_2d
import pylab as plt
import numpy as np
# Nine seed points on a slightly perturbed 3x3 grid.
points = np.array([[0, 0.1], [0, 1.05], [0, 2.1], [1, 0], [1, 1], [1, 2],
                   [2, 0.1], [2.01, 1], [2.2, 2]])
# Build the Voronoi tessellation of the seed points.
vor = Voronoi(points)
# Bare attribute accesses: no-ops in a script, handy for inspection when
# pasted into an interactive session.
vor.vertices
vor.regions
vor.ridge_vertices
# Plot the seed points ('o') and the Voronoi vertices ('*').
plt.plot(points[:,0], points[:,1], 'o')
plt.plot(vor.vertices[:,0], vor.vertices[:,1], '*')
plt.xlim(-1, 3); plt.ylim(-1, 3)
# One color per ridge (cycled) for the manual plotting variant below.
colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k','b', 'g', 'r', 'c', 'm', 'y', 'k')
# Manual alternative: draw only the finite ridges segment by segment.
#for i,simplex in enumerate(vor.ridge_vertices):
#    simplex = np.asarray(simplex)
#    if np.all(simplex >= 0):
#        plt.plot(vor.vertices[simplex,0], vor.vertices[simplex,1], color=colors[i])
# Convenience helper that plots the full diagram, including infinite ridges.
voronoi_plot_2d(vor)
# Manual alternative for the infinite ridges: extend each one far beyond the
# plot limits along its outward normal.
#center = points.mean(axis=0)
#for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
#    simplex = np.asarray(simplex)
#    if np.any(simplex < 0):
#        i = simplex[simplex >= 0][0] # finite end Voronoi vertex
#        t = points[pointidx[1]] - points[pointidx[0]] # tangent
#        t /= np.linalg.norm(t)
#        n = np.array([-t[1], t[0]]) # normal
#        midpoint = points[pointidx].mean(axis=0)
#        far_point = vor.vertices[i] + np.sign(np.dot(midpoint - center, n)) * n * 100
#        plt.plot([vor.vertices[i,0], far_point[0]],
#                 [vor.vertices[i,1], far_point[1]], 'k--')
acf473b087bef606a364879eea77007243f154a4 | 5,072 | py | Python | tests/shakedown/shakedown/dcos/master.py | ryanmaclean/marathon | cc35af421675205b797b31890e88b5fa4d178a02 | [
"Apache-2.0"
] | null | null | null | tests/shakedown/shakedown/dcos/master.py | ryanmaclean/marathon | cc35af421675205b797b31890e88b5fa4d178a02 | [
"Apache-2.0"
] | 1 | 2021-12-17T10:43:40.000Z | 2021-12-17T10:43:40.000Z | tests/shakedown/shakedown/dcos/master.py | ryanmaclean/marathon | cc35af421675205b797b31890e88b5fa4d178a02 | [
"Apache-2.0"
] | null | null | null | """Utilities for working with master"""
import contextlib
import logging
import json
import pytest
from datetime import timedelta
from . import master_ip, master_url, network
from .agent import kill_process_from_pid_file_on_host
from .command import run_command_on_master
from .spinner import time_wait
from .zookeeper import get_zk_node_children, get_zk_node_data
from .. import http
# iptables rule fragments that reject TCP traffic on the Mesos master
# port (5050), one for each direction.
DISABLE_MASTER_INCOMING = "-I INPUT -p tcp --dport 5050 -j REJECT"
DISABLE_MASTER_OUTGOING = "-I OUTPUT -p tcp --sport 5050 -j REJECT"
# Module-level logger.
logger = logging.getLogger(__name__)
def partition_master(incoming=True, outgoing=True):
    """Block the Mesos master port (5050) while keeping the rest of the
    DC/OS cluster running.

    :param incoming: block traffic arriving at the master process (default True)
    :param outgoing: block traffic leaving the master process (default True)
    """
    logger.info('Partitioning master. Incoming:%s | Outgoing:%s', incoming, outgoing)

    # Snapshot the current firewall so reconnect_master() can restore it,
    # then start from a clean, permissive rule set.
    leader = master_ip()
    network.save_iptables(leader)
    network.flush_all_rules(leader)
    network.allow_all_traffic(leader)

    # The two directions are independent; apply whichever was requested.
    if incoming:
        network.run_iptables(leader, DISABLE_MASTER_INCOMING)
    if outgoing:
        network.run_iptables(leader, DISABLE_MASTER_OUTGOING)
def reconnect_master():
    """Undo partition_master() by restoring the previously saved iptables
    rules on the master."""
    network.restore_iptables(master_ip())
def restart_master_node():
    """Reboot the master node immediately."""
    run_command_on_master("sudo /sbin/shutdown -r now")
def systemctl_master(command='restart'):
    """Start, stop or restart the dcos-mesos-master unit on the leader."""
    systemctl_cmd = 'sudo systemctl {} dcos-mesos-master'.format(command)
    run_command_on_master(systemctl_cmd)
def mesos_available_predicate():
    """Return True when the Mesos master URL answers with HTTP 200."""
    endpoint = master_url()
    try:
        # Any request failure (refused, timeout, ...) counts as "not up".
        return http.get(endpoint).status_code == 200
    except Exception:
        return False
def wait_for_mesos_endpoint(timeout_sec=timedelta(minutes=5).total_seconds()):
    """Poll the Mesos master URL; True once it responds, False when the
    timeout (default 5 minutes) expires."""
    return time_wait(mesos_available_predicate, timeout_seconds=timeout_sec)
def _mesos_zk_nodes():
    """All child nodes of the /mesos znode in ZooKeeper."""
    return get_zk_node_children('/mesos')
def _master_zk_nodes_keys():
    """ The masters can be registered in zk with arbitrary ids which start with
    `json.info_`. This provides a list of all master keys.
    """
    # Keep only the ZooKeeper children whose title marks a master record.
    return [node['key'] for node in _mesos_zk_nodes()
            if 'json.info' in node['title']]
def get_all_masters():
    """ Returns the json object that represents each of the masters.
    """
    # Fetch each master's ZK record and decode its JSON payload.
    return [json.loads(get_zk_node_data(key)['str'])
            for key in _master_zk_nodes_keys()]
def get_all_master_ips():
    """ Returns a list of IPs for the masters
    """
    return [master['hostname'] for master in get_all_masters()]
def is_multi_master():
    """Return True when the cluster runs more than one Mesos master."""
    return len(get_all_masters()) > 1
def required_masters(count):
    """ Returns True when the number of MASTERS is LESS than `count`, i.e.
    when the requirement is NOT met.  The inverted logic is deliberate: it is
    meant for pytest skipif, e.g. `pytest.mark.skipif('required_masters(3)')`,
    which skips the test when fewer than 3 masters are available.
    (The original docstring mentioned "private agents"; this counts masters.)

    :param count: the number of required masters.
    """
    master_count = len(get_all_masters())
    # reverse logic (skip if less than count)
    # returns True if less than count
    return master_count < count
def masters(count=1):
    # pytest marker: skip the test unless the cluster has at least `count`
    # masters.  The condition string is evaluated lazily by pytest, which is
    # why required_masters is referenced by name inside a string.
    return pytest.mark.skipif('required_masters({})'.format(count))
def start_master_http_service(port=7777, pid_file='python_http.pid'):
    """ Starts a http service on the master leader. The main purpose is to serve
    up artifacts for launched test applications. This is commonly used in combination
    with copying tests or artifacts to the leader then configuring the mesos task
    to fetch from http://master.mesos:7777/artifact.tar (be careful in a multi-master env)
    :param port: port to use for the http service
    :param pid_file: file on the master where the server's PID is written
    :return: pid_file
    """
    # nohup + backgrounding keeps the server alive after the SSH command
    # returns; `echo $!` captures the PID so the server can be killed later.
    run_command_on_master(
        'nohup /opt/mesosphere/bin/python -m http.server {} > http.log 2>&1 & '
        'echo $! > {}'.format(port, pid_file))
    return pid_file
@contextlib.contextmanager
def master_http_service(port=7777):
    """Context manager serving HTTP from the master leader for the duration
    of the `with` block; the server process is killed on exit.

    :param port: port the http.server listens on
    """
    pid_file = start_master_http_service(port)
    try:
        yield
    finally:
        # Ensure the server is torn down even when the with-block raises;
        # the original leaked the process on exception.  This mirrors the
        # try/finally pattern used by disconnected_master() below.
        kill_process_from_pid_file_on_host(master_ip(), pid_file)
@contextlib.contextmanager
def disconnected_master(incoming=True, outgoing=True):
    # Context manager: the master is partitioned for the duration of the
    # with-block and always reconnected afterwards, even on error.
    partition_master(incoming, outgoing)
    try:
        yield
    finally:
        # return config to previous state
        reconnect_master()
| 29.149425 | 89 | 0.710765 |
acf47443519db63a108cd7d8f0eee4d3e2a3a6e6 | 1,932 | py | Python | plugins/image-custom/utils.py | fz6m/tomon-naixue | dfbdd69836f26d160cece34e204f9fb2ed731607 | [
"MIT"
] | 3 | 2020-08-23T17:43:09.000Z | 2020-08-31T04:43:42.000Z | plugins/image-custom/utils.py | fz6m/tomon-naixue | dfbdd69836f26d160cece34e204f9fb2ed731607 | [
"MIT"
] | null | null | null | plugins/image-custom/utils.py | fz6m/tomon-naixue | dfbdd69836f26d160cece34e204f9fb2ed731607 | [
"MIT"
] | null | null | null |
import os
import aiofiles
from enum import Enum
try:
import ujson as json
except:
import json
class Model(Enum):
    # Matching modes consumed by Tools.commandMatch.
    ALL = '_all'            # exact string match
    BLURRY = '_blurry'      # substring match
    SEND_AT = '_send_at'
    SEND_DEFAULT = '_send_default'
class Status(Enum):
    # Sentinel return values used by the Tools file helpers instead of
    # raising exceptions (callers must compare against these).
    SUCCESS = '_success'
    FAILURE = '_failure'
class Tools():
    """Static helpers for command matching, folder creation and async file I/O.

    The read helpers return Status.FAILURE when the path does not exist
    instead of raising, so callers must check for that sentinel.
    """
    @staticmethod
    def commandMatch(msg, commandList, model = Model.ALL):
        """Return True when msg matches any command in commandList.

        Model.ALL requires an exact match, Model.BLURRY a substring match;
        any other mode never matches.  (Rewritten with any() instead of the
        original manual loops; `msg.find(c) != -1` became `c in msg`.)
        """
        if model == Model.ALL:
            return any(c == msg for c in commandList)
        if model == Model.BLURRY:
            return any(c in msg for c in commandList)
        return False
    @staticmethod
    def checkFolder(dir):
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(dir, exist_ok=True)
    @staticmethod
    async def readJsonFile(p):
        """Read and decode a JSON file; Status.FAILURE if it is missing."""
        if not os.path.exists(p):
            return Status.FAILURE
        async with aiofiles.open(p, 'r', encoding='utf-8') as f:
            content = await f.read()
            return json.loads(content)
    @staticmethod
    async def writeJsonFile(p, content):
        """Serialize `content` as JSON into file `p`."""
        async with aiofiles.open(p, 'w', encoding='utf-8') as f:
            await f.write(json.dumps(content))
        return Status.SUCCESS
    @staticmethod
    async def readFileByLine(p):
        """Return the file's lines (newlines kept); Status.FAILURE if missing."""
        if not os.path.exists(p):
            return Status.FAILURE
        async with aiofiles.open(p, 'r', encoding = 'utf-8') as f:
            content = await f.readlines()
            return content
    @staticmethod
    async def readFileContent(p):
        """Return the whitespace-stripped content; Status.FAILURE if missing."""
        if not os.path.exists(p):
            return Status.FAILURE
        async with aiofiles.open(p, 'r', encoding = 'utf-8') as f:
            content = await f.read()
            return content.strip()
    @staticmethod
    async def writeFile(p, content):
        """Write raw text `content` into file `p`."""
        async with aiofiles.open(p, 'w', encoding = 'utf-8') as f:
            await f.write(content)
acf4751c2453daba622b2167a22e54a9652ab3ba | 889 | py | Python | scripts/generate_random_mask.py | johnrest/speckle_removal | b57339f6458cd6e685306ca5c05fc1500500160b | [
"MIT"
] | null | null | null | scripts/generate_random_mask.py | johnrest/speckle_removal | b57339f6458cd6e685306ca5c05fc1500500160b | [
"MIT"
] | 4 | 2021-03-18T20:52:02.000Z | 2022-03-11T23:27:42.000Z | scripts/generate_random_mask.py | johnrest/speckle_removal | b57339f6458cd6e685306ca5c05fc1500500160b | [
"MIT"
] | null | null | null | from speck_rem import *
from speck_rem.dmd import *
# Destination and naming for the generated DMD mask images.
target_folder = "D:/Research/SpeckleRemoval/Data/2019_01_25/test"
mask_image_prefix = "pattern_"
number_patterns = 20

# Candidate speckle grain sizes; one is drawn at random per pattern.
grain_list = np.linspace(14, 24, 6, endpoint=True)

# Select period values in pixels
# period_list = np.linspace(8, 20, num=7, endpoint=True)
period = 10.0

# Select angle values
# angle_list = np.linspace(0.0, np.pi/2.0, num=5, endpoint=True)
angle = np.pi/4

for itr in range(number_patterns):
    mask = Mask()
    grain = int(np.random.choice(grain_list, 1))
    mask.compute_random_mask(period, angle, grain)
    print("Angle (grad): {0}; Period (pix): {1}".format(angle * 180 / np.pi, period))
    current_image_file = os.path.join(target_folder, mask_image_prefix + "{:03d}".format(itr))
    print("Writing image to file: ", current_image_file, )
    mask.write_array_into_image_file(current_image_file, ".png")
acf475397cbbc72fc4ec91f855987edc56599d2d | 9,794 | py | Python | test/ux/web/service/test_profiling.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | [
"Apache-2.0"
] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | test/ux/web/service/test_profiling.py | intel/lp-opt-tool | 130eefa3586b38df6c0ff78cc8807ae273f6a63f | [
"Apache-2.0"
] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | test/ux/web/service/test_profiling.py | intel/neural-compressor | 16a4a12045fcb468da4d33769aff2c1a5e2ba6ba | [
"Apache-2.0"
] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ProfilingService test."""
import unittest
from unittest.mock import MagicMock, patch
from werkzeug.wrappers import Response
from neural_compressor.ux.utils.exceptions import ClientErrorException, NotFoundException
from neural_compressor.ux.web.service.profiling import ProfilingService
class TestProfilingService(unittest.TestCase):
    """Test ProfilingService.

    Every test stubs ProfilingService._get_workload_data (and, when a file
    would be served, ResponseGenerator.serve_from_filesystem) with MagicMock,
    so the service logic is exercised without any real workload, database or
    filesystem access.  Note that @patch decorators inject mocks bottom-up:
    the innermost decorator maps to the first mock parameter.
    """
    def test_get_config_fails_when_no_workload_id_requested(self) -> None:
        """get_config raises ClientErrorException when the request has no id."""
        with self.assertRaisesRegex(ClientErrorException, "Missing id parameter"):
            ProfilingService.get_config({})
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_config_fails_when_no_workload_found(
        self,
        mocked_get_workload_data: MagicMock,
    ) -> None:
        """get_config raises NotFoundException when the workload lookup is empty."""
        mocked_get_workload_data.return_value = {}
        with self.assertRaisesRegex(NotFoundException, "Unable to find config file"):
            ProfilingService.get_config(
                {
                    "id": [1],
                },
            )
        mocked_get_workload_data.assert_called_with({"id": [1]})
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_config_fails_when_config_path_missing(
        self,
        mocked_get_workload_data: MagicMock,
    ) -> None:
        """get_config raises NotFoundException when config_path is None."""
        mocked_get_workload_data.return_value = {
            "config_path": None,
        }
        with self.assertRaisesRegex(
            NotFoundException,
            "Unable to find config file",
        ):
            ProfilingService.get_config(
                {
                    "id": [1],
                },
            )
        mocked_get_workload_data.assert_called_with({"id": [1]})
    @patch("neural_compressor.ux.web.service.workload.ResponseGenerator.serve_from_filesystem")
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_config(
        self,
        mocked_get_workload_data: MagicMock,
        mocked_serve_from_filesystem: MagicMock,
    ) -> None:
        """get_config serves the workload's config file as text/vnd.yaml."""
        mocked_get_workload_data.return_value = {
            "config_path": "/some/fake/config/path.yaml",
        }
        expected = Response("fake config content")
        mocked_serve_from_filesystem.return_value = expected
        actual = ProfilingService.get_config(
            {
                "id": [1],
            },
        )
        self.assertEqual(expected, actual)
        mocked_get_workload_data.assert_called_with({"id": [1]})
        mocked_serve_from_filesystem.assert_called_once_with(
            path="/some/fake/config/path.yaml",
            mimetype="text/vnd.yaml",
        )
    def test_get_code_template_fails_when_no_workload_id_requested(self) -> None:
        """get_code_template raises ClientErrorException without an id."""
        with self.assertRaisesRegex(ClientErrorException, "Missing id parameter"):
            ProfilingService.get_code_template({})
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_code_template_fails_when_no_workload_found(
        self,
        mocked_get_workload_data: MagicMock,
    ) -> None:
        """get_code_template raises NotFoundException on empty workload data."""
        mocked_get_workload_data.return_value = {}
        with self.assertRaisesRegex(
            NotFoundException,
            "Unable to find code template file",
        ):
            ProfilingService.get_code_template(
                {
                    "id": [1],
                },
            )
        mocked_get_workload_data.assert_called_with({"id": [1]})
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_code_template_fails_when_code_template_path_missing(
        self,
        mocked_get_workload_data: MagicMock,
    ) -> None:
        """get_code_template raises NotFoundException when path is None."""
        mocked_get_workload_data.return_value = {
            "code_template_path": None,
        }
        with self.assertRaisesRegex(
            NotFoundException,
            "Unable to find code template file",
        ):
            ProfilingService.get_code_template(
                {
                    "id": [1],
                },
            )
        mocked_get_workload_data.assert_called_with({"id": [1]})
    @patch("neural_compressor.ux.web.service.workload.ResponseGenerator.serve_from_filesystem")
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_code_template(
        self,
        mocked_get_workload_data: MagicMock,
        mocked_serve_from_filesystem: MagicMock,
    ) -> None:
        """get_code_template serves the template file as text/x-python."""
        mocked_get_workload_data.return_value = {
            "code_template_path": "/some/fake/code/template/path.py",
        }
        expected = Response("fake code template content")
        mocked_serve_from_filesystem.return_value = expected
        actual = ProfilingService.get_code_template(
            {
                "id": [1],
            },
        )
        self.assertEqual(expected, actual)
        mocked_get_workload_data.assert_called_with({"id": [1]})
        mocked_serve_from_filesystem.assert_called_once_with(
            path="/some/fake/code/template/path.py",
            mimetype="text/x-python",
        )
    def test_get_output_fails_when_no_workload_id_requested(self) -> None:
        """get_output raises ClientErrorException without an id."""
        with self.assertRaisesRegex(ClientErrorException, "Missing id parameter"):
            ProfilingService.get_output({})
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_output_fails_when_no_workload_found(
        self,
        mocked_get_workload_data: MagicMock,
    ) -> None:
        """get_output raises NotFoundException on empty workload data."""
        mocked_get_workload_data.return_value = {}
        with self.assertRaisesRegex(
            NotFoundException,
            "Unable to find output log",
        ):
            ProfilingService.get_output(
                {
                    "id": [1],
                },
            )
        mocked_get_workload_data.assert_called_with({"id": [1]})
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_output_fails_when_log_path_missing(
        self,
        mocked_get_workload_data: MagicMock,
    ) -> None:
        """get_output raises NotFoundException when log_path is None."""
        mocked_get_workload_data.return_value = {
            "log_path": None,
        }
        with self.assertRaisesRegex(
            NotFoundException,
            "Unable to find output log",
        ):
            ProfilingService.get_output(
                {
                    "id": [1],
                },
            )
        mocked_get_workload_data.assert_called_with({"id": [1]})
    @patch("neural_compressor.ux.web.service.workload.ResponseGenerator.serve_from_filesystem")
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_output(
        self,
        mocked_get_workload_data: MagicMock,
        mocked_serve_from_filesystem: MagicMock,
    ) -> None:
        """get_output serves the log file and adds a refresh=3 header."""
        filesystem_response = Response("fake output content")
        mocked_get_workload_data.return_value = {
            "log_path": "/some/fake/output/path.log",
        }
        mocked_serve_from_filesystem.return_value = filesystem_response
        actual = ProfilingService.get_output(
            {
                "id": [1],
            },
        )
        self.assertEqual(filesystem_response.data, actual.data)
        self.assertEqual("3", actual.headers.get("refresh"))
        mocked_get_workload_data.assert_called_with({"id": [1]})
        mocked_serve_from_filesystem.assert_called_once_with(
            path="/some/fake/output/path.log",
            mimetype="text/plain",
        )
    @patch("neural_compressor.ux.web.service.workload.ResponseGenerator.serve_from_filesystem")
    @patch("neural_compressor.ux.web.service.profiling.ProfilingService._get_workload_data")
    def test_get_output_with_failure(
        self,
        mocked_get_workload_data: MagicMock,
        mocked_serve_from_filesystem: MagicMock,
    ) -> None:
        """get_output maps a filesystem NotFoundException to a 404 response."""
        mocked_get_workload_data.return_value = {
            "log_path": "/some/fake/output/path.log",
        }
        mocked_serve_from_filesystem.side_effect = NotFoundException("Unable to find file.")
        actual = ProfilingService.get_output(
            {
                "id": [1],
            },
        )
        self.assertEqual("Unable to find file.", actual.data.decode("utf-8"))
        self.assertEqual(404, actual.status_code)
        self.assertEqual("3", actual.headers.get("refresh"))
        mocked_get_workload_data.assert_called_with({"id": [1]})
        mocked_serve_from_filesystem.assert_called_once_with(
            path="/some/fake/output/path.log",
            mimetype="text/plain",
        )
if __name__ == "__main__":
    # Allow running this test module directly: python test_profiling.py
    unittest.main()
| 34.607774 | 95 | 0.636818 |
acf4758132ef931e7bf8b3a2313c1937bae97228 | 1,489 | py | Python | examples/experimental/native_mnist_pytorch/trial_impl.py | sidneyw/determined | 77e045c31909e0c592fba1bf359123ee16f0c531 | [
"Apache-2.0"
] | 3 | 2020-04-30T03:56:15.000Z | 2020-04-30T04:01:24.000Z | examples/experimental/native_mnist_pytorch/trial_impl.py | takabayashi/determined | 820c7250d8fdc6abba83c106f36eede6fc9f5f3a | [
"Apache-2.0"
] | 1 | 2022-02-10T07:31:44.000Z | 2022-02-10T07:31:44.000Z | examples/experimental/native_mnist_pytorch/trial_impl.py | takabayashi/determined | 820c7250d8fdc6abba83c106f36eede6fc9f5f3a | [
"Apache-2.0"
] | 2 | 2020-07-10T23:08:23.000Z | 2021-01-13T10:01:59.000Z | """
This example demonstrates training a simple DNN with pytorch using the Determined
Native API.
"""
import argparse
import json
import pathlib
from determined import experimental
import determined as det
import model_def
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config",
        dest="config",
        help="Specifies Determined Experiment configuration.",
        default="{}",
    )
    parser.add_argument(
        "--mode", dest="mode", help="Specifies local mode or cluster mode.", default="cluster"
    )
    args = parser.parse_args()
    # Base experiment configuration; anything passed via --config (a JSON
    # string) is merged on top below and overrides these defaults.
    config = {
        "data": {
            "url": "https://s3-us-west-2.amazonaws.com/determined-ai-test-data/pytorch_mnist.tar.gz"
        },
        # Hyperparameter search space: Log/Double define ranges for the
        # searcher; Constant pins a value.
        "hyperparameters": {
            "learning_rate": det.Log(minval=-3.0, maxval=-1.0, base=10),
            "dropout": det.Double(minval=0.2, maxval=0.8),
            "global_batch_size": det.Constant(value=64),
            "n_filters1": det.Constant(value=32),
            "n_filters2": det.Constant(value=32),
        },
        "searcher": {
            "name": "single",
            "metric": "validation_error",
            "max_steps": 20,
            "smaller_is_better": True,
        },
    }
    config.update(json.loads(args.config))
    # Submit (or locally run, per --mode) the experiment via the Native API;
    # the whole current directory is shipped as the experiment context.
    experimental.create(
        trial_def=model_def.MNistTrial,
        config=config,
        mode=experimental.Mode(args.mode),
        context_dir=str(pathlib.Path.cwd()),
    )
| 27.574074 | 100 | 0.597045 |
acf4771c35f194e7edc2e7ef7d565075a70d50df | 2,408 | py | Python | fairseq/tasks/__init__.py | young-zonglin/fairseq-extended | d36b33a7b5bf3e8dfccdbd06b360e9abd80bcc0e | [
"BSD-3-Clause"
] | 13 | 2019-07-15T22:30:35.000Z | 2021-10-02T08:24:07.000Z | fairseq/tasks/__init__.py | young-zonglin/fairseq-extended | d36b33a7b5bf3e8dfccdbd06b360e9abd80bcc0e | [
"BSD-3-Clause"
] | 1 | 2020-09-12T17:46:55.000Z | 2020-09-28T07:32:11.000Z | fairseq/tasks/__init__.py | young-zonglin/fairseq-extended | d36b33a7b5bf3e8dfccdbd06b360e9abd80bcc0e | [
"BSD-3-Clause"
] | 6 | 2021-02-25T08:56:31.000Z | 2022-03-20T08:51:28.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask
# Maps task name -> FairseqTask subclass; populated by @register_task.
TASK_REGISTRY = {}
# Tracks registered class names to forbid two tasks sharing a class name.
TASK_CLASS_NAMES = set()
def setup_task(args):
    # Look up the task class registered under args.task and delegate
    # construction to its setup_task() classmethod.
    return TASK_REGISTRY[args.task].setup_task(args)
def register_task(name):
    """
    New tasks can be added to fairseq with the
    :func:`~fairseq.tasks.register_task` function decorator.
    For example::
        @register_task('classification')
        class ClassificationTask(FairseqTask):
            (...)
    .. note::
        All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
        interface.
    Please see the :class:`~fairseq.tasks.FairseqTask` documentation for the
    required interface.
    Args:
        name (str): the name of the task
    """
    def register_task_cls(cls):
        # Fail fast on duplicate task names, non-FairseqTask classes, and
        # duplicate class names (class names must be globally unique too).
        if name in TASK_REGISTRY:
            raise ValueError('Cannot register duplicate task ({})'.format(name))
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        return cls
    return register_task_cls
# automatically import any Python files in the tasks/ directory;
# importing each module triggers its @register_task decorators, which
# populate TASK_REGISTRY as a side effect.
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        task_name = file[:file.find('.py')]
        importlib.import_module('fairseq.tasks.' + task_name)
        # expose `task_parser` for sphinx
        if task_name in TASK_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_task = parser.add_argument_group('Task name')
            # fmt: off
            group_task.add_argument('--task', metavar=task_name,
                                    help='Enable this task with: ``--task=' + task_name + '``')
            # fmt: on
            group_args = parser.add_argument_group('Additional command-line arguments')
            TASK_REGISTRY[task_name].add_args(group_args)
            # e.g. `translation_parser`; consumed by the documentation build.
            globals()[task_name + '_parser'] = parser
acf47776aa38bd48bb31aed74945801057509b87 | 291 | py | Python | examples/simple_sequenticon.py | Edinburgh-Genome-Foundry/sequenticon | 710286472cc25de8e5f32cd6a91b46078d4f08c9 | [
"MIT"
] | 29 | 2018-04-13T07:31:17.000Z | 2021-11-14T17:11:58.000Z | examples/simple_sequenticon.py | Edinburgh-Genome-Foundry/sequenticon | 710286472cc25de8e5f32cd6a91b46078d4f08c9 | [
"MIT"
] | 3 | 2018-04-20T15:14:46.000Z | 2020-06-23T11:32:52.000Z | examples/simple_sequenticon.py | Edinburgh-Genome-Foundry/sequenticon | 710286472cc25de8e5f32cd6a91b46078d4f08c9 | [
"MIT"
] | 3 | 2018-04-20T15:07:18.000Z | 2020-06-23T06:32:51.000Z | import os
from sequenticon import sequenticon
# Sequence to PNG
sequenticon(sequence="ATGTGCCGAT", output_path="simple_sequenticon_1.png")
# Record file to PNG
sequenticon(sequence=os.path.join("example_sequences", "seq4.gb"),
output_path="simple_sequenticon_2.png", size=120)
| 29.1 | 74 | 0.769759 |
acf477e2f5456267b47f2506cf53286f670503fb | 16,750 | py | Python | src/utils.py | cgartrel/scalable-nonsymmetric-DPPs | 48443bed6d7dce18a971f5e89a489da98c0d7091 | [
"MIT"
] | 2 | 2021-04-27T12:42:43.000Z | 2021-04-27T12:42:54.000Z | src/utils.py | cgartrel/scalable-nonsymmetric-DPPs | 48443bed6d7dce18a971f5e89a489da98c0d7091 | [
"MIT"
] | null | null | null | src/utils.py | cgartrel/scalable-nonsymmetric-DPPs | 48443bed6d7dce18a971f5e89a489da98c0d7091 | [
"MIT"
] | null | null | null | """
Synopsis: Some useful functions.
"""
import sys
import os
Header = os.path.dirname(os.path.abspath(__file__))
Header = Header[:-3]
sys.path.append(Header)
import random
import tempfile
import glob
import io
import codecs
import logging
import argparse
import sqlite3
import numpy as np
import pandas as pd
import torch
from torch.nn.utils.rnn import pad_sequence
# Log everything (DEBUG and up) to stdout.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# control random-number generators
torch.manual_seed(1234)
random.seed(1234)
# Set default for floating point to torch.float64
torch.set_default_tensor_type(torch.DoubleTensor)
# Offset added to det(L_i) term in nonsymmetric low-rank DPP log-likelihood, to promote
# positive-definiteness and improve numerical stability for Cholesky decomposition
epsilon = 1e-5
class LogLikelihood(object):
    """Static helpers computing the (non)symmetric low-rank DPP log-likelihood.

    Notation: V are symmetric item embeddings, B the nonsymmetric embeddings,
    and C = D - D^T the skew-symmetric mixing matrix, so the kernel is
    L = V V^T + B C B^T.
    """
    @staticmethod
    def compute_log_likelihood(model, baskets, alpha_regularization=0.,
                               beta_regularization=0.,
                               reduce=True, checks=False, mapped=True):
        """
        Computes nonsymmetric low-rank DPP log-likelihood:
        sum_i log det(L_{basket_i}) - |baskets| * log det(L + I) - reg,
        averaged over baskets when `reduce` is True.
        """
        num_baskets = len(baskets)
        batchnorm = "BatchNorm" in str(model.embeddings)
        # Get the symmetric and nonsymmetric embedding components of each product in the catalog
        B = None
        C = None
        if model.disable_nonsym_embeddings:
            V = model.forward(model.all_items_in_catalog_set_var)
        else:
            V, B, D = model.forward(model.all_items_in_catalog_set_var)
            C = D - D.transpose(0, 1)
        # Compute first term (numerator) of nonsymmetric low-rank DPP likelihood
        first_term = LogLikelihood.compute_log_likelihood_batches(
            model, baskets, V=V, B=B, C=C, reduce=reduce)
        # Compute denominator of nonsymmetric low-rank DPP likelihood (normalization constant)
        # Symmetric component
        # Use dual form of L for the symmetric component of the normalizer
        V_transpose = V.transpose(0, 1)
        L_dual = V_transpose.mm(V)
        num_sym_embedding_dims = L_dual.size(0)
        identity_num_sym_embedding_dims = torch.eye(num_sym_embedding_dims).to(model.device)
        logpartition = torch.slogdet(L_dual + identity_num_sym_embedding_dims)[1]
        if not model.disable_nonsym_embeddings:
            # Use Woodbury formula and matrix determinant lemma to efficiently compute nonsymmetric
            # components of normalizer
            B_transpose = B.transpose(0, 1)
            logpartition = logpartition + torch.slogdet(C)[1]
            logpartition = logpartition + torch.slogdet(
                torch.inverse(C) + B_transpose.mm(B) - B_transpose.mm(V).mm(torch.inverse(
                    identity_num_sym_embedding_dims + L_dual)).mm(V_transpose).mm(B))[1]
        # don't forget smooth the normalization term too (lest DPP is no longer
        # a probability density)
        if batchnorm:
            second_term = 0
        else:
            second_term = logpartition.to(model.device)
        # L2-style regularization
        third_term = None
        if alpha_regularization != 0 or \
                beta_regularization != 0:
            third_term = model.reg(
                V, B, C, model.lambda_vec,
                torch.Tensor([alpha_regularization]).to(model.device),
                torch.Tensor([beta_regularization]).to(model.device))
        else:
            third_term = 0.
        # if reduce is set, then at this point logliks holds the sum of logliks
        # over all baskets in this minibatch, else it's just a list of the
        # latter
        if reduce:
            first_term /= num_baskets # this now the avg loglik over all bsks
            logliks = first_term - second_term - third_term
        else:
            logliks = first_term - second_term - third_term
        if checks:
            if reduce and alpha_regularization == 0.:
                assert logliks <= 0
        return logliks
    # Compute the log-likelihood term for a collection of baskets (first term
    # of DPP log-likelihood).
    @staticmethod
    def compute_log_likelihood_baskets(model, baskets, V, B=None, C=None, reduce=True):
        """Per-basket log det(L_i + eps*I), looping one basket at a time.

        Slower reference implementation of compute_log_likelihood_batches.
        """
        num_baskets = len(baskets)
        # Get embeddings for each basket
        V_embeddings = [V[basket] for basket in baskets]
        if not model.disable_nonsym_embeddings:
            B_embeddings = [B[basket] for basket in baskets]
        # Compute first term (numerator) of nonsymmetric low-rank DPP likelihood
        if reduce:
            first_term = 0
        else:
            first_term = torch.zeros(num_baskets).to(model.device)
        for i, V_i in enumerate(V_embeddings):
            # Symmetric component
            L_i_symm = V_i.mm(V_i.transpose(0, 1))
            # Nonsymmetric components
            if not model.disable_nonsym_embeddings:
                B_i = B_embeddings[i]
                nonsymm_i = B_i.mm(C).mm(B_i.transpose(0, 1))
            # Add epsilon * I to improve numerical stability
            eye_L_i = torch.eye(L_i_symm.size()[0]).to(model.device)
            if model.disable_nonsym_embeddings:
                tmp = torch.slogdet(L_i_symm + epsilon * eye_L_i)[1]
            else:
                tmp = torch.slogdet(L_i_symm + epsilon * eye_L_i + nonsymm_i)[1]
            tmp = tmp.to(model.device)
            if reduce:
                first_term += tmp
            else:
                first_term[i] = tmp
        return first_term
    # Compute the log-likelihood term for a collection of baskets (first term
    # of DPP log-likelihood) with batch matrix-multiplication.
    @staticmethod
    def compute_log_likelihood_batches(model, baskets, V, B=None, C=None, reduce=True):
        """Vectorized per-basket log det(L_i): baskets are padded to equal
        length and processed with one bmm; padded diagonal entries are set
        to 1 so they do not change the determinant.
        """
        # Get embeddings for each basket
        V_batch = pad_sequence([V[basket] for basket in baskets], batch_first=True)
        # Define mask matrix for padding one in diagonals in L_V.
        mask = ((V_batch != 0).sum(dim=-1) > 0).detach()
        # Batch matrix-multiplication of all baskets
        if model.disable_nonsym_embeddings:
            L_V = V_batch.bmm(V_batch.transpose(1, 2))
        elif (V - B).norm() == 0.0: # Nonsymmetric DPP when B == V
            C_plus_I = C + torch.eye(C.shape[0]).to(model.device)
            # For batch mm, matrix C should be expanded with batch size.
            L_V = V_batch.bmm(
                C_plus_I.unsqueeze(0).expand(len(baskets), *C_plus_I.size())
            ).bmm(V_batch.transpose(1,2))
        else:
            C_plus_I = C + torch.eye(C.shape[0]).to(model.device)
            B_batch = pad_sequence([B[basket] for basket in baskets], batch_first=True)
            L_V = V_batch.bmm(V_batch.transpose(1, 2)) + B_batch.bmm(
                C_plus_I.unsqueeze(0).expand(len(baskets), *C_plus_I.size())
            ).bmm(V_batch.transpose(1,2))
        # Fill ones in the L(i,i) when entry i is padded. This can preserve the
        # determinant value without degeneration.
        max_num_items = V_batch.shape[1]
        idx = torch.arange(max_num_items)
        L_V[:,idx,idx] = (L_V[:,idx,idx] + epsilon) * mask + (~mask) * 1.0
        first_term = torch.logdet(L_V)
        if reduce:
            return first_term.sum()
        else:
            return first_term
class VocabularyMapper(object):
    """
    Maps categorical values onto indices in a sorted vocabulary.

    Calling an instance returns, for each input value, its index in the
    sorted unique vocabulary (via binary search).
    """
    def __init__(self, vocab):
        # np.unique already returns a sorted array of unique values, so the
        # original extra in-place .sort() was redundant and has been dropped.
        self.vocab = np.unique(vocab)
    def __call__(self, values):
        return np.searchsorted(self.vocab, values)
class PackedLoggers(object):
    """
    Combine a bunch of loggers into 1: each call is fanned out to every
    wrapped logger that supports the corresponding method.
    """
    def __init__(self, loggers):
        self.loggers = loggers
    def add_scalar(self, *args, **kwargs):
        for logger in self.loggers:
            logger.add_scalar(*args, **kwargs)
    def add_histogram(self, *args, **kwargs):
        for logger in self.loggers:
            if hasattr(logger, "add_histogram"):
                logger.add_histogram(*args, **kwargs)
    def add_embedding(self, model, val_data, **kwargs):
        # Run the model once, append a bias column of ones, then forward to
        # every logger that supports embeddings.
        out = model.forward(val_data)
        out = torch.cat((out.data, torch.ones(len(out), 1)), 1)
        for logger in self.loggers:
            if hasattr(logger, "add_embedding"):
                # BUG FIX: the original called `self.logger.add_embedding`,
                # which raised AttributeError (PackedLoggers has no `logger`
                # attribute); it must dispatch to the loop variable.
                logger.add_embedding(
                    out, metadata=out.data, label_img=val_data.data.double(),
                    **kwargs)
    def new_iteration(self):
        for logger in self.loggers:
            if hasattr(logger, "new_iteration"):
                logger.new_iteration()
    def model_checkpoint(self, model, **kwargs):
        for logger in self.loggers:
            if hasattr(logger, "model_checkpoint"):
                logger.model_checkpoint(model, **kwargs)
def str2bool(v):
    """
    Converts a user-supplied yes/no response to boolean
    """
    truthy = ('yes', 'true', 't', 'y', '1')
    falsy = ('no', 'false', 'f', 'n', '0')
    lowered = v.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def str2list(s, separator=",", transform=float):
    """
    Convert comma-separated string into list
    (an empty/falsy input yields an empty list).
    """
    if not s:
        return []
    return [transform(token) for token in s.split(separator)]
def str2loi(s, separator=","):
    # "list of ints": parse a separator-delimited string into [int, ...].
    return str2list(s, separator=separator, transform=int)
def parse_cmdline_args():
    """
    Parses command-line arguments / options for this software.

    :return: the argparse namespace, with path arguments rewritten to be
        absolute (prefixed by the repository root ``Header``) and a few
        derived fields filled in.
    """
    parser = argparse.ArgumentParser(
        description='Train a symmetric or nonsymmetric DPP',
        epilog=("Example usage: python main.py --dataset_name basket_ids"
                "--input_file data/1_100_100_100_apparel_regs.csv"
                "--num_sym_embedding_dims 30 --num_nonsym_embedding_dims 30"),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--hogwild', type=str2bool, default="false",
        help='whether to enable HogWild parallel training')
    parser.add_argument("--inference", type=str2bool,
                        default=True, help="run inference on val / test data")
    parser.add_argument("--tsne", action="store_true", default=False,
                        help="do t-SNE projections of embeddings")
    parser.add_argument("--scores_file", type=str,
                        default="nonsymmetric-DPP-eval-scores",
                        help="pickle file where inference scores will be written (pandas dataframe format)")
    parser.add_argument(
        '--num_bootstraps', type=int, default=20,
        help='number of bootstraps for evaluation scores')
    parser.add_argument("--disable_eval", type=str2bool, default="true",
                        help="disable model evaluation during training")
    parser.add_argument(
        '--batch_size', type=int, default=200,
        help='batch size for creating training data')
    parser.add_argument(
        '--input_file', type=str, default=None,
        help='input file path')
    parser.add_argument(
        '--input_file_test_negatives', type=str, default=None,
        help='input file test negatives')
    parser.add_argument(
        '--disjoint_sets_file_w', type=str, default=None,
        help='input file disjoint_sets_file_w')
    parser.add_argument(
        '--input_file_disjoint_sets', type=str, default=None,
        help='input file input_file_disjoint_sets')
    parser.add_argument(
        '--num_iterations', type=int, default=1000,
        help='number of passes to do over data during training')
    parser.add_argument(
        '--num_baskets', type=int,
        help='number of baskets to use in experiment (limits catalog size)')
    parser.add_argument(
        '--max_basket_size', type=int, default=np.inf,
        help='maximum size of the baskets to use in experiment')
    parser.add_argument('--alpha', type=float, default=0.1,
                        help='L2 regularization parameter for symmetric component')
    # parser.add_argument('--beta', type=float, default=0.0,
    #                     help='L2 regularization parameter for nonsymmetric component')
    parser.add_argument(
        '--use_metadata', type=str2bool, default="false",
        help='whether to use product meta-data to enrich embeddings')
    parser.add_argument(
        '--use_price', type=str2bool, default="false",
        help='whether to use product price meta-data to enrich embeddings')
    parser.add_argument(
        '--use_fasttext', type=str2bool, default="false",
        help='whether to use product description FastText to enrich embeddings')
    parser.add_argument(
        '--prepend_meta', type=str2bool, default="true",
        help='whether to include meta-data before or after computing embedding')
    parser.add_argument(
        '--num_threads', type=int, default=1,
        help='num_threads to use for intra-process parallelism')
    parser.add_argument(
        '--db_path', required=False, default="logs.db",
        help='path to db where `pyml_experiments` logs will be written')
    parser.add_argument(
        '--disable_gpu', type=str2bool, default="false",
        help='disable gpu usage')
    dataset_parser = parser.add_argument_group("dataset specification options")
    dataset_parser.add_argument(
        '--dataset_name', type=str,
        default="basket_ids", help='Name of the dataset to use. Currently either "basket_ids", "uk", or "instacart" is supported.')
    model_parser = parser.add_argument_group("model / optimizer options")
    model_parser.add_argument('--hidden_dims', type=str2loi, default="",
                              help=('comma separated list of hidden layer '
                                    'dimensions'))
    model_parser.add_argument(
        '--num_sym_embedding_dims', type=int, default=30,
        help='number of final embedding dims for symmetric kernel component to use')
    model_parser.add_argument(
        '--num_nonsym_embedding_dims', type=int, default=30,
        help='number of final embedding dims for nonsymmetric kernel component to use')
    model_parser.add_argument(
        '--product_id_embedding_dim', type=int, default=30,
        help='number of product id embeddings dims to use')
    model_parser.add_argument(
        '--aisle_id_embedding_dim', type=int, default=20,
        help='number of aisle id embeddings dims to use(currently used for Instacart dataset only)')
    model_parser.add_argument(
        '--department_id_embedding_dim', type=int, default=20,
        help='number of department id embeddings dims to use(currently used for Instacart dataset only)')
    model_parser.add_argument(
        '--learning_rate', type=float, default=0.1,
        help='initial learning rate for optimizer')
    # model_parser.add_argument(
    #     '--optimizer', choices=["adam", "adagrad", "sgd", "rmsprop"], type=str,
    #     default="adam", help='optimizer to use training the model')
    model_parser.add_argument(
        '--activation', choices=["selu", "relu", "tanh"], type=str,
        default="selu", help='non-linear activation to use')
    model_parser.add_argument(
        '--dropout', type=float, default=0,
        help='amount of dropout to use')
    model_parser.add_argument(
        '--persisted_model_dir', type=str, default="saved_models",
        help='Path to the dir where model will be/was persisted. ')
    model_parser.add_argument(
        '--num_val_baskets', type=int, default=300)
    model_parser.add_argument(
        '--num_test_baskets', type=int, default=2000)
    args = parser.parse_args()
    # sanitize some arguments which have ranges
    if args.hogwild and args.num_threads < 2:
        raise ValueError("--hogwild true but --num_threads 1 < 2")
    if args.inference and args.scores_file is None:
        raise ValueError("no --scores_file specified with --inference")
    # The product-id embedding width is forced to match the symmetric
    # kernel's embedding width.
    args.product_id_embedding_dim = args.num_sym_embedding_dims
    # Rewrite relative paths as absolute by prefixing the repository root.
    args.scores_file = Header + args.scores_file
    args.persisted_model_dir = Header + args.persisted_model_dir
    if args.input_file is None and args.dataset_name == "basket_ids":
        args.input_file = "data/belgian-retail.csv"
    if args.input_file is not None:
        args.input_file = Header + args.input_file
    if args.input_file_test_negatives is not None:
        args.input_file_test_negatives = Header + args.input_file_test_negatives
    if args.disjoint_sets_file_w is not None:
        args.disjoint_sets_file_w = Header + args.disjoint_sets_file_w
    if args.input_file_disjoint_sets is not None:
        args.input_file_disjoint_sets = Header + args.input_file_disjoint_sets
    return args
| 39.504717 | 132 | 0.641194 |
acf4782a48c8992a68dd0c285637ac3696792668 | 4,052 | py | Python | test.py | elegantchaos/atom-elegantchaos-syntax-theme | d0f8cfff771f31aecaa6a8126c2bd137b1e6fb05 | [
"MIT"
] | null | null | null | test.py | elegantchaos/atom-elegantchaos-syntax-theme | d0f8cfff771f31aecaa6a8126c2bd137b1e6fb05 | [
"MIT"
] | null | null | null | test.py | elegantchaos/atom-elegantchaos-syntax-theme | d0f8cfff771f31aecaa6a8126c2bd137b1e6fb05 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
import os
import subprocess
import sys
import errors
import getopt
PROCESSED_ARGUMENTS = []
PROCESSED_OPTIONS = {}
def exit_with_message(message, error):
    """Print *message* to stdout and terminate the process with exit code *error*.

    Uses sys.exit() rather than the site builtin exit(), which is only
    guaranteed to exist in interactive sessions.
    """
    print(message)
    sys.exit(error)
def exit_if_failed_with_message(result, output, message):
    """Exit with *message* when *result* (a process return code) is non-zero.

    *output* is accepted for call-site symmetry but is not used.
    """
    if result == 0:
        return
    exit_with_message(message, result)
def getopt_options_from_options(options):
    """Build the getopt long-option list for an option-spec dict.

    Side effects: injects a synthetic "debug-args" flag into *options* and
    records every option's default value in the module-level
    PROCESSED_OPTIONS dict.  Options whose default is a real value (not
    None/True/False) take an argument, marked with a trailing "=".
    """
    global PROCESSED_OPTIONS
    options["debug-args"] = { "default" : False }
    long_opts = []
    for name in options.keys():
        default = options[name].get("default")
        # `not in` compares with ==, matching the original != checks (so a
        # default of 1 still counts as a flag, since 1 == True).
        if default not in (None, True, False):
            long_opts.append(name + "=")
        else:
            long_opts.append(name)
        PROCESSED_OPTIONS[name] = default
    return long_opts
def option_name_from_getopt_name(optname):
    """Strip the leading getopt dashes ("--long" or "-s") from an option name."""
    if optname[:2] == "--":
        return optname[2:]
    if optname[0] == "-":
        return optname[1:]
    return optname
def exit_if_too_few_arguments(args, count, usage):
    """Abort with a usage message when *args* holds fewer than *count* entries."""
    if len(args) >= count:
        return
    name = os.path.basename(sys.argv[0])
    message = "Error: too few arguments were supplied.\n\nUsage {0} {1}.".format(name, usage)
    message = message.format(name) # usage can contain {0} itself
    exit_with_message(message, errors.ERROR_WRONG_ARGUMENTS)
def process_options(options):
    """Parse the command line against the option-spec dict *options*.

    Recognised option values are stored in the module-level
    PROCESSED_OPTIONS dict; the remaining positional arguments are
    returned.  Exits with ERROR_UNKNOWN_OPTION on an unrecognised option.
    """
    global PROCESSED_OPTIONS
    argv = sys.argv
    try:
        optkeys = getopt_options_from_options(options)
        (optlist, args) = getopt.gnu_getopt(argv[1:], "", optkeys)
        for optname, optvalue in optlist:
            cleanName = option_name_from_getopt_name(optname)
            if optvalue:
                PROCESSED_OPTIONS[cleanName] = optvalue
            else:
                # Flag options arrive with an empty value; record presence.
                defaultValue = options[cleanName].get("default")
                if (defaultValue == True) or (defaultValue == False):
                    PROCESSED_OPTIONS[cleanName] = True
        return args
    except getopt.GetoptError as e:
        # print() call (not the py2 print statement) keeps this module
        # importable under Python 3 as well; identical output on Python 2.
        print("Error: {0}".format(e))
        sys.exit(errors.ERROR_UNKNOWN_OPTION)
def check_arguments(count, usage, options=None):
    """Parse and validate the command line.

    Processes *options* (an option-spec dict) when given, records the
    positional arguments in PROCESSED_ARGUMENTS, and exits if fewer than
    *count* positional arguments were supplied.

    The default used to be a shared mutable dict literal, which
    process_options() mutates (it injects "debug-args"); None avoids
    cross-call leakage while remaining falsy like the old {} default.
    """
    global PROCESSED_ARGUMENTS
    if options:
        args = process_options(options)
    else:
        args = sys.argv[1:]
    PROCESSED_ARGUMENTS += args
    exit_if_too_few_arguments(args, count, usage)
    if PROCESSED_OPTIONS.get("debug-args"):
        # print() calls keep this module importable under Python 3 as well.
        print("Arguments: {0}".format(PROCESSED_ARGUMENTS))
        print("Options: {0}".format(PROCESSED_OPTIONS))
def get_argument(index):
    """Return the 1-based positional argument recorded by check_arguments()."""
    return PROCESSED_ARGUMENTS[index - 1]
def get_option(key):
    """Return the parsed value (or recorded default) for option *key*, or None."""
    return PROCESSED_OPTIONS.get(key)
def expand_directory(path):
    """Expand a leading ~ in *path* and ensure the directory exists.

    Creation is attempted unconditionally, tolerating already-exists
    failures; this avoids the check-then-create race of the previous
    os.path.exists() guard (and works without Python 3's exist_ok).
    """
    path = os.path.expanduser(path)
    try:
        os.makedirs(path)
    except OSError:
        # Re-raise unless the directory already exists (e.g. created
        # concurrently, or on a prior call).
        if not os.path.isdir(path):
            raise
    return path
def read_text(path):
    """Return the entire contents of the text file at *path*."""
    with open(path, "r") as source:
        return source.read()
def write_text(path, text):
    """Replace the contents of the file at *path* with *text*."""
    with open(path, "w") as sink:
        sink.write(text)
def view_file(path):
    # Open *path* in the user's default application via the macOS `open` command.
    subprocess.call(["open", path])
def view_url(path):
    # Open a URL in the default browser via the macOS `open` command.
    # Identical to view_file; kept separate so call sites document intent.
    subprocess.call(["open", path])
def got_tool(tool):
    """Return True when /usr/bin/which can locate *tool* on the PATH."""
    try:
        subprocess.check_output(["/usr/bin/which", tool])
    except subprocess.CalledProcessError:
        return False
    return True
def html_link_attributes(text, attributes):
    """Render an HTML anchor around *text* from pre-formatted attribute strings."""
    return "<a {0}>{1}</a>".format(" ".join(attributes), text)
def html_link(text, url):
    """Render a simple <a href="..."> anchor around *text*."""
    return html_link_attributes(text, ['href="' + url + '"'])
def script_name():
    """Return the basename of the currently running script."""
    return os.path.basename(sys.argv[0])
def script_base():
    """Return the directory containing the running script, symlinks resolved."""
    return os.path.dirname(os.path.realpath(sys.argv[0]))
def script_relative(path):
    """Return *path* resolved relative to the directory containing this script."""
    return os.path.join(script_base(), path)
def call_output_and_result(cmd):
    """Run *cmd* and return (exit_code, combined stdout+stderr output).

    A non-zero exit is reported through the return value rather than by
    letting CalledProcessError propagate.
    """
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as failure:
        return (failure.returncode, failure.output)
    return (0, output)
| 27.013333 | 101 | 0.644373 |
acf479b64452d8fcb398cfde70d38dac772b655c | 327 | py | Python | Core/ClustersQueuesModules/pythonRandom.py | WarwickRSE/HPC4DS | 88f52c446f3e93bfaa391b4abeda4753ed9be123 | [
"BSD-3-Clause"
] | 2 | 2020-12-01T18:11:48.000Z | 2020-12-17T10:30:19.000Z | Core/ClustersQueuesModules/pythonRandom.py | WarwickRSE/HPC4DS | 88f52c446f3e93bfaa391b4abeda4753ed9be123 | [
"BSD-3-Clause"
] | null | null | null | Core/ClustersQueuesModules/pythonRandom.py | WarwickRSE/HPC4DS | 88f52c446f3e93bfaa391b4abeda4753ed9be123 | [
"BSD-3-Clause"
] | 2 | 2020-12-14T13:09:01.000Z | 2020-12-19T21:55:41.000Z | from datetime import datetime
from numpy import random
# Get current microseconds (assuming system can do this)
dt = datetime.now()
seed = dt.microsecond
# Swap between these two assignments to get either a fixed random number
# or one that will hopefully differ on multiple processors.
# NOTE: as written, the fixed assignment below always overrides the
# time-based seed computed above.
seed = 347910
random.seed(seed)
print(random.rand())
| 20.4375 | 58 | 0.776758 |
acf479ddabf9e08b6d87e33de1f2159b80857e0f | 4,655 | py | Python | code/perception.py | BenDu89/p1-search-and-sample-return | 86a068713a03e4e40929eb87c873c67fb95e634a | [
"MIT"
] | 146 | 2017-05-22T16:44:49.000Z | 2022-02-26T02:22:01.000Z | code/perception.py | BenDu89/p1-search-and-sample-return | 86a068713a03e4e40929eb87c873c67fb95e634a | [
"MIT"
] | 7 | 2017-05-30T15:54:05.000Z | 2021-08-17T09:25:27.000Z | code/perception.py | BenDu89/p1-search-and-sample-return | 86a068713a03e4e40929eb87c873c67fb95e634a | [
"MIT"
] | 629 | 2017-05-23T18:13:52.000Z | 2021-12-17T19:09:43.000Z | import numpy as np
import cv2
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh(img, rgb_thresh=(160, 160, 160)):
    """Binary-threshold an RGB image.

    Returns a single-channel array (same dtype as one image channel) that
    is 1 wherever every channel of a pixel is strictly above the matching
    rgb_thresh entry, and 0 elsewhere.  A threshold near 160 does a nice
    job of isolating the light ground pixels in rover camera images.
    """
    # True where all three channels exceed their thresholds.
    above = np.all(img > np.asarray(rgb_thresh), axis=-1)
    color_select = np.zeros_like(img[:, :, 0])
    color_select[above] = 1
    return color_select
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
    """Convert thresholded-image pixel positions to rover-centric coords.

    The rover sits at the bottom-center of the image looking "up", so x
    grows toward the top of the image and y toward the left.
    """
    # Identify nonzero pixels
    ypos, xpos = binary_img.nonzero()
    # np.float was removed in NumPy 1.24; the builtin float is the
    # documented equivalent and produces identical float64 arrays.
    x_pixel = -(ypos - binary_img.shape[0]).astype(float)
    y_pixel = -(xpos - binary_img.shape[1] / 2).astype(float)
    return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
    """Convert rover-space cartesian pixel coords to polar (distance, angle).

    Angles are measured from the rover's forward (+x) axis via arctan2.
    """
    distances = np.sqrt(x_pixel ** 2 + y_pixel ** 2)
    headings = np.arctan2(y_pixel, x_pixel)
    return distances, headings
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
    """Rotate rover-centric pixel coords counter-clockwise by *yaw* degrees."""
    yaw_rad = yaw * np.pi / 180
    cos_y = np.cos(yaw_rad)
    sin_y = np.sin(yaw_rad)
    # Standard 2-D rotation matrix applied element-wise.
    return xpix * cos_y - ypix * sin_y, xpix * sin_y + ypix * cos_y
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
    """Scale rotated pixel coords down by *scale* and shift them to (xpos, ypos)."""
    return (xpix_rot / scale) + xpos, (ypix_rot / scale) + ypos
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
    """Map rover-centric pixels into world-map cells: rotate, translate, clip."""
    rotated_x, rotated_y = rotate_pix(xpix, ypix, yaw)
    world_x, world_y = translate_pix(rotated_x, rotated_y, xpos, ypos, scale)
    # Truncate to integer cell indices and clamp inside the map bounds.
    upper = world_size - 1
    x_pix_world = np.clip(np.int_(world_x), 0, upper)
    y_pix_world = np.clip(np.int_(world_y), 0, upper)
    return x_pix_world, y_pix_world
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
    """Warp *img* so the quadrilateral *src* maps onto *dst*.

    Used to produce a top-down view of the rover camera image; the output
    keeps the input's (width, height).
    """
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image
    return warped
# Apply the above functions in succession and update the Rover state accordingly
def perception_step(Rover):
    """Stub for the rover perception pipeline.

    Intended to fill in Rover.vision_image, Rover.worldmap and the polar
    nav_dists/nav_angles from the current camera frame (Rover.img); the
    numbered steps below describe the plan.  Currently returns Rover
    unchanged.
    """
    # Perform perception steps to update Rover()
    # TODO:
    # NOTE: camera image is coming to you in Rover.img
    # 1) Define source and destination points for perspective transform
    # 2) Apply perspective transform
    # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples
    # 4) Update Rover.vision_image (this will be displayed on left side of screen)
        # Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image
        #          Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image
        #          Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image
    # 5) Convert map image pixel values to rover-centric coords
    # 6) Convert rover-centric pixel values to world coordinates
    # 7) Update Rover worldmap (to be displayed on right side of screen)
        # Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1
        #          Rover.worldmap[rock_y_world, rock_x_world, 1] += 1
        #          Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1
    # 8) Convert rover-centric pixel positions to polar coordinates
    # Update Rover pixel distances and angles
        # Rover.nav_dists = rover_centric_pixel_distances
        # Rover.nav_angles = rover_centric_angles
    return Rover
acf47df31389698b739620e1cf9a9d3b4736f5d6 | 11,731 | py | Python | tests/python/pants_test/engine/test_round_engine.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/engine/test_round_engine.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2018-09-04T17:37:34.000Z | 2018-09-04T19:42:58.000Z | tests/python/pants_test/engine/test_round_engine.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from pants.engine.round_engine import RoundEngine
from pants.goal.goal import Goal
from pants.task.task import Task
from pants.testutil.engine.base_engine_test import EngineTestBase
from pants.testutil.test_base import TestBase
class RoundEngineTest(EngineTestBase, TestBase):
    """Integration tests for RoundEngine goal/task scheduling.

    Each test installs RecordingTask subclasses (via record()/install_task())
    that append lifecycle tuples to self.actions; assert_actions() then
    checks that the engine invoked alternate_target_roots/prepare (order
    free) and construct/execute (strictly ordered) as expected.
    """

    def setUp(self):
        super().setUp()
        self.set_options_for_scope('', explain=False)
        # Pre-register recognized option scopes for every goal/task pair a
        # test might install below.
        for outer in ['goal1', 'goal2', 'goal3', 'goal4', 'goal5']:
            for inner in ['task1', 'task2', 'task3', 'task4', 'task5']:
                self.set_options_for_scope(f'{outer}.{inner}',
                                           level='info', colors=False)
        self.engine = RoundEngine()
        self.actions = []
        self._context = None

    def tearDown(self):
        if self._context is not None:
            self.assertTrue(not self._context or self._context.is_unlocked())
        super().tearDown()

    # The four lifecycle markers that RecordingTask instances record.
    def alternate_target_roots_action(self, tag):
        return 'alternate_target_roots', tag, self._context

    def prepare_action(self, tag):
        return 'prepare', tag, self._context

    def execute_action(self, tag):
        return 'execute', tag, self._context

    def construct_action(self, tag):
        return 'construct', tag, self._context

    def record(self, tag, product_types=None, required_data=None, optional_data=None,
               alternate_target_roots=None):
        """Build a Task subclass that records each lifecycle call into self.actions."""
        class RecordingTask(Task):
            options_scope = tag

            @classmethod
            def product_types(cls):
                return product_types or []

            @classmethod
            def alternate_target_roots(cls, options, address_mapper, build_graph):
                self.actions.append(self.alternate_target_roots_action(tag))
                return alternate_target_roots

            @classmethod
            def prepare(cls, options, round_manager):
                for product in (required_data or ()):
                    round_manager.require_data(product)
                for product in (optional_data or ()):
                    round_manager.optional_data(product)
                self.actions.append(self.prepare_action(tag))

            # `me` rather than `self` so the closure over the test's `self`
            # stays visible inside instance methods.
            def __init__(me, *args, **kwargs):
                super(RecordingTask, me).__init__(*args, **kwargs)
                self.actions.append(self.construct_action(tag))

            def execute(me):
                self.actions.append(self.execute_action(tag))

        return RecordingTask

    def install_task(self, name, product_types=None, goal=None, required_data=None,
                     optional_data=None, alternate_target_roots=None):
        """Install a task to goal and return all installed tasks of the goal.

        This is needed to initialize tasks' context.
        """
        task_type = self.record(name, product_types, required_data, optional_data,
                                alternate_target_roots)
        return super().install_task(name=name, action=task_type, goal=goal).task_types()

    def create_context(self, for_task_types=None, target_roots=None):
        self._context = self.context(for_task_types=for_task_types, target_roots=target_roots)
        self.assertTrue(self._context.is_unlocked())

    def assert_actions(self, *expected_execute_ordering):
        """Check pre-execute actions as an unordered set and execute actions in order."""
        expected_pre_execute_actions = set()
        expected_execute_actions = []
        for action in expected_execute_ordering:
            expected_pre_execute_actions.add(self.alternate_target_roots_action(action))
            expected_pre_execute_actions.add(self.prepare_action(action))
            expected_execute_actions.append(self.construct_action(action))
            expected_execute_actions.append(self.execute_action(action))
        # Two recorded entries (construct + execute) per executed task.
        # NOTE(review): local name typo ('expeceted') preserved from the original.
        expeceted_execute_actions_length = len(expected_execute_ordering) * 2
        self.assertEqual(expected_pre_execute_actions,
                         set(self.actions[:-expeceted_execute_actions_length]))
        self.assertEqual(expected_execute_actions, self.actions[-expeceted_execute_actions_length:])

    def test_lifecycle_ordering(self):
        task1 = self.install_task('task1', goal='goal1', product_types=['1'])
        task2 = self.install_task('task2', goal='goal1', product_types=['2'], required_data=['1'])
        task3 = self.install_task('task3', goal='goal3', product_types=['3'], required_data=['2'])
        task4 = self.install_task('task4', goal='goal4', required_data=['1', '2', '3'])
        self.create_context(for_task_types=task1+task2+task3+task4)
        self.engine.attempt(self._context, self.as_goals('goal4'))
        self.assert_actions('task1', 'task2', 'task3', 'task4')

    def test_lifecycle_ordering_install_order_invariant(self):
        # Here we swap the order of goal3 and goal4 task installation from the order in
        # `test_lifecycle_ordering` above. We can't swap task1 and task2 since they purposefully
        # do have an implicit order dependence with a dep inside the same goal.
        task1 = self.install_task('task1', goal='goal1', product_types=['1'])
        task2 = self.install_task('task2', goal='goal1', product_types=['2'], required_data=['1'])
        task3 = self.install_task('task4', goal='goal4', required_data=['1', '2', '3'])
        task4 = self.install_task('task3', goal='goal3', product_types=['3'], required_data=['2'])
        self.create_context(for_task_types=task1+task2+task3+task4)
        self.engine.attempt(self._context, self.as_goals('goal4'))
        self.assert_actions('task1', 'task2', 'task3', 'task4')

    def test_inter_goal_dep(self):
        task1 = self.install_task('task1', goal='goal1', product_types=['1'])
        task2 = self.install_task('task2', goal='goal1', required_data=['1'])
        self.create_context(for_task_types=task1+task2)
        self.engine.attempt(self._context, self.as_goals('goal1'))
        self.assert_actions('task1', 'task2')

    def test_inter_goal_dep_self_cycle_ok(self):
        # A task may require a product it also produces.
        task = self.install_task('task1', goal='goal1', product_types=['1'],
                                 required_data=['1'])
        self.create_context(for_task_types=task)
        self.engine.attempt(self._context, self.as_goals('goal1'))
        self.assert_actions('task1')

    def test_inter_goal_dep_downstream(self):
        # A task may not require a product produced later in the same goal.
        task1 = self.install_task('task1', goal='goal1', required_data=['1'])
        task2 = self.install_task('task2', goal='goal1', product_types=['1'])
        self.create_context(for_task_types=task1+task2)
        with self.assertRaises(self.engine.TaskOrderError):
            self.engine.attempt(self._context, self.as_goals('goal1'))

    def test_missing_product(self):
        task = self.install_task('task1', goal='goal1', required_data=['1'])
        self.create_context(for_task_types=task)
        with self.assertRaises(self.engine.MissingProductError):
            self.engine.attempt(self._context, self.as_goals('goal1'))

    def test_missing_optional_product(self):
        task = self.install_task('task1', goal='goal1', optional_data=['1'])
        self.create_context(for_task_types=task)
        # Shouldn't raise, as the missing product is optional.
        self.engine.attempt(self._context, self.as_goals('goal1'))

    def test_goal_cycle_direct(self):
        task1 = self.install_task('task1', goal='goal1', required_data=['2'], product_types=['1'])
        task2 = self.install_task('task2', goal='goal2', required_data=['1'], product_types=['2'])
        self.create_context(for_task_types=task1+task2)
        for goal in ('goal1', 'goal2'):
            with self.assertRaises(self.engine.GoalCycleError):
                self.engine.attempt(self._context, self.as_goals(goal))

    def test_goal_cycle_indirect(self):
        task1 = self.install_task('task1', goal='goal1', required_data=['2'], product_types=['1'])
        task2 = self.install_task('task2', goal='goal2', required_data=['3'], product_types=['2'])
        task3 = self.install_task('task3', goal='goal3', required_data=['1'], product_types=['3'])
        self.create_context(for_task_types=task1+task2+task3)
        for goal in ('goal1', 'goal2', 'goal3'):
            with self.assertRaises(self.engine.GoalCycleError):
                self.engine.attempt(self._context, self.as_goals(goal))

    def test_goal_ordering_unconstrained_respects_cli_order(self):
        task1 = self.install_task('task1', goal='goal1')
        task2 = self.install_task('task2', goal='goal2')
        task3 = self.install_task('task3', goal='goal3')
        self.create_context(for_task_types=task1+task2+task3)
        # With no product dependencies, every CLI ordering should be honored.
        for permutation in itertools.permutations([('task1', 'goal1'),
                                                   ('task2', 'goal2'),
                                                   ('task3', 'goal3')]):
            self.actions = []
            self.engine.attempt(self._context, self.as_goals(*[goal for task, goal in permutation]))
            expected_execute_actions = [task for task, goal in permutation]
            self.assert_actions(*expected_execute_actions)

    def test_goal_ordering_constrained_conflicts_cli_order(self):
        # Product dependencies win over CLI ordering.
        task1 = self.install_task('task1', goal='goal1', required_data=['2'])
        task2 = self.install_task('task2', goal='goal2', product_types=['2'])
        self.create_context(for_task_types=task1+task2)
        self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
        self.assert_actions('task2', 'task1')

    def test_goal_ordering_mixed_constraints_and_cli_order(self):
        task1 = self.install_task('task1', goal='goal1')
        task2 = self.install_task('task2', goal='goal2')
        task3 = self.install_task('task3', goal='goal3')
        task4 = self.install_task('task4', goal='goal4', required_data=['5'])
        task5 = self.install_task('task5', goal='goal5', product_types=['5'])
        self.create_context(for_task_types=task1+task2+task3+task4+task5)
        self.engine.attempt(self._context,
                            self.as_goals('goal1', 'goal2', 'goal4', 'goal5', 'goal3'))
        self.assert_actions('task1', 'task2', 'task5', 'task4', 'task3')

    def test_cli_goals_deduped(self):
        task1 = self.install_task('task1', goal='goal1')
        task2 = self.install_task('task2', goal='goal2')
        task3 = self.install_task('task3', goal='goal3')
        self.create_context(for_task_types=task1+task2+task3)
        self.engine.attempt(self._context,
                            self.as_goals('goal1', 'goal2', 'goal1', 'goal3', 'goal2'))
        self.assert_actions('task1', 'task2', 'task3')

    def test_task_subclass_singletons(self):
        # Install the same task class twice (before/after Goal.clear()) and confirm that the
        # resulting task is equal.
        class MyTask(Task):
            pass

        def install():
            reg = super(RoundEngineTest, self).install_task(name='task1', action=MyTask, goal='goal1')
            return reg.task_types()

        task1_pre, = install()
        Goal.clear()
        task1_post, = install()
        self.assertEqual(task1_pre, task1_post)

    def test_replace_target_roots(self):
        task1 = self.install_task('task1', goal='goal1')
        task2 = self.install_task('task2', goal='goal2', alternate_target_roots=[42])
        self.create_context(for_task_types=task1+task2)
        self.assertEqual([], self._context.target_roots)
        self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
        self.assertEqual([42], self._context.target_roots)

    def test_replace_target_roots_conflict(self):
        # Two tasks proposing different replacement roots must conflict.
        task1 = self.install_task('task1', goal='goal1', alternate_target_roots=[42])
        task2 = self.install_task('task2', goal='goal2', alternate_target_roots=[1, 2])
        self.create_context(for_task_types=task1+task2)
        with self.assertRaises(self.engine.TargetRootsReplacement.ConflictingProposalsError):
            self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))

    def test_replace_target_roots_to_empty_list(self):
        task1 = self.install_task('task1', goal='goal1')
        task2 = self.install_task('task2', goal='goal2', alternate_target_roots=[])
        target = self.make_target('t')
        self.create_context(for_task_types=task1+task2, target_roots=[target])
        self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
        self.assertEqual([], self._context.target_roots)
| 46.367589 | 96 | 0.703606 |
acf47e158f9fcaa949c4f57659b58ace199e1ea7 | 994 | py | Python | test/test_phones.py | VitalyW/python_training | 5d01a5d9c434038319e87189226d96c98ebfd3a7 | [
"Apache-2.0"
] | null | null | null | test/test_phones.py | VitalyW/python_training | 5d01a5d9c434038319e87189226d96c98ebfd3a7 | [
"Apache-2.0"
] | null | null | null | test/test_phones.py | VitalyW/python_training | 5d01a5d9c434038319e87189226d96c98ebfd3a7 | [
"Apache-2.0"
] | null | null | null | import re
def test_phones_on_home_page(app):
    """The phone column of the first home-page contact must equal the merged
    phones from that contact's edit form."""
    home_contact = app.contact.get_contact_list()[0]
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    assert home_contact.all_phones_from_home_page == merge_phones_like_on_home_page(edit_contact)
def test_phones_on_contact_view_page(app):
    """Phones on the contact view page must equal the merged edit-form phones.

    NOTE(review): this reads get_contact_list() (the home page), identical to
    the test above — presumably a view-page accessor was intended; confirm.
    """
    view_contact = app.contact.get_contact_list()[0]
    edit_contact = app.contact.get_contact_info_from_edit_page(0)
    assert view_contact.all_phones_from_home_page == merge_phones_like_on_home_page(edit_contact)
def clear(s):
    """Strip phone-number formatting characters: parentheses, spaces and dashes."""
    return "".join(ch for ch in s if ch not in "() -")
def merge_phones_like_on_home_page(contact):
    """Join the contact's non-empty phone numbers, cleaned of punctuation,
    one per line — matching how the home page renders them."""
    phones = [contact.homephone, contact.mobilephone, contact.workphone, contact.secondaryphone]
    cleaned = [clear(phone) for phone in phones if phone is not None]
    return "\n".join(number for number in cleaned if number != "")
| 39.76 | 126 | 0.712274 |
acf47f06183fa6e8b3d841f14147e02428d838b3 | 7,317 | py | Python | lib/layers/lang_encoder.py | wangshauitj/Mutatt | 53de5d064fa488f2c2bf7ecedec45eec0cc5f96b | [
"MIT"
] | null | null | null | lib/layers/lang_encoder.py | wangshauitj/Mutatt | 53de5d064fa488f2c2bf7ecedec45eec0cc5f96b | [
"MIT"
] | null | null | null | lib/layers/lang_encoder.py | wangshauitj/Mutatt | 53de5d064fa488f2c2bf7ecedec45eec0cc5f96b | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class RNNEncoder(nn.Module):
    """Language encoder over padded label sequences (padding id 0).

    Per token it concatenates a trainable embedding with a fixed-size 300-d
    embedding initialized from *dict_emb* (presumably pretrained vectors
    such as GloVe — TODO confirm at the call site), projects through an
    MLP, then runs an (optionally bidirectional) RNN with length-sorted
    packing for variable-length batches.
    """

    def __init__(self, dict_emb, vocab_size, word_embedding_size, word_vec_size, hidden_size, bidirectional=False,
                 input_dropout_p=0, dropout_p=0, n_layers=1, rnn_type='lstm', variable_lengths=True):
        super(RNNEncoder, self).__init__()
        self.variable_lengths = variable_lengths
        # Trainable slice of the word embedding; the remaining 300 dims
        # come from the table initialized below.
        self.embedding1 = nn.Embedding(vocab_size, word_embedding_size - 300)
        self.embedding2 = nn.Embedding(vocab_size, 300)
        # Initialize the 300-d table from the provided numpy matrix.
        self.embedding2.weight = nn.Parameter(torch.from_numpy(dict_emb).float())
        self.input_dropout = nn.Dropout(input_dropout_p)
        self.mlp = nn.Sequential(nn.Linear(word_embedding_size, word_vec_size),
                                 nn.ReLU())
        self.rnn_type = rnn_type
        # Resolves to nn.LSTM / nn.GRU / nn.RNN depending on rnn_type.
        self.rnn = getattr(nn, rnn_type.upper())(word_vec_size, hidden_size, n_layers,
                                                 batch_first=True,
                                                 bidirectional=bidirectional,
                                                 dropout=dropout_p)
        self.num_dirs = 2 if bidirectional else 1

    def forward(self, input_labels):
        """
        Inputs:
        - input_labels: Variable long (batch, seq_len)
        Outputs:
        - output : Variable float (batch, max_len, hidden_size * num_dirs)
        - hidden : Variable float (batch, num_layers * num_dirs * hidden_size)
        - embedded: Variable float (batch, max_len, word_vec_size)
        ****** max_len equal seq_len *******
        """
        if self.variable_lengths:
            # Token id 0 is padding, so each row's nonzero count is its true length.
            input_lengths = (input_labels != 0).sum(1)  # Variable (batch, )
            # Sort sequences by descending length (required by
            # pack_padded_sequence) and remember how to undo the sort.
            input_lengths_list = input_lengths.data.cpu().numpy().tolist()
            sorted_input_lengths_list = np.sort(input_lengths_list)[::-1].tolist()  # lengths, descending
            sort_ixs = np.argsort(input_lengths_list)[::-1].tolist()  # batch indices, by descending length
            s2r = {s: r for r, s in enumerate(sort_ixs)}  # O(n) sorted-pos -> original-pos map
            recover_ixs = [s2r[s] for s in range(len(input_lengths_list))]  # undoes the sort
            assert max(input_lengths_list) == input_labels.size(1)
            # move to long tensors on the same device as the labels
            sort_ixs = input_labels.data.new(sort_ixs).long()  # Variable long
            recover_ixs = input_labels.data.new(recover_ixs).long()  # Variable long
            # sort input_labels by descending length
            input_labels = input_labels[sort_ixs]
        # embed: concatenate trainable and fixed-size embeddings per token.
        embedded = torch.cat([self.embedding1(input_labels), self.embedding2(input_labels)],
                             2)  # (n, seq_len, word_embedding_size)
        embedded = self.input_dropout(embedded)  # (n, seq_len, word_embedding_size)
        embedded = self.mlp(embedded)  # (n, seq_len, word_vec_size)
        if self.variable_lengths:
            embedded = nn.utils.rnn.pack_padded_sequence(embedded, sorted_input_lengths_list, batch_first=True)
        # forward rnn
        output, hidden = self.rnn(embedded)
        # recover the original (pre-sort) batch order
        if self.variable_lengths:
            # embedded (batch, seq_len, word_vec_size)
            embedded, _ = nn.utils.rnn.pad_packed_sequence(embedded, batch_first=True)
            embedded = embedded[recover_ixs]
            # recover rnn output
            output, _ = nn.utils.rnn.pad_packed_sequence(output, batch_first=True)  # (batch, max_len, hidden)
            output = output[recover_ixs]
            # recover hidden
            if self.rnn_type == 'lstm':
                hidden = hidden[0]  # we only use hidden states for the final hidden representation
            hidden = hidden[:, recover_ixs, :]  # (num_layers * num_dirs, batch, hidden_size)
            hidden = hidden.transpose(0, 1).contiguous()  # (batch, num_layers * num_dirs, hidden_size)
            hidden = hidden.view(hidden.size(0), -1)  # (batch, num_layers * num_dirs * hidden_size)
        return output, hidden, embedded
class PhraseEmbedding(nn.Module):
    """Per-word phrase features from 1-, 2- and 3-gram 1-D convolutions.

    All three convolutions keep the channel width at 512; for each word
    position the three n-gram responses are max-pooled element-wise, then
    passed through tanh + dropout.
    """

    def __init__(self):
        super(PhraseEmbedding, self).__init__()
        self.unigramConv = nn.Conv1d(512, 512, 1, padding=0)
        # kernel 2 with padding 1 yields max_len + 1 outputs; the extra
        # left-padded position is dropped in forward().
        self.bigramConv = nn.Conv1d(512, 512, 2, padding=1)
        self.trigramConv = nn.Conv1d(512, 512, 3, padding=1)
        self.fuse = nn.Sequential(nn.Tanh(),
                                  nn.Dropout(0.5))

    def forward(self, word_embedding):
        """
        Inputs:
        - word_embedding: Variable float (batch, max_len, word_vec_size)
        Outputs:
        - phrase_embedding: Variable float (batch, max_len, word_vec_size)
        """
        max_len = word_embedding.size(1)
        word_vec_size = word_embedding.size(2)
        word_embedding = word_embedding.transpose(1, 2)  # (batch, C, max_len) for Conv1d
        unigram = self.unigramConv(word_embedding)       # (batch, C, max_len)
        bigram = self.bigramConv(word_embedding)         # (batch, C, max_len + 1)
        trigram = self.trigramConv(word_embedding)       # (batch, C, max_len)
        # Drop the first (left-padded) bigram output so all lengths match.
        bigram = bigram.narrow(-1, 1, max_len)
        unigram_dim = unigram.transpose(1, 2).contiguous().view(-1, max_len, word_vec_size, 1)
        bigram_dim = bigram.transpose(1, 2).contiguous().view(-1, max_len, word_vec_size, 1)
        trigram_dim = trigram.transpose(1, 2).contiguous().view(-1, max_len, word_vec_size, 1)
        phrase_feat = torch.cat([unigram_dim, bigram_dim, trigram_dim], 3)
        # Element-wise max over the three n-gram responses.
        phrase_embedding = torch.max(phrase_feat, -1)[0]
        phrase_embedding = self.fuse(phrase_embedding)
        return phrase_embedding
class PhraseAttention(nn.Module):
    """Soft attention over the words of a phrase.

    Scores each timestep's context vector with a single linear unit,
    softmax-normalizes the scores, masks out padding tokens (label id 0),
    renormalizes, and returns the attention weights along with the
    attention-weighted sum of the word embeddings.
    """

    def __init__(self, input_dim):
        super(PhraseAttention, self).__init__()
        # initialize pivot: one score per timestep.
        self.fc = nn.Linear(input_dim, 1)

    def forward(self, context, embedded, input_labels):
        """
        Inputs:
        - context : Variable float (batch, seq_len, input_dim)
        - embedded: Variable float (batch, seq_len, word_vec_size)
        - input_labels: Variable long (batch, seq_len)
        Outputs:
        - attn : Variable float (batch, seq_len)
        - weighted_emb: Variable float (batch, word_vec_size)
        """
        cxt_scores = self.fc(context).squeeze(2)  # (batch, seq_len)
        # Explicit dim=1: relying on the implicit dimension choice is
        # deprecated in PyTorch, and for 2-D input it resolved to dim=1
        # anyway, so behavior is unchanged.
        attn = F.softmax(cxt_scores, dim=1)  # (batch, seq_len), rows sum to 1.
        # Zero out attention on padding tokens (label id 0), then renormalize.
        is_not_zero = (input_labels != 0).float()  # (batch, seq_len)
        attn = attn * is_not_zero  # (batch, seq_len)
        attn = attn / attn.sum(1).view(attn.size(0), 1).expand(attn.size(0), attn.size(1))  # (batch, seq_len)
        # compute the attention-weighted sum of the word embeddings
        attn3 = attn.unsqueeze(1)  # (batch, 1, seq_len)
        weighted_emb = torch.bmm(attn3, embedded)  # (batch, 1, word_vec_size)
        weighted_emb = weighted_emb.squeeze(1)  # (batch, word_vec_size)
        return attn, weighted_emb
acf47fe9bc8415653bb48e0769b9b5ce919a0fba | 1,559 | py | Python | brooklinevoiceapp/mycity/test/integration_tests/test_trash_day_intent.py | jaumb/BrooklineVoiceApp | 08e1e83bc6ab11a082449b9e1b6a62b9c644a045 | [
"MIT"
] | null | null | null | brooklinevoiceapp/mycity/test/integration_tests/test_trash_day_intent.py | jaumb/BrooklineVoiceApp | 08e1e83bc6ab11a082449b9e1b6a62b9c644a045 | [
"MIT"
] | null | null | null | brooklinevoiceapp/mycity/test/integration_tests/test_trash_day_intent.py | jaumb/BrooklineVoiceApp | 08e1e83bc6ab11a082449b9e1b6a62b9c644a045 | [
"MIT"
] | null | null | null | """ Integration tests for TrashDayIntent """
import mycity.test.test_constants as test_constants
import mycity.test.integration_tests.intent_base_case as base_case
import mycity.test.integration_tests.intent_test_mixins as mix_ins
import mycity.intents.trash_day_intent as trash_intent
import mycity.intents.intent_constants as intent_constants
import copy
MOCK_RESPONSE = test_constants.GET_TRASH_PICKUP_API_MOCK
class TrashPickupTestCase(mix_ins.RepromptTextTestMixIn,
                          mix_ins.CardTitleTestMixIn,
                          base_case.IntentBaseCase):
    """Integration tests for TrashDayIntent.

    The mix-ins contribute generic checks driven by the class attributes
    below (card title, absence of reprompt text).
    """

    # Configuration consumed by IntentBaseCase and the mix-ins.
    intent_to_test = "TrashDayIntent"
    expected_title = trash_intent.CARD_TITLE_TRASH_DAY
    returns_reprompt_text = False

    def setUp(self):
        super().setUp()
        # Patch requests.get in TrashDayIntent
        self.mock_requests(get_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK),
                           post_data=copy.deepcopy(test_constants.GET_TRASH_PICKUP_API_MOCK))

    def test_response_contains_day_of_the_week(self):
        # The canned pickup mock is expected to yield a Wednesday pickup.
        response = self.controller.on_intent(self.request)
        self.assertTrue("Wednesday" in response.output_speech)

    def test_no_feature_results(self):
        # Re-mock with an empty trash-pickup payload; the intent should
        # fall back to the standard "no results" response.
        self.mock_requests(get_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK),
                           post_data=copy.deepcopy(test_constants.NO_RESPONSE_TRASH_PICKUP_API_MOCK))
        response = self.controller.on_intent(self.request)
        self.assertEqual(response.output_speech, intent_constants.NO_RESULTS_RESPONSE)
| 44.542857 | 101 | 0.760103 |
acf4802d40507c9554bd9a2f0cedc9460eda9a13 | 14,001 | py | Python | model/DeepMFNet.py | shib0li/DMFAL | cbaebd099d8abb6f6e68b4f6c8912f5802517be7 | [
"MIT"
] | null | null | null | model/DeepMFNet.py | shib0li/DMFAL | cbaebd099d8abb6f6e68b4f6c8912f5802517be7 | [
"MIT"
] | null | null | null | model/DeepMFNet.py | shib0li/DMFAL | cbaebd099d8abb6f6e68b4f6c8912f5802517be7 | [
"MIT"
] | null | null | null | import numpy as np
import torch
from torch.optim import Adam
import time
import dataset_active as dataset
from model.BaseNet import AdaptiveBaseNet
from infrastructure.misc import *
def generate_uniform_inputs(N, lb, ub, seed=None):
    """Draw N points uniformly from the axis-aligned box [lb, ub].

    Parameters:
    - N    : number of samples.
    - lb/ub: numpy arrays of lower/upper bounds, one entry per dimension.
    - seed : RNG seed; when None, a time-derived seed is used.

    Returns a numpy array of shape (N, dim).

    Uses a private RandomState so the global NumPy RNG is never touched —
    replacing the previous save/seed/restore dance and its bare ``except:``
    clause.  For a given seed the sample stream is identical to the old
    behavior, since np.random delegates to the same legacy generator.
    """
    if seed is None:
        seed = int(time.time() * 1000000 % (0xFFFFFFFF))
    if lb.size != ub.size:
        raise Exception('Error: check the lower bound and upper bound')
    dim = lb.size
    rng = np.random.RandomState(seed)
    noise = rng.uniform(0, 1, size=[N, dim])
    scale = (ub - lb).reshape([1, -1])
    X = noise * scale + lb
    return X.reshape([N, dim])
#
class DeepMFNet:
    """Deep multi-fidelity network.

    Holds a stack of M AdaptiveBaseNet models, one per fidelity level.
    forward() feeds the raw input X into fidelity 0 and then concatenates
    each level's "base" (latent) output with X as the input of the next
    fidelity.  Training maximizes a Gaussian log-likelihood (per-fidelity
    noise precision exp(log_tau)) minus a KL term and an L2-style
    regularizer, both delegated to the base networks.

    Configuration is read from ``opt`` (an options object) and ``synD``
    (the dataset wrapper providing normalized data and interpolation to
    the ground-resolution grid).
    """
    def __init__(self, opt, synD):
        """Copy configuration from *opt*, keep dataset *synD* and build the
        per-fidelity networks via init_model_params()."""
        self.device = torch.device(opt.placement)
        self.torch_type = opt.torch_type
        self.data = synD
        self.logger = opt.logger
        self.verbose = opt.verbose
        # M = number of fidelity levels
        self.M = opt.M
        # per-fidelity dimensions / architectures (lists of length M)
        self.input_dims = opt.input_dim_list
        self.output_dims = opt.output_dim_list
        self.base_dims = opt.base_dim_list
        self.hlayers = opt.hlayers_list
        self.max_epochs = opt.max_epochs
        self.print_freq = opt.print_freq
        self.reg_strength = opt.reg_strength
        self.learning_rate = opt.learning_rate
        self.activation = opt.activation
        # NOTE(review): opt_lr is stored but not used anywhere in this class
        self.opt_lr = opt.opt_lr
        self.nns_list, self.nns_params_list, self.log_tau_list = self.init_model_params()
    def init_model_params(self,):
        """Instantiate one AdaptiveBaseNet per fidelity.

        Fidelity m>0 takes the raw input concatenated with the previous
        level's base output, hence the widened input dimension.  Also
        creates one trainable log noise precision (log_tau) per fidelity.

        Returns:
            tuple: (list of nets, list of their parameter dicts,
            list of log_tau scalars).
        """
        nns_list = []
        nns_params_list = []
        log_tau_list = []
        for m in range(self.M):
            if m == 0:
                in_dim = self.input_dims[m]
            else:
                # input of level m = raw input + base output of level m-1
                in_dim = self.input_dims[m] + self.base_dims[m-1]
            #
            # layer sizes: input -> hidden layers -> base (latent) -> output
            layers = [in_dim] + self.hlayers[m] + [self.base_dims[m]] + [self.output_dims[m]]
            if self.verbose:
                print(layers)
            #
            nn = AdaptiveBaseNet(layers, self.activation, self.device, self.torch_type)
            nn_params = nn.parameters()
            # log of the Gaussian noise precision for fidelity m
            log_tau = torch.tensor(0.0, device=self.device, requires_grad=True, dtype=self.torch_type)
            nns_list.append(nn)
            nns_params_list.append(nn_params)
            log_tau_list.append(log_tau)
        #
        return nns_list, nns_params_list, log_tau_list
    def forward(self, X, m, sample=False):
        """Propagate input X through fidelity levels 0..m.

        Args:
            X: input batch (2-D tensor; rows are samples).
            m (int): target fidelity level index.
            sample (bool): forwarded to the base nets (stochastic weights
                when True — presumably for the variational likelihood).

        Returns:
            tuple: (output at fidelity m, base/latent output at fidelity m).
        """
        # first fidelity
        Y_m, base_m = self.nns_list[0].forward(X, sample)
        # propagate to the other fidelity levels
        for i in range(1,m+1):
            X_concat = torch.cat((base_m, X), dim=1)
            # print(X_concat.shape)
            Y_m, base_m = self.nns_list[i].forward(X_concat, sample)
        #
        return Y_m, base_m
    def eval_llh(self, X, Y, m):
        """Single-sample Monte-Carlo estimate of the Gaussian log-likelihood
        of targets Y at fidelity m (noise precision exp(log_tau_m))."""
        # number of Monte-Carlo samples (fixed to 1 here)
        Ns = 1
        llh_samples_list = []
        for ns in range(Ns):
            pred_sample, _ = self.forward(X, m, sample=True)
            log_prob_sample = torch.sum(-0.5*torch.square(torch.exp(self.log_tau_list[m]))*torch.square(pred_sample-Y) +\
                                        self.log_tau_list[m] - 0.5*np.log(2*np.pi))
            llh_samples_list.append(log_prob_sample)
        #
        return sum(llh_samples_list)
    def batch_eval_llh(self, X_list, Y_list):
        """Sum of eval_llh over all M fidelity levels."""
        llh_list = []
        for m in range(self.M):
            llh_m = self.eval_llh(X_list[m], Y_list[m], m)
            llh_list.append(llh_m)
        #
        return sum(llh_list)
    def batch_eval_kld(self,):
        """Sum of the base nets' KL-divergence terms over all fidelities."""
        kld_list = []
        for m in range(self.M):
            kld_list.append(self.nns_list[m]._eval_kld())
        #
        return sum(kld_list)
    def batch_eval_reg(self,):
        """Sum of the base nets' regularization terms over all fidelities."""
        reg_list = []
        for m in range(self.M):
            reg_list.append(self.nns_list[m]._eval_reg())
        #
        return sum(reg_list)
    # def batch_eval_rmse(self, X_list, Y_list):
    #     rmse_list = []
    #     for m in range(self.M):
    #         rmse = self.eval_rmse(X_list[m], Y_list[m], m)
    #         rmse_list.append(rmse)
    #     #
    #     return rmse_list
    # def batch_eval_ground_rmse(self, X_list, Y_list):
    #     rmse_list = []
    #     for m in range(self.M):
    #         rmse = self.eval_ground_rmse(X_list[m], Y_list[m], m)
    #         rmse_list.append(rmse)
    #     #
    #     return rmse_list
    def eval_rmse(self, m, N_X, N_Y, train=True):
        """RMSE (in original units) and normalized RMSE at fidelity m.

        N_X / N_Y are normalized inputs/targets; the dataset's scales
        are used to map predictions back to original units.
        """
        # inputs are normalized
        N_pred, _ = self.forward(N_X, m, sample=False)
        scales = self.data.get_scales(m, train)
        Y = N_Y*scales['y_std'] + scales['y_mean']
        pred = N_pred*scales['y_std'] + scales['y_mean']
        rmse = torch.sqrt(torch.mean(torch.square(Y-pred)))
        # normalized RMSE = RMSE divided by target std
        n_rmse = rmse/scales['y_std']
        return rmse.data.cpu().numpy(), n_rmse.data.cpu().numpy()
    def eval_rmse_ground(self, m, N_X, np_y_ground, train=True):
        """RMSE against ground-resolution targets.

        Predictions at fidelity m are interpolated to the ground grid by
        the dataset, then de-normalized using the mean/std of the ground
        targets themselves (not the per-fidelity scales).
        """
        # inputs are normalized
        N_pred, _ = self.forward(N_X, m, sample=False)
        # NOTE(review): `scales` is fetched but unused in this version
        scales = self.data.get_scales(m, train)
        mu = np.mean(np_y_ground)
        sig = np.std(np_y_ground)
        # np_N_y_ground = (np_y_ground - np.mean(np_y_ground))/np.std(np_y_ground)
        np_N_pred = N_pred.data.cpu().numpy()
        interp_np_N_pred = self.data.interp_to_ground(np_N_pred, m)
        interp_np_pred = interp_np_N_pred*sig + mu
        rmse = np.sqrt(np.mean(np.square(np_y_ground-interp_np_pred)))
        n_rmse = rmse/sig
        return rmse, n_rmse
    # def eval_rmse(self, m, N_X, N_Y, train=True):
    #     # inputs are normalized
    #     N_pred, _ = self.forward(N_X, m, sample=False)
    #     scales = self.data.get_scales(m, train)
    #     # Y = N_Y*scales['y_std'] + scales['y_mean']
    #     # pred = N_pred*scales['y_std'] + scales['y_mean']
    #     nrmse = torch.sqrt(torch.mean(torch.square(N_Y-N_pred)))
    #     rmse = nrmse*scales['y_std']
    #     return rmse.data.cpu().numpy(), nrmse.data.cpu().numpy()
    # def eval_rmse_ground(self, m, N_X, np_y_ground, train=True):
    #     # inputs are normalized
    #     N_pred, _ = self.forward(N_X, m, sample=False)
    #     scales = self.data.get_scales(m, train=True)
    #     # np_N_y_ground = (np_y_ground - np.mean(np_y_ground))/np.std(np_y_ground)
    #     np_N_y_ground = (np_y_ground - scales['y_mean'])/scales['y_std']
    #     np_N_pred = N_pred.data.cpu().numpy()
    #     interp_np_N_pred = self.data.interp_to_ground(np_N_pred, m)
    #     n_rmse = np.sqrt(np.mean(np.square(np_N_y_ground-interp_np_N_pred)))
    #     rmse = n_rmse*scales['y_std']
    #     return n_rmse, n_rmse
    # def eval_rmse_ground(self, m, N_X, np_y_ground, train=True):
    #     # inputs are normalized
    #     N_pred, _ = self.forward(N_X, m, sample=False)
    #     # scales = self.data.get_scales(m, train=True)
    #     np_N_pred = N_pred.data.cpu().numpy()
    #     interp_np_N_pred = self.data.interp_to_ground(np_N_pred, m)
    #     mu = np.mean(np_y_ground)
    #     sig = np.std(np_y_ground)
    #     np_N_y_ground = (np_y_ground - np.mean(np_y_ground))/np.std(np_y_ground)
    #     # mu = scales['y_mean']
    #     # sig = scales['y_std']
    #     # inter_np_pred = interp_np_N_pred*sig + mu
    #     nrmse = np.sqrt(np.mean(np.square(interp_np_N_pred-np_N_y_ground)))
    #     rmse = nrmse*sig
    #     return rmse, nrmse
    def init_train_optimizer(self, lr, weight_decay):
        """Build an Adam optimizer over every net parameter and every
        log_tau, all sharing the same learning rate *lr*."""
        opt_params = []
        for m in range(self.M):
            for nn_param_name, nn_param in self.nns_params_list[m].items():
                # print(nn_param_name)
                opt_params.append({'params':nn_param, 'lr':lr})
            #
            # the per-fidelity noise precision is trained jointly
            opt_params.append({'params':self.log_tau_list[m], 'lr':lr})
        #
        return Adam(opt_params, lr=lr, weight_decay=weight_decay)
    def train(self,):
        """Run the full training loop.

        Loads train/test data for every fidelity, minimizes
        -llh + kld + reg_strength*reg for max_epochs+1 epochs, and every
        print_freq epochs records train/test (normalized) RMSE both at the
        native fidelity resolution and against the ground-resolution data.

        Returns:
            dict: 'test_rmse' and 'test_ground_rmse' histories (arrays of
            shape [num_records, M]) and 'N_predict', the final normalized
            prediction on the highest-fidelity test inputs.
        """
        if self.verbose:
            print('train the model ...')
        X_train_list = []
        y_train_list = []
        np_y_train_ground_list = []
        X_test_list = []
        y_test_list = []
        np_y_test_ground_list = []
        for m in range(self.M):
            # training data gets a small noise injection; test data does not
            np_X_train, np_y_train, np_y_train_ground = self.data.get_data(m,train=True, normalize=True, noise=0.01)
            np_X_test, np_y_test, np_y_test_ground = self.data.get_data(m,train=False, normalize=True, noise=0.00)
            X_train_list.append(torch.tensor(np_X_train, device=self.device, dtype=self.torch_type))
            y_train_list.append(torch.tensor(np_y_train, device=self.device, dtype=self.torch_type))
            np_y_train_ground_list.append(np_y_train_ground)
            X_test_list.append(torch.tensor(np_X_test, device=self.device, dtype=self.torch_type))
            y_test_list.append(torch.tensor(np_y_test, device=self.device, dtype=self.torch_type))
            np_y_test_ground_list.append(np_y_test_ground)
        #
        hist_test_rmse = []
        hist_test_ground_rmse = []
        optimizer_train = self.init_train_optimizer(self.learning_rate, 0.0)
        start_time = time.time()
        for epoch in range(self.max_epochs+1):
            optimizer_train.zero_grad()
            # negative ELBO-style objective: -likelihood + KL + weighted reg
            loss = -self.batch_eval_llh(X_train_list, y_train_list) + self.batch_eval_kld() + self.reg_strength*self.batch_eval_reg()
            loss.backward(retain_graph=True)
            optimizer_train.step()
            if epoch % self.print_freq == 0:
                if self.verbose:
                    print('======================================')
                    print('%d-th epoch: loss=%.7f' % (epoch, loss))
                    print('======================================')
                self.logger.write('=============================================================\n')
                self.logger.write(str(epoch) + '-th epoch: loss=' + str(loss.data.cpu().numpy()) +\
                                  ', time_elapsed:' + str(time.time()-start_time) + '\n')
                self.logger.write('=============================================================\n')
                buff_test_nRmse = []
                buff_test_nRmse_ground = []
                for m in range(self.M):
                    train_rmse, n_train_rmse = self.eval_rmse(m, X_train_list[m], y_train_list[m], train=True)
                    test_rmse, n_test_rmse = self.eval_rmse(m, X_test_list[m], y_test_list[m], train=False)
                    train_ground_rmse, n_train_ground_rmse = self.eval_rmse_ground(
                        m, X_train_list[m], np_y_train_ground_list[m], train=True)
                    test_ground_rmse, n_test_ground_rmse = self.eval_rmse_ground(
                        m, X_test_list[m], np_y_test_ground_list[m], train=False)
                    buff_test_nRmse.append(n_test_rmse)
                    buff_test_nRmse_ground.append(n_test_ground_rmse)
                    if self.verbose:
                        print(' m=%d:' % (m))
                        print(' * (origin) train_rmse=%.7f, test_rmse=%.7f' % (n_train_rmse, n_test_rmse))
                        print(' * (ground) train_rmse=%.7f, test_rmse=%.7f' % (n_train_ground_rmse, n_test_ground_rmse))
                        # print(' * (ground) train_rmse=%.7f, test_rmse=%.7f' % (train_ground_rmse, test_ground_rmse))
                    # if verbose
                    self.logger.write('m='+str(m)+'\n')
                    self.logger.write(' * (origin) train_rmse='+str(n_train_rmse)+', test_rmse='+str(n_test_rmse)+'\n')
                    self.logger.write(' * (ground) train_rmse='+str(n_train_ground_rmse)+',test_rmse='+str(n_test_ground_rmse)+'\n')
                    self.logger.write(' * log_tau_m='+str(self.log_tau_list[m].data.cpu().numpy())+'\n')
                # for m
                hist_test_rmse.append(np.array(buff_test_nRmse))
                hist_test_ground_rmse.append(np.array(buff_test_nRmse_ground))
            # if epoch
            self.logger.flush()
        # for epoch
        # final normalized prediction on the highest-fidelity test inputs
        N_pred, _ = self.forward(X_test_list[-1], self.M-1, sample=False)
        res = {}
        res['test_rmse'] = np.array(hist_test_rmse)
        res['test_ground_rmse'] = np.array(hist_test_ground_rmse)
        res['N_predict'] = N_pred.data.cpu().numpy()
        return res
    def dummy_predict(self, Nt=10):
        """Time a single forward pass at the highest fidelity on *Nt*
        uniformly sampled inputs; returns the elapsed wall-clock seconds."""
        # used to time the prediction
        if self.verbose:
            print('train the model ...')
        # NOTE(review): the lists below are initialized but never used here
        X_train_list = []
        y_train_list = []
        np_y_train_ground_list = []
        X_test_list = []
        y_test_list = []
        np_y_test_ground_list = []
        m = self.M-1
        Xtr, ytr, yground = self.data.get_data(m,train=True, normalize=True, noise=0.01)
        in_dim = Xtr.shape[0]
        # sample timing inputs uniformly from the problem's input box
        dummy_X = generate_uniform_inputs(N=Nt, lb=self.data.Mfn.lb, ub=self.data.Mfn.ub)
        dummy_X = torch.from_numpy(dummy_X).float().to(self.device)
        t_start = time.time()
        dummy_pred, _ = self.forward(dummy_X, m, sample=False)
        # dummy_interp = self.data.interp_to_ground(dummy_pred.data.cpu().numpy(), m)
        # print(dummy_interp.shape)
        t_end = time.time() - t_start
        return t_end
| 34.57037 | 133 | 0.544818 |
acf480795ca95584a8fa17c44a3504b3720f09ef | 5,010 | py | Python | examples/tree_pretrain/utils/learning_rate.py | jiakai0419/Curvature-Learning-Framework | f90165660ff321172bd7ab7da0e7fe2b3abcb70e | [
"Apache-2.0"
] | 86 | 2021-08-03T08:30:26.000Z | 2022-03-13T10:18:16.000Z | examples/tree_pretrain/utils/learning_rate.py | jiakai0419/Curvature-Learning-Framework | f90165660ff321172bd7ab7da0e7fe2b3abcb70e | [
"Apache-2.0"
] | 3 | 2021-11-03T06:25:08.000Z | 2021-12-22T08:58:00.000Z | examples/tree_pretrain/utils/learning_rate.py | jiakai0419/Curvature-Learning-Framework | f90165660ff321172bd7ab7da0e7fe2b3abcb70e | [
"Apache-2.0"
] | 23 | 2021-09-05T07:41:20.000Z | 2022-02-11T07:58:42.000Z | # Copyright (C) 2016-2021 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate warm up"""
from tensorflow.python.framework import ops
import tensorflow as tf
class LearningRate(object):
    """Gradually warm-up(increasing and decreasing) learning rate in optimizer.
    Includes three stages: warm up stage, increasing stage and decay stage.
    """
    # Piecewise-linear schedule over the global step:
    #   [0, warm_step]              : 0        -> lr_warm  (warm up)
    #   [warm_step, increase_step]  : lr_warm  -> lr       (increase)
    #   [increase_step, decay_step] : lr       -> lr_end   (decay)
    #   [decay_step, inf)           : lr_end              (constant)
    def __init__(self,
                 lr=1e-2,
                 lr_warm=1e-3,
                 lr_end=1e-4,
                 warm_step=1e5,
                 increase_step=1e6,
                 decay_step=1e8):
        """Initialize
        Args:
            lr_warm (float): The learning rate changes from 0 to lr_warm in the warm up stage.
            lr (float): The learning rate changes from lr_warm to lr in the increasing stage.
            lr_end (float): The learning rate changes from lr to lr_end in the decay stage.
            warm_step (int): The step between 0 and warm_step is in the warm up stage.
            increase_step (int): The step between warm_step and increase_step is in the increasing stage.
            decay_step (int): The step between warm_step and decay_step is in the decay stage.
        """
        super(LearningRate, self).__init__()
        # all values are clamped to be non-negative
        self.lr = float(max(lr, 0.0))
        self.lr_warm = float(max(lr_warm, 0.0))
        self.lr_end = float(max(lr_end, 0.0))
        self.warm_step = float(max(warm_step, 0))
        self.increase_step = float(max(increase_step, 0))
        self.decay_step = float(max(decay_step, 0))
        # NOTE(review): self.step is never read again in this class
        self.step = 0
    def get_step(self):
        """Gets current training step.
        Returns:
            int: current training step.
        """
        # returned as a float tensor (graph-mode TF global step)
        return tf.to_float(tf.train.get_or_create_global_step())
    def _warm_up_lr(self, step):
        """Computes learning rate in the warm up stage.
        Args:
            step (int): current step.
        Returns:
            float: The updated learning rate.
        """
        # linear ramp from 0 at step 0 to lr_warm at warm_step
        return self.lr_warm * step / self.warm_step
    def _increase_lr(self, step):
        """Computes learning rate in the increasing stage.
        Args:
            step (int): current step.
        Returns:
            float: The updated learning rate.
        """
        # linear interpolation from lr_warm to lr
        ratio = (step - self.warm_step) / (self.increase_step - self.warm_step)
        return self.lr_warm + ratio * (self.lr - self.lr_warm)
    def _decay_lr(self, step):
        """Computes learning rate in the decay stage.
        Args:
            step (int): current step.
        Returns:
            float: The updated learning rate.
        """
        # linear interpolation from lr down to lr_end
        ratio = (step - self.increase_step) / \
            (self.decay_step - self.increase_step)
        return self.lr_end + (1.0 - ratio) * (self.lr - self.lr_end)
    def _end_lr(self, step):
        """Computes learning rate after the decay stage.
        Args:
            step (int): current step.
        Returns:
            float: The updated learning rate.
        """
        # constant floor once decay_step has been passed
        return self.lr_end
    def _less_than(self, a, b):
        """Returns the truth value of (a < b) element-wise.
        a is a Tensor, b is a float/int.
        Args:
            a (tensor): A tensor.
            b (float/int): A float or int value.`
        Returns:
            tensor: A tensor of type bool.
        """
        b = ops.convert_to_tensor(b, dtype=a.dtype.base_dtype)
        return tf.math.less(a, b)
    def get_lr(self):
        """Computes the learning rate according to the training step.
        Returns:
            float: The updated learning rate.
        """
        current_step = self.get_step()
        # nested tf.cond selects the stage at graph-execution time:
        # warm up -> increase -> decay -> constant end value
        lr = tf.cond(
            self._less_than(current_step, self.warm_step),
            lambda: self._warm_up_lr(current_step),
            lambda: tf.cond(
                self._less_than(current_step, self.increase_step),
                lambda: self._increase_lr(current_step),
                lambda: tf.cond(
                    self._less_than(current_step, self.decay_step),
                    lambda: self._decay_lr(current_step),
                    lambda: self._end_lr(current_step)
                )
            )
        )
        return lr
    def __call__(self):
        # convenience: the instance itself can be passed where a
        # learning-rate tensor factory is expected
        return ops.convert_to_tensor(self.get_lr(), dtype=tf.float32)
| 32.745098 | 105 | 0.572255 |
acf4812bfe36455d2235947edb6965d4d29300c4 | 445 | py | Python | dexguru_sdk/__init__.py | sprataa/dg-sdk-python | 4cbc231f067167ecae21d74db6b7011645f68a13 | [
"MIT"
] | 11 | 2021-09-15T14:29:13.000Z | 2022-03-23T01:38:10.000Z | dexguru_sdk/__init__.py | sprataa/dg-sdk-python | 4cbc231f067167ecae21d74db6b7011645f68a13 | [
"MIT"
] | null | null | null | dexguru_sdk/__init__.py | sprataa/dg-sdk-python | 4cbc231f067167ecae21d74db6b7011645f68a13 | [
"MIT"
] | 6 | 2021-09-26T02:50:10.000Z | 2022-02-01T14:13:18.000Z | from .models import *
from .sdk.dg_sdk import DexGuru
__all__ = [
'models',
'AmmModel',
'AmmListModel',
'ChainModel',
'ChainsListModel',
'DexGuru',
'TokenFinanceModel',
'TokensFinanceListModel',
'TokenHistoryModel',
'TokensHistoryListModel',
'TokensInventoryListModel',
'TokenInventoryModel',
'WalletModel',
'WalletsListModel',
'SwapBurnMintModel',
'SwapsBurnsMintsListModel',
]
| 19.347826 | 31 | 0.665169 |
acf48138ae2fbcbc5ce9a7e8ae230fa94b8338f0 | 5,173 | py | Python | nuqql/ui.py | modk/nuqql | c0142b207115a9a225970fb0e1d38092ba85ae1d | [
"MIT"
] | null | null | null | nuqql/ui.py | modk/nuqql | c0142b207115a9a225970fb0e1d38092ba85ae1d | [
"MIT"
] | null | null | null | nuqql/ui.py | modk/nuqql | c0142b207115a9a225970fb0e1d38092ba85ae1d | [
"MIT"
] | null | null | null | """
User Interface part of nuqql
"""
#######################
# USER INTERFACE PART #
#######################
import curses
import curses.ascii
import datetime
import nuqql.config
import nuqql.conversation
import nuqql.history
def handle_message(backend, acc_id, tstamp, sender, msg):
    """
    Dispatch an incoming backend message: log it into the matching
    conversation (and notify the user if that window is inactive), or
    fall back to the backend's main window when nothing matches.
    """
    when = datetime.datetime.fromtimestamp(tstamp)

    def _matches(conv):
        # same backend, same account id, same peer name
        return (conv.backend is backend and conv.account and
                conv.account.aid == acc_id and conv.name == sender)

    conv = next(
        (c for c in nuqql.conversation.CONVERSATIONS if _matches(c)), None)
    if conv is None:
        # no matching conversation -> log to the backend's main window
        backend.conversation.log(sender, msg, tstamp=when)
        return

    # log the message into the conversation and its history
    log_msg = conv.log(conv.name, msg, tstamp=when)
    nuqql.history.log(conv, log_msg)

    # notify the user unless the conversation window is already active
    if not conv.is_active():
        conv.notify()
def update_buddy(buddy):
    """
    Redraw the list window of every buddy conversation whose peer is
    *buddy* (identity comparison).
    """
    buddy_convs = (c for c in nuqql.conversation.CONVERSATIONS
                   if isinstance(c, nuqql.conversation.BuddyConversation))
    for conv in buddy_convs:
        if conv.peers[0] is buddy:
            conv.wins.list_win.redraw()
def add_buddy(buddy):
    """
    Add a new buddy to UI

    Creates a BuddyConversation for *buddy*, registers it in the list
    window, and notifies the user if the history shows messages newer
    than the last-read marker.
    """
    # add a new conversation for the new buddy
    conv = nuqql.conversation.BuddyConversation(buddy.backend, buddy.account,
                                                buddy.name)
    conv.peers.append(buddy)
    conv.wins.list_win.add(conv)
    conv.wins.list_win.redraw()
    # check if there are unread messages for this new buddy in the history
    last_log_msg = nuqql.history.get_last_log_line(conv)
    last_read_msg = nuqql.history.get_lastread(conv)
    if last_log_msg:
        # unread if nothing was ever read, or the newest logged message
        # differs from the last-read one
        if not last_read_msg or not last_log_msg.is_equal(last_read_msg):
            # there are unread messages, notify user if
            # conversation is inactive
            if not conv.is_active():
                conv.notify()
def read_input():
    """
    Return the next wide character typed by the user, or None when the
    non-blocking read (timeout configured in init()) yielded nothing.
    """
    screen = nuqql.win.MAIN_WINS["screen"]
    try:
        return screen.get_wch()
    except curses.error:
        # get_wch() raises curses.error on timeout -> treat as "no input"
        return None
def show_terminal_warning():
    """
    Clear the screen and display a "terminal too small" warning,
    truncated to whatever width is currently available.
    """
    screen = nuqql.win.MAIN_WINS["screen"]
    screen.clear()

    # need room for at least one character in each dimension
    max_y, max_x = screen.getmaxyx()
    if max_y < 1 or max_x < 1:
        return

    # truncate the message so it fits into the current terminal width
    warning = "Invalid terminal size. Please resize."
    screen.addstr(0, 0, warning[:max_x - 1])
def is_input_valid(char):
    """
    Return True if *char* is usable input: present (not None) and not
    the NUL character.
    """
    if char is None or char == "\0":
        return False
    return True
def handle_input():
    """
    Read and handle user input

    Dispatch order: ignore invalid input -> terminal-size warning ->
    terminal resize -> active conversation -> list window.

    Returns:
        bool: True to keep the main loop running, False when the user
        quit (no conversation and no list window is active).
    """
    # wait for user input and get timeout or character to process
    char = read_input()
    # handle user input
    if not is_input_valid(char):
        # No valid input, keep waiting for input
        return True
    # if terminal size is not valid, stop here
    if not nuqql.config.WinConfig.is_terminal_valid():
        show_terminal_warning()
        return True
    # if terminal resized, resize and redraw active windows
    if char == curses.KEY_RESIZE:
        nuqql.conversation.resize_main_window()
        return True
    # pass user input to active conversation
    for conv in nuqql.conversation.CONVERSATIONS:
        if conv.is_active():
            conv.process_input(char)
            return True
    # if no conversation is active pass input to active list window
    if nuqql.win.MAIN_WINS["list"].state.active:
        # list window navigation
        nuqql.win.MAIN_WINS["input"].redraw()
        nuqql.win.MAIN_WINS["log"].redraw()
        nuqql.win.MAIN_WINS["list"].process_input(char)
        return True
    # list window is also inactive -> user quit
    return False
def start(stdscr, func):
    """
    Start UI and run provided function

    Configures curses (*stdscr* is supplied by curses.wrapper), builds
    the main windows and then runs *func*.

    Returns:
        An error message string if the terminal size is invalid,
        otherwise whatever *func* returns.
    """
    # save stdscr
    nuqql.win.MAIN_WINS["screen"] = stdscr
    # configuration: 10 ms input timeout, makes get_wch() non-blocking
    stdscr.timeout(10)
    # clear everything
    stdscr.clear()
    stdscr.refresh()
    # make sure window config is loaded
    nuqql.config.init_win(stdscr)
    # create main windows, if terminal size is valid, otherwise just stop here
    if not nuqql.config.WinConfig.is_terminal_valid():
        return "Terminal size invalid."
    nuqql.conversation.create_main_windows()
    # run function provided by caller
    return func()
def init(func):
    """
    Initialize curses, run the UI (start() with *func*) inside
    curses.wrapper — which restores the terminal even on exceptions —
    and print any non-empty status/error message start() returned.
    """
    retval = curses.wrapper(start, func)
    # `retval and retval != ""` was redundant: "" is already falsy
    if retval:
        print(retval)
| 24.751196 | 78 | 0.627682 |
acf482488036ac76c0f8b48fc85a8cd52f0b2ce1 | 2,681 | py | Python | openmdao/surrogate_models/tests/test_map.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | null | null | null | openmdao/surrogate_models/tests/test_map.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | null | null | null | openmdao/surrogate_models/tests/test_map.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | 1 | 2018-07-27T06:39:15.000Z | 2018-07-27T06:39:15.000Z | from openmdao.api import Group, Problem, MetaModelUnStructured, NearestNeighbor
from openmdao.utils.assert_utils import assert_rel_error
import numpy as np
import unittest
class CompressorMap(MetaModelUnStructured):
    """Unstructured metamodel of a compressor map.

    Inputs Nc (corrected speed), Rline and alpha; outputs PR, eff and Wc,
    each backed by a linear nearest-neighbor surrogate.  Training data is
    supplied later through the component's options (see TestMap).
    """
    def __init__(self):
        super(CompressorMap, self).__init__()
        # map inputs with default values
        self.add_input('Nc', val=1.0)
        self.add_input('Rline', val=2.0)
        self.add_input('alpha', val=0.0)
        # map outputs, each interpolated by a linear nearest-neighbor surrogate
        self.add_output('PR', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))
        self.add_output('eff', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))
        self.add_output('Wc', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))
class TestMap(unittest.TestCase):
    """Checks that a trained CompressorMap reproduces analytically
    generated map data (PR, eff, Wc as polynomials of Nc, Rline, alpha)
    at on-grid and off-grid query points, within a loose tolerance."""
    def test_comp_map(self):
        # create compressor map and save reference to options (for training data)
        c = CompressorMap()
        m = c.options
        # add compressor map to problem
        p = Problem()
        p.model.add_subsystem('compmap', c)
        p.setup()
        # train metamodel
        Nc = np.array([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1])
        Rline = np.array([1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])
        alpha = np.array([0.0, 1.0])
        # full cartesian grid of training points
        Nc_mat, Rline_mat, alpha_mat = np.meshgrid(Nc, Rline, alpha, sparse=False)
        m['train:Nc'] = Nc_mat.flatten()
        m['train:Rline'] = Rline_mat.flatten()
        m['train:alpha'] = alpha_mat.flatten()
        # synthetic targets: simple polynomials of the inputs
        m['train:PR'] = m['train:Nc']*m['train:Rline']+m['train:alpha']
        m['train:eff'] = m['train:Nc']*m['train:Rline']**2+m['train:alpha']
        m['train:Wc'] = m['train:Nc']**2*m['train:Rline']**2+m['train:alpha']
        # check predicted values (on-grid point)
        p['compmap.Nc'] = 0.9
        p['compmap.Rline'] = 2.0
        p['compmap.alpha'] = 0.0
        p.run_model()
        tol = 1e-1
        assert_rel_error(self, p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)
        assert_rel_error(self, p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)
        assert_rel_error(self, p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)
        # off-grid point: interpolated prediction, same loose tolerance
        p['compmap.Nc'] = 0.95
        p['compmap.Rline'] = 2.1
        p['compmap.alpha'] = 0.0
        p.run_model()
        assert_rel_error(self, p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)
        assert_rel_error(self, p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)
        assert_rel_error(self, p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)
# allow running this test module directly (outside a test runner)
if __name__ == "__main__":
    unittest.main()
| 37.760563 | 113 | 0.603879 |
acf4828313355df48423a08f15ce167bf02e18b1 | 8,026 | py | Python | isi_sdk_7_2/isi_sdk_7_2/models/cloud_account_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_7_2/isi_sdk_7_2/models/cloud_account_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_7_2/isi_sdk_7_2/models/cloud_account_create_params.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_7_2.models.cloud_account import CloudAccount # noqa: F401,E501
class CloudAccountCreateParams(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'account_username': 'str',
        'enabled': 'bool',
        'key': 'str',
        'name': 'str',
        'uri': 'str',
        'type': 'str'
    }
    attribute_map = {
        'account_username': 'account_username',
        'enabled': 'enabled',
        'key': 'key',
        'name': 'name',
        'uri': 'uri',
        'type': 'type'
    }
    def __init__(self, account_username=None, enabled=None, key=None, name=None, uri=None, type=None):  # noqa: E501
        """CloudAccountCreateParams - a model defined in Swagger"""  # noqa: E501
        self._account_username = None
        self._enabled = None
        self._key = None
        self._name = None
        self._uri = None
        self._type = None
        self.discriminator = None
        # account_username, key, name, uri and type are required: their
        # setters raise ValueError on None; enabled is optional
        self.account_username = account_username
        if enabled is not None:
            self.enabled = enabled
        self.key = key
        self.name = name
        self.uri = uri
        self.type = type
    @property
    def account_username(self):
        """Gets the account_username of this CloudAccountCreateParams.  # noqa: E501
        The username required to authenticate against the cloud service  # noqa: E501
        :return: The account_username of this CloudAccountCreateParams.  # noqa: E501
        :rtype: str
        """
        return self._account_username
    @account_username.setter
    def account_username(self, account_username):
        """Sets the account_username of this CloudAccountCreateParams.
        The username required to authenticate against the cloud service  # noqa: E501
        :param account_username: The account_username of this CloudAccountCreateParams.  # noqa: E501
        :type: str
        """
        if account_username is None:
            raise ValueError("Invalid value for `account_username`, must not be `None`")  # noqa: E501
        self._account_username = account_username
    @property
    def enabled(self):
        """Gets the enabled of this CloudAccountCreateParams.  # noqa: E501
        Whether or not this account should be used for cloud storage  # noqa: E501
        :return: The enabled of this CloudAccountCreateParams.  # noqa: E501
        :rtype: bool
        """
        return self._enabled
    @enabled.setter
    def enabled(self, enabled):
        """Sets the enabled of this CloudAccountCreateParams.
        Whether or not this account should be used for cloud storage  # noqa: E501
        :param enabled: The enabled of this CloudAccountCreateParams.  # noqa: E501
        :type: bool
        """
        self._enabled = enabled
    @property
    def key(self):
        """Gets the key of this CloudAccountCreateParams.  # noqa: E501
        A valid authentication key for connecting to the cloud  # noqa: E501
        :return: The key of this CloudAccountCreateParams.  # noqa: E501
        :rtype: str
        """
        return self._key
    @key.setter
    def key(self, key):
        """Sets the key of this CloudAccountCreateParams.
        A valid authentication key for connecting to the cloud  # noqa: E501
        :param key: The key of this CloudAccountCreateParams.  # noqa: E501
        :type: str
        """
        if key is None:
            raise ValueError("Invalid value for `key`, must not be `None`")  # noqa: E501
        self._key = key
    @property
    def name(self):
        """Gets the name of this CloudAccountCreateParams.  # noqa: E501
        A unique name for this account  # noqa: E501
        :return: The name of this CloudAccountCreateParams.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this CloudAccountCreateParams.
        A unique name for this account  # noqa: E501
        :param name: The name of this CloudAccountCreateParams.  # noqa: E501
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def uri(self):
        """Gets the uri of this CloudAccountCreateParams.  # noqa: E501
        A valid URI pointing to the location of the cloud storage  # noqa: E501
        :return: The uri of this CloudAccountCreateParams.  # noqa: E501
        :rtype: str
        """
        return self._uri
    @uri.setter
    def uri(self, uri):
        """Sets the uri of this CloudAccountCreateParams.
        A valid URI pointing to the location of the cloud storage  # noqa: E501
        :param uri: The uri of this CloudAccountCreateParams.  # noqa: E501
        :type: str
        """
        if uri is None:
            raise ValueError("Invalid value for `uri`, must not be `None`")  # noqa: E501
        self._uri = uri
    @property
    def type(self):
        """Gets the type of this CloudAccountCreateParams.  # noqa: E501
        The type of cloud protocol required (e.g., 'ran', 'azure')  # noqa: E501
        :return: The type of this CloudAccountCreateParams.  # noqa: E501
        :rtype: str
        """
        return self._type
    @type.setter
    def type(self, type):
        """Sets the type of this CloudAccountCreateParams.
        The type of cloud protocol required (e.g., 'ran', 'azure')  # noqa: E501
        :param type: The type of this CloudAccountCreateParams.  # noqa: E501
        :type: str
        """
        if type is None:
            raise ValueError("Invalid value for `type`, must not be `None`")  # noqa: E501
        # enum validation: only the protocols known to this API version
        allowed_values = ["ran", "azure"]  # noqa: E501
        if type not in allowed_values:
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )
        self._type = type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # recursively serialize nested models, lists and dicts
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CloudAccountCreateParams):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.947761 | 116 | 0.587466 |
acf48293aa6b01aaf3d2537745c3892525b44860 | 18,951 | py | Python | metalibm_hw_blocks/ml_fixed_mpfma.py | metalibm/metalibm-clone | d04839e58950a156b79b763b9f45cb874e21ebfe | [
"MIT"
] | null | null | null | metalibm_hw_blocks/ml_fixed_mpfma.py | metalibm/metalibm-clone | d04839e58950a156b79b763b9f45cb874e21ebfe | [
"MIT"
] | null | null | null | metalibm_hw_blocks/ml_fixed_mpfma.py | metalibm/metalibm-clone | d04839e58950a156b79b763b9f45cb874e21ebfe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# last-modified: Mar 7th, 2018
# Author(s): Nicolas Brunie <nbrunie@kalray.eu>
###############################################################################
import sys
import sollya
from sollya import Interval, ceil, floor, round, log2
S2 = sollya.SollyaObject(2)
from sollya import parse as sollya_parse
from metalibm_core.core.ml_operations import *
from metalibm_core.core.ml_formats import *
from metalibm_core.core.ml_table import ML_Table
from metalibm_core.code_generation.vhdl_backend import VHDLBackend
from metalibm_core.core.polynomials import *
from metalibm_core.core.ml_entity import ML_Entity, ML_EntityBasis, DefaultEntityArgTemplate
from metalibm_core.utility.ml_template import *
from metalibm_core.utility.log_report import Log
from metalibm_core.utility.rtl_debug_utils import (
debug_std, debug_dec, debug_cst_dec)
from metalibm_core.utility.num_utils import ulp
from metalibm_core.utility.gappa_utils import is_gappa_installed
from metalibm_core.utility.rtl_debug_utils import *
from metalibm_core.core.ml_hdl_format import *
from metalibm_core.core.ml_hdl_operations import *
from metalibm_hw_blocks.lzc import ML_LeadingZeroCounter
## Zero-extension wrapper
# @param op the input operation tree
# @param s integer size of the extension
# @return the Zero extended operation node
def zext(op, s):
  """Zero-extend the operation node *op* by *s* bits (zeros as new MSBs)."""
  ext_bits = int(s)
  # output format is exactly <ext_bits> wider than the input format
  widened_format = ML_StdLogicVectorFormat(
    op.get_precision().get_bit_size() + ext_bits
  )
  return ZeroExt(op, ext_bits, precision = widened_format)
## Right zero-extension wrapper
# @param optree the input operation tree
# @param ext_size number of zero bits appended on the right (LSB side)
# @return Concatenation node <optree> . "0...0"
def rzext(optree, ext_size):
  """Append *ext_size* zero bits to the right (LSB side) of *optree*."""
  pad_width = int(ext_size)
  input_width = optree.get_precision().get_bit_size()
  zero_pad = Constant(0, precision = ML_StdLogicVectorFormat(pad_width))
  return Concatenation(
    optree,
    zero_pad,
    precision = ML_StdLogicVectorFormat(input_width + pad_width)
  )
class FP_FIXED_MPFMA(ML_Entity("fp_fixed_mpfma")):
  """Floating-point multiply, fixed-point accumulate entity generator.

  Generates a VHDL operator computing <acc> + <x> * <y>, where <x> and <y>
  are floating-point inputs and <acc> is a wide fixed-point accumulator
  whose LSB weight statically covers the smallest possible product weight
  (Kulisch-style accumulation -- TODO confirm intended rounding contract).
  The accumulator may be encoded in sign-magnitude or 2's complement,
  selected by the ``sign_magnitude`` argument.
  """
  def __init__(self,
             arg_template = DefaultEntityArgTemplate,
             precision = ML_Binary32,
             target = VHDLBackend(),
             debug_flag = False,
             output_file = "fp_fixed_mpfma.vhd",
             entity_name = "fp_fixed_mpfma",
             language = VHDL_Code,
             vector_size = 1,
           ):
    """Build the entity descriptor.

    :param arg_template: command-line/default argument bundle; its fields
                         (precision, extra_digit, sign_magnitude, pipelined)
                         override the keyword defaults below
    :param precision: floating-point format of inputs <x> and <y>
    :param target: code-generation backend (VHDL)
    :param debug_flag: enable debug signal annotations
    :param output_file: generated VHDL file name
    :param entity_name: generated VHDL entity name
    :param language: output HDL language descriptor
    :param vector_size: unused here (scalar entity) -- kept for API symmetry
    """
    # initializing I/O precision (arg_template value wins over keyword)
    precision = ArgDefault.select_value([arg_template.precision, precision])
    io_precisions = [precision] * 2
    # initializing base class
    ML_EntityBasis.__init__(self,
      base_name = "fp_fixed_mpfma",
      entity_name = entity_name,
      output_file = output_file,
      io_precisions = io_precisions,
      backend = target,
      debug_flag = debug_flag,
      language = language,
      arg_template = arg_template
    )
    self.precision = precision.get_base_format()
    self.io_precision = precision
    # number of extra bits to add to the accumulator fixed precision
    self.extra_digit = arg_template.extra_digit
    # smallest possible weight of a product digit: twice the subnormal emin
    min_prod_exp = self.precision.get_emin_subnormal() * 2
    self.acc_lsb_index = min_prod_exp
    # select sign-magnitude encoded accumulator
    self.sign_magnitude = arg_template.sign_magnitude
    # enable/disable operator pipelining
    self.pipelined = arg_template.pipelined
  @staticmethod
  def get_default_args(**kw):
    """Return a DefaultEntityArgTemplate pre-filled with this entity's
    defaults; keyword arguments override the defaults."""
    default_mapping = {
      "extra_digit" : 0,
      "sign_magnitude" : False,
      "pipelined" : False
    }
    default_mapping.update(kw)
    return DefaultEntityArgTemplate(
      **default_mapping
    )
  def get_acc_lsb_index(self):
    """Return the weight (exponent) of the accumulator's LSB."""
    return self.acc_lsb_index
  def generate_scheme(self):
    """Generate the operator datapath computing <acc> + <x> * <y>.

    :return: list of code entities implementing the operator
    """
    Log.report(Log.Info, "generating fixed MPFMA with {ed} extra digit(s) and sign-magnitude accumulator: {sm}".format(ed = self.extra_digit, sm = self.sign_magnitude))
    def get_virtual_cst(prec, value, language):
      # NOTE(review): this helper does not appear to be used below -- kept as-is
      return prec.get_support_format().get_cst(
        prec.get_base_format().get_integer_coding(value, language))
    ## convert @p value from an input floating-point precision
    # @p in_precision to an output support format @p out_precision
    io_precision = self.io_precision
    # declaring standard clock and reset input signal
    #clk = self.implementation.add_input_signal("clk", ML_StdLogic)
    # reset = self.implementation.add_input_signal("reset", ML_StdLogic)
    # declaring main input variable
    # maximum weight for a mantissa product digit
    max_prod_exp = self.precision.get_emax() * 2 + 1
    # minimum weight for a mantissa product digit
    min_prod_exp = self.precision.get_emin_subnormal() * 2
    ## Most and least significant digit index for the
    # accumulator
    acc_msb_index = max_prod_exp + self.extra_digit
    acc_lsb_index = min_prod_exp
    acc_width = acc_msb_index - min_prod_exp + 1
    # precision of the accumulator
    acc_prec = ML_StdLogicVectorFormat(acc_width)
    reset = self.implementation.add_input_signal("reset", ML_StdLogic)
    vx = self.implementation.add_input_signal("x", io_precision)
    vy = self.implementation.add_input_signal("y", io_precision)
    # Inserting post-input pipeline stage
    if self.pipelined: self.implementation.start_new_stage()
    acc = self.implementation.add_input_signal("acc", acc_prec)
    if self.sign_magnitude:
      # the accumulator is in sign-magnitude representation:
      # its sign arrives on a dedicated input signal
      sign_acc = self.implementation.add_input_signal("sign_acc", ML_StdLogic)
    else:
      # 2's complement accumulator: its sign is its MSB
      sign_acc = CopySign(acc, precision = ML_StdLogic, tag = "sign_acc", debug = debug_std)
    vx_precision = self.precision
    vy_precision = self.precision
    result_precision = acc_prec
    # precision for first operand vx which is to be statically
    # positioned
    p = vx_precision.get_mantissa_size()
    # precision for second operand vy which is to be dynamically shifted
    q = vy_precision.get_mantissa_size()
    # vx must be aligned with vy
    # the largest shift amount (in absolute value) is precision + 2
    # (1 guard bit and 1 rounding bit)
    exp_vx_precision = ML_StdLogicVectorFormat(vx_precision.get_exponent_size())
    exp_vy_precision = ML_StdLogicVectorFormat(vy_precision.get_exponent_size())
    mant_vx_precision = ML_StdLogicVectorFormat(p-1)
    mant_vy_precision = ML_StdLogicVectorFormat(q-1)
    mant_vx = MantissaExtraction(vx, precision = mant_vx_precision)
    mant_vy = MantissaExtraction(vy, precision = mant_vy_precision)
    exp_vx = ExponentExtraction(vx, precision = exp_vx_precision, tag = "exp_vx", debug = debug_dec)
    exp_vy = ExponentExtraction(vy, precision = exp_vy_precision, tag = "exp_vy", debug = debug_dec)
    # Maximum number of leading zero for normalized <vx> mantissa
    L_x = 0
    # Maximum number of leading zero for normalized <vy> mantissa
    L_y = 0
    # Maximum number of leading zero for the product of <x>.<y>
    # mantissa.
    L_xy = L_x + L_y + 1
    sign_vx = CopySign(vx, precision = ML_StdLogic)
    sign_vy = CopySign(vy, precision = ML_StdLogic)
    # determining if the operation is an addition (effective_op = '0')
    # or a subtraction (effective_op = '1')
    sign_xy = BitLogicXor(sign_vx, sign_vy, precision = ML_StdLogic, tag = "sign_xy", debug = debug_std)
    effective_op = BitLogicXor(sign_xy, sign_acc, precision = ML_StdLogic, tag = "effective_op", debug = debug_std)
    exp_vx_bias = vx_precision.get_bias()
    exp_vy_bias = vy_precision.get_bias()
    # <acc> is statically positioned in the datapath,
    # it may even constitute the whole datapath
    #
    # the product is shifted with respect to the fixed accumulator
    exp_bias = (exp_vx_bias + exp_vy_bias)
    # because of the mantissa range [1, 2[, the product exponent
    # is located one bit to the right (lower) of the product MSB
    prod_exp_offset = 1
    # Determine a working precision to accommodate exponent difference
    # FIXME: check interval and exponent operations size
    exp_precision_ext_size = max(
      vx_precision.get_exponent_size(),
      vy_precision.get_exponent_size(),
      abs(ceil(log2(abs(acc_msb_index)))),
      abs(ceil(log2(abs(acc_lsb_index)))),
      abs(ceil(log2(abs(exp_bias + prod_exp_offset)))),
    ) + 2
    Log.report(Log.Info, "exp_precision_ext_size={}".format(exp_precision_ext_size))
    exp_precision_ext = ML_StdLogicVectorFormat(exp_precision_ext_size)
    # static accumulator exponent
    exp_acc = Constant(acc_msb_index, precision = exp_precision_ext, tag = "exp_acc", debug = debug_cst_dec)
    # Y is first aligned offset = max(o+L_y,q) + 2 bits to the left of x
    # and then shifted right by
    # exp_diff = exp_x - exp_y + offset
    # exp_vx in [emin, emax]
    # exp_vx - exp_vx + p +2 in [emin-emax + p + 2, emax - emin + p + 2]
    exp_diff = UnsignedSubtraction(
      exp_acc,
      UnsignedAddition(
        UnsignedAddition(
          zext(exp_vy, exp_precision_ext_size - vy_precision.get_exponent_size()),
          zext(exp_vx, exp_precision_ext_size - vx_precision.get_exponent_size()),
          precision = exp_precision_ext
        ),
        Constant(exp_bias + prod_exp_offset, precision = exp_precision_ext, tag = "diff_bias", debug = debug_cst_dec),
        precision = exp_precision_ext,
        tag = "pre_exp_diff",
        debug = debug_dec
      ),
      precision = exp_precision_ext,
      tag = "exp_diff",
      debug = debug_dec
    )
    exp_precision_ext_signed = get_signed_precision(exp_precision_ext)
    signed_exp_diff = SignCast(
      exp_diff,
      specifier = SignCast.Signed,
      precision = exp_precision_ext_signed
    )
    datapath_full_width = acc_width
    # the maximum exp diff is the size of the datapath
    # minus the bit size of the product
    max_exp_diff = datapath_full_width - (p + q)
    exp_diff_lt_0 = Comparison(
      signed_exp_diff,
      Constant(0, precision = exp_precision_ext_signed),
      specifier = Comparison.Less,
      precision = ML_Bool,
      tag = "exp_diff_lt_0",
      debug = debug_std
    )
    exp_diff_gt_max_diff = Comparison(signed_exp_diff, Constant(max_exp_diff, precision = exp_precision_ext_signed), specifier = Comparison.Greater, precision = ML_Bool)
    shift_amount_prec = ML_StdLogicVectorFormat(int(floor(log2(max_exp_diff))+1))
    # shift amount clamped to [0, max_exp_diff]
    mant_shift = Select(
      exp_diff_lt_0,
      Constant(0, precision = shift_amount_prec),
      Select(
        exp_diff_gt_max_diff,
        Constant(max_exp_diff, precision = shift_amount_prec),
        Truncate(exp_diff, precision = shift_amount_prec),
        precision = shift_amount_prec
      ),
      precision = shift_amount_prec,
      tag = "mant_shift",
      debug = debug_dec
    )
    prod_prec = ML_StdLogicVectorFormat(p+q)
    prod = UnsignedMultiplication(
      mant_vx,
      mant_vy,
      precision = prod_prec,
      tag = "prod",
      debug = debug_std
    )
    # attempt at pipelining the operator
    # self.implementation.start_new_stage()
    # right-pad the product to the full datapath width, then align it
    mant_ext_size = datapath_full_width - (p+q)
    shift_prec = ML_StdLogicVectorFormat(datapath_full_width)
    shifted_prod = BitLogicRightShift(rzext(prod, mant_ext_size), mant_shift, precision = shift_prec, tag = "shifted_prod", debug = debug_std)
    ## Inserting a pipeline stage after the product shifting
    if self.pipelined: self.implementation.start_new_stage()
    if self.sign_magnitude:
      # the accumulator is in sign-magnitude representation
      # when signs differ, 1's-complement the accumulator here; the
      # missing +1 of the 2's complement is folded into mant_add_p1_ext
      acc_negated = Select(
        Comparison(
          sign_xy,
          sign_acc,
          specifier = Comparison.Equal,
          precision = ML_Bool
        ),
        acc,
        BitLogicNegate(acc, precision = acc_prec),
        precision = acc_prec
      )
      # one extra MSB bit is added to the final addition
      # to detect overflows
      add_width = acc_width + 1
      add_prec = ML_StdLogicVectorFormat(add_width)
      # FIXME: implement with a proper compound adder
      mant_add_p0_ext = UnsignedAddition(
        zext(shifted_prod, 1),
        zext(acc_negated, 1),
        precision = add_prec
      )
      mant_add_p1_ext = UnsignedAddition(
        mant_add_p0_ext,
        Constant(1, precision = ML_StdLogic),
        precision = add_prec,
        tag = "mant_add",
        debug = debug_std
      )
      # discarding carry overflow bit
      mant_add_p0 = SubSignalSelection(mant_add_p0_ext, 0, acc_width - 1, precision = acc_prec)
      mant_add_p1 = SubSignalSelection(mant_add_p1_ext, 0, acc_width - 1, precision = acc_prec)
      mant_add_pre_sign = CopySign(mant_add_p1_ext, precision = ML_StdLogic, tag = "mant_add_pre_sign", debug = debug_std)
      # select the correct magnitude:
      #   same signs      -> plain sum (p0)
      #   opposite signs  -> p1 if non-negative, else negated p0
      mant_add = Select(
        Comparison(
          sign_xy,
          sign_acc,
          specifier = Comparison.Equal,
          precision = ML_Bool
        ),
        mant_add_p0,
        Select(
          Comparison(
            mant_add_pre_sign,
            Constant(1, precision = ML_StdLogic),
            specifier = Comparison.Equal,
            precision = ML_Bool
          ),
          mant_add_p1,
          BitLogicNegate(
            mant_add_p0,
            precision = acc_prec
          ),
          precision = acc_prec,
        ),
        precision = acc_prec,
        tag = "mant_add"
      )
      # if both operands had the same sign, then
      # mant_add is necessarily positive and the result
      # sign matches the input sign
      # if both operands had opposite signs, then
      # the result sign matches the product sign
      # if mant_add is positive, else the accumulator sign
      output_sign = Select(
        Comparison(
          effective_op,
          Constant(1, precision = ML_StdLogic),
          specifier = Comparison.Equal,
          precision = ML_Bool
        ),
        # if the effective op is a subtraction (prod - acc)
        BitLogicXor(
          sign_acc,
          mant_add_pre_sign,
          precision = ML_StdLogic
        ),
        # the effective op is an addition, thus result and
        # acc share sign
        sign_acc,
        precision = ML_StdLogic,
        tag = "output_sign"
      )
      if self.pipelined: self.implementation.start_new_stage()
      # adding output
      self.implementation.add_output_signal("vr_sign", output_sign)
      self.implementation.add_output_signal("vr_acc", mant_add)
    else:
      # 2's complement encoding of the accumulator,
      # the accumulator is never negated, only the product
      # is negated if negative
      # negate shifted prod when required
      shifted_prod_op = Select(
        Comparison(
          sign_xy,
          Constant(1, precision = ML_StdLogic),
          specifier = Comparison.Equal,
          precision = ML_Bool
        ),
        Negation(shifted_prod, precision = shift_prec),
        shifted_prod,
        precision = shift_prec
      )
      add_prec = shift_prec # ML_StdLogicVectorFormat(datapath_full_width + 1)
      mant_add = UnsignedAddition(
        shifted_prod_op,
        acc,
        precision = acc_prec,
        tag = "mant_add",
        debug = debug_std
      )
      if self.pipelined: self.implementation.start_new_stage()
      self.implementation.add_output_signal("vr_acc", mant_add)
    return [self.implementation]
  def numeric_emulate(self, io_map):
    """Bit-accurate software model of the operator, used by the test bench.

    :param io_map: dict with keys "x", "y", "acc" (and "sign_acc" when the
                   accumulator is sign-magnitude encoded)
    :return: dict with key "vr_acc" (and "vr_sign" in sign-magnitude mode)
    """
    vx = io_map["x"]
    vy = io_map["y"]
    acc = io_map["acc"]
    result = {}
    acc_lsb_index = self.get_acc_lsb_index()
    if self.sign_magnitude:
      sign_acc = io_map["sign_acc"]
      # fold the separate sign bit into a signed magnitude
      acc = -acc if sign_acc else acc
      # rescale acc to its real weight, accumulate, then rescale back
      # to integer accumulator digits
      result_value = int(sollya.nearestint((vx * vy + acc *S2**acc_lsb_index)*S2**-acc_lsb_index))
      result_sign = 1 if result_value < 0 else 0
      result["vr_sign"] = result_sign
      result["vr_acc"] = abs(result_value)
    else:
      result_value = int(sollya.nearestint((vx * vy + acc *S2**acc_lsb_index)*S2**-acc_lsb_index))
      result["vr_acc"] = result_value
    return result
  # regression inputs replayed by the test bench (half-precision codings)
  standard_test_cases = [
    #({
    #"y": ML_Binary16.get_value_from_integer_coding("bab9", base = 16),
    #"x": ML_Binary16.get_value_from_integer_coding("bbff", base = 16),
    #"acc": int("1000000011111001011000111000101000101101110110001010011000101001001111100010101001", 2),
    #"sign_acc": 0
    #}, None),
    ({
      "y": ML_Binary16.get_value_from_integer_coding("bbff", base = 16),
      "x": ML_Binary16.get_value_from_integer_coding("bbfa", base = 16),
      "acc": int("1000100010100111001111000001000001101100110110011010001001011011000010010111111001", 2),
      "sign_acc": 1}, None),
  ]
if __name__ == "__main__":
  # auto-test: build the entity from command-line arguments and
  # generate its VHDL implementation
  arg_template = ML_EntityArgTemplate(default_entity_name = "new_fp_fixed_mpfma", default_output_file = "ml_fp_fixed_mpfma.vhd" )
  # extra digit command line argument
  arg_template.parser.add_argument("--extra-digit", dest = "extra_digit", type=int, default = 0, help = "set the number of accumulator extra digits")
  arg_template.parser.add_argument("--sign-magnitude", dest = "sign_magnitude", action = "store_const", default = False, const = True, help = "set sign-magnitude encoding for the accumulator")
  # argument extraction (dropped the redundant, unused
  # `parse_arg_index_list` alias of the extraction result)
  args = arg_template.arg_extraction()
  ml_hw_fp_fixed_mpfma = FP_FIXED_MPFMA(args)
  ml_hw_fp_fixed_mpfma.gen_implementation()
# (removed non-Python dataset-extraction residue that trailed the file)